David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2012 ARM Ltd. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4 | */ |
| 5 | #ifndef __ASM_IRQFLAGS_H |
| 6 | #define __ASM_IRQFLAGS_H |
| 7 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8 | #include <asm/alternative.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9 | #include <asm/ptrace.h> |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10 | #include <asm/sysreg.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11 | |
| 12 | /* |
 * AArch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
| 17 | * Masking SError masks irq, but not debug exceptions. Masking irqs has no |
| 18 | * side effects for other flags. Keeping to this order makes it easier for |
| 19 | * entry.S to know which exceptions should be unmasked. |
| 20 | * |
| 21 | * FIQ is never expected, but we mask it when we disable debug exceptions, and |
| 22 | * unmask it at all other times. |
| 23 | */ |
| 24 | |
| 25 | /* |
| 26 | * CPU interrupt mask handling. |
| 27 | */ |
/*
 * Unmask IRQs.
 *
 * Default path: clear PSTATE.I via "msr daifclr, #2".
 * When ARM64_HAS_IRQ_PRIO_MASKING is patched in, write GIC_PRIO_IRQON to
 * ICC_PMR_EL1 instead, followed by "dsb sy" so the new priority mask is
 * visible before subsequent instructions.
 */
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		/* PMR must hold one of the two canonical mask values */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	/* The "nop" pads the default path to the alternative's length */
	asm volatile(ALTERNATIVE(
		"msr daifclr, #2 // arch_local_irq_enable\n"
		"nop",
		__msr_s(SYS_ICC_PMR_EL1, "%0")
		"dsb sy",
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");
}
| 46 | |
| 47 | static inline void arch_local_irq_disable(void) |
| 48 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 49 | if (system_has_prio_mask_debugging()) { |
| 50 | u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); |
| 51 | |
| 52 | WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); |
| 53 | } |
| 54 | |
| 55 | asm volatile(ALTERNATIVE( |
| 56 | "msr daifset, #2 // arch_local_irq_disable", |
| 57 | __msr_s(SYS_ICC_PMR_EL1, "%0"), |
| 58 | ARM64_HAS_IRQ_PRIO_MASKING) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 59 | : |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 60 | : "r" ((unsigned long) GIC_PRIO_IRQOFF) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 61 | : "memory"); |
| 62 | } |
| 63 | |
/*
 * Save the current interrupt enable state.
 *
 * Returns the DAIF bits, or — when IRQ priority masking is in use — the
 * current ICC_PMR_EL1 value. Callers must interpret the result through
 * arch_irqs_disabled_flags(), which is patched consistently with this
 * function, so the two encodings never mix.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs %0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}
| 81 | |
/*
 * Test whether a flags word saved by arch_local_save_flags() has IRQs
 * masked. Returns non-zero when masked.
 *
 * DAIF encoding: non-zero iff PSR_I_BIT is set ("and" isolates the bit).
 * PMR encoding:  non-zero iff flags != GIC_PRIO_IRQON ("eor" leaves a
 *                non-zero residue for any other PMR value).
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and %w0, %w1, #" __stringify(PSR_I_BIT),
		"eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)	/* %w1: only the low 32 bits are used */
		: "memory");

	return res;
}
| 96 | |
/*
 * Save the current interrupt state, then mask IRQs.
 *
 * The returned value is suitable for a later arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
}
| 112 | |
/*
 * Restore a saved IRQ state (as returned by arch_local_save_flags() or
 * arch_local_irq_save()).
 *
 * Default path: write @flags back to DAIF. Under IRQ priority masking,
 * write @flags to ICC_PMR_EL1 followed by "dsb sy" — as in
 * arch_local_irq_enable(), since a restore may unmask. The "nop" pads
 * the default path to the alternative's length.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr daif, %0\n"
		"nop",
		__msr_s(SYS_ICC_PMR_EL1, "%0")
		"dsb sy",
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");
}
| 128 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 129 | #endif /* __ASM_IRQFLAGS_H */ |