irqflags.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupt and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */
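
/*
 * Two masking schemes are implemented below and selected at runtime via
 * system_uses_irq_prio_masking(): the architectural PSTATE.I/F bits in
 * 'daif', and, on kernels using pseudo-NMIs (CONFIG_ARM64_PSEUDO_NMI),
 * the GICv3 priority mask (ICC_PMR_EL1), which masks ordinary interrupts
 * while leaving NMI-priority ones deliverable.
 */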

static __always_inline void __daif_local_irq_enable(void)
{
	barrier();
	asm volatile("msr daifclr, #3");
	barrier();
}
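
/*
 * The immediate for 'msr daifclr'/'msr daifset' is a bit mask over the
 * D, A, I and F flags (bit 3 = Debug, bit 2 = SError, bit 1 = IRQ,
 * bit 0 = FIQ), so #3 clears or sets PSTATE.I and PSTATE.F together.
 * The barrier() calls are compiler barriers: they keep the compiler from
 * moving memory accesses into or out of the masked region.
 */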

static __always_inline void __pmr_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}
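
/*
 * pmr_sync() ensures the ICC_PMR_EL1 write has taken effect before we
 * proceed, so that an interrupt already pending at a newly unmasked
 * priority is taken without unbounded delay. On configurations that do
 * not need an explicit barrier for this, it patches down to a no-op.
 */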

static inline void arch_local_irq_enable(void)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_enable();
	} else {
		__daif_local_irq_enable();
	}
}

static __always_inline void __daif_local_irq_disable(void)
{
	barrier();
	asm volatile("msr daifset, #3");
	barrier();
}

static __always_inline void __pmr_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	barrier();
}
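
/*
 * Note the asymmetry with __pmr_local_irq_enable(): no pmr_sync() here.
 * As far as masking is concerned, the PMR write takes effect on the
 * local CPU without extra synchronization; it is unmasking that needs
 * pmr_sync() so that an already-pending interrupt at a newly allowed
 * priority is actually delivered.
 */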

static inline void arch_local_irq_disable(void)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_disable();
	} else {
		__daif_local_irq_disable();
	}
}

static __always_inline unsigned long __daif_local_save_flags(void)
{
	return read_sysreg(daif);
}

static __always_inline unsigned long __pmr_local_save_flags(void)
{
	return read_sysreg_s(SYS_ICC_PMR_EL1);
}
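
/*
 * The 'flags' value saved here is scheme-specific: a copy of the DAIF
 * bits in one case, a GIC priority in the other. Callers must treat it
 * as opaque and only hand it back to arch_irqs_disabled_flags() or
 * arch_local_irq_restore().
 */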

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_local_save_flags();
	} else {
		return __daif_local_save_flags();
	}
}

static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
{
	return flags & PSR_I_BIT;
}

static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
{
	return flags != GIC_PRIO_IRQON;
}
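
/*
 * The PMR test is an inequality rather than a comparison against
 * GIC_PRIO_IRQOFF because more than one priority value has IRQs masked
 * (see the comment in __pmr_local_irq_save() below); anything other
 * than GIC_PRIO_IRQON counts as disabled.
 */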

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_irqs_disabled_flags(flags);
	} else {
		return __daif_irqs_disabled_flags(flags);
	}
}

static __always_inline bool __daif_irqs_disabled(void)
{
	return __daif_irqs_disabled_flags(__daif_local_save_flags());
}

static __always_inline bool __pmr_irqs_disabled(void)
{
	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}

static inline bool arch_irqs_disabled(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_irqs_disabled();
	} else {
		return __daif_irqs_disabled();
	}
}
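
/*
 * Illustrative sketch: code that must run with interrupts masked can
 * assert it, independent of which masking scheme is active:
 *
 *	WARN_ON_ONCE(!arch_irqs_disabled());
 */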

static __always_inline unsigned long __daif_local_irq_save(void)
{
	unsigned long flags = __daif_local_save_flags();

	__daif_local_irq_disable();

	return flags;
}

static __always_inline unsigned long __pmr_local_irq_save(void)
{
	unsigned long flags = __pmr_local_save_flags();

	/*
	 * There are too many states with IRQs disabled; just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!__pmr_irqs_disabled_flags(flags))
		__pmr_local_irq_disable();

	return flags;
}
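
/*
 * Since 'flags' is captured before the check, the eventual restore is
 * correct either way; skipping the write simply avoids bouncing the PMR
 * through a different masked value when the caller is already masked.
 */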

static inline unsigned long arch_local_irq_save(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_local_irq_save();
	} else {
		return __daif_local_irq_save();
	}
}
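
/*
 * A minimal sketch of the expected calling pattern (in practice kernel
 * code uses the local_irq_save()/local_irq_restore() wrappers from
 * <linux/irqflags.h>, which layer tracing on top of these arch hooks):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section, IRQs masked under either scheme ...
 *	arch_local_irq_restore(flags);
 */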

static __always_inline void __daif_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg(flags, daif);
	barrier();
}

static __always_inline void __pmr_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}
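
/*
 * Unlike the disable path, the restore path needs pmr_sync(): restoring
 * may unmask interrupts ('flags' may be GIC_PRIO_IRQON), and the unmask
 * must be synchronized just as in __pmr_local_irq_enable().
 */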

/*
 * Restore the saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_restore(flags);
	} else {
		__daif_local_irq_restore(flags);
	}
}

#endif /* __ASM_IRQFLAGS_H */