/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupt and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ, but not debug exceptions. Masking IRQs has no
 * side effects for other flags. Keeping to this order makes it easier for
 * entry.S to know which exceptions should be unmasked.
 *
 * FIQ is never expected, but we mask it when we disable debug exceptions, and
 * unmask it at all other times.
 */
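
/*
 * These primitives back the generic local_irq_*() helpers. An illustrative
 * critical section using them directly:
 *
 *	unsigned long flags = arch_local_irq_save();	// mask IRQs, save state
 *	...						// critical section
 *	arch_local_irq_restore(flags);			// return to saved state
 */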

/*
 * CPU interrupt mask handling.
 */
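/*
 * With ARM64_HAS_IRQ_PRIO_MASKING, IRQs are masked by raising the GIC
 * priority mask in ICC_PMR_EL1 instead of setting PSTATE.I, which leaves
 * pseudo-NMIs deliverable while ordinary IRQs are masked.
 */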
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifclr, #2		// arch_local_irq_enable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");

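	/*
	 * For the PMR alternative, make sure the unmasking write to
	 * ICC_PMR_EL1 is observed by the GIC, so that interrupts pending at
	 * the newly allowed priority are delivered.
	 */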
	pmr_sync();
}

static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifset, #2		// arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
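
	/*
	 * Unlike arch_local_irq_enable(), there is no pmr_sync() here: the
	 * barrier is only needed on the unmasking paths, so that pending
	 * interrupts at the newly allowed priority are taken promptly.
	 */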
}

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs	%0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}

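/*
 * The saved flags are either the DAIF bits or the PMR value, depending on
 * which alternative is in force, so treat them as opaque outside these
 * helpers. In the DAIF case IRQs are masked iff PSR_I_BIT is set; in the
 * PMR case iff the mask differs from GIC_PRIO_IRQON (the eor below then
 * yields a non-zero result).
 */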
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
}

/*
 * Restore the saved IRQ state.
 */
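/*
 * Note that the DAIF variant writes the whole daif register, so this
 * restores the Debug, SError and FIQ masks along with IRQ.
 */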
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr	daif, %0",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");

	pmr_sync();
}

#endif /* __ASM_IRQFLAGS_H */