xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/hardirq.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2012 ARM Ltd.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #ifndef __ASM_HARDIRQ_H
6*4882a593Smuzhiyun #define __ASM_HARDIRQ_H
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/cache.h>
9*4882a593Smuzhiyun #include <linux/percpu.h>
10*4882a593Smuzhiyun #include <linux/threads.h>
11*4882a593Smuzhiyun #include <asm/barrier.h>
12*4882a593Smuzhiyun #include <asm/irq.h>
13*4882a593Smuzhiyun #include <asm/kvm_arm.h>
14*4882a593Smuzhiyun #include <asm/sysreg.h>
15*4882a593Smuzhiyun 
/*
 * Per-CPU interrupt statistics. On arm64 only the pending-softirq mask
 * is tracked here; the structure is cacheline aligned to avoid false
 * sharing between CPUs.
 */
typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
21*4882a593Smuzhiyun 
/* irq_exit() is entered with hard IRQs already disabled on arm64. */
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

/*
 * Per-CPU NMI nesting state used by arch_nmi_enter()/arch_nmi_exit()
 * below to save and restore HCR_EL2 across NMI handling.
 */
struct nmi_ctx {
	u64 hcr;		/* HCR_EL2 value saved by the outermost arch_nmi_enter() */
	unsigned int cnt;	/* NMI nesting depth on this CPU */
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
30*4882a593Smuzhiyun 
/*
 * arch_nmi_enter() - prepare HCR_EL2 for NMI handling when the kernel
 * runs at EL2 (VHE).
 *
 * No-op unless is_kernel_in_hyp_mode(). Nested invocations (cnt != 0)
 * only bump the per-CPU nesting count. The outermost invocation reads
 * HCR_EL2, sets HCR_TGE if it was clear (with an isb() to synchronize
 * the sysreg write), then publishes cnt = 1 and finally records the
 * original HCR_EL2 value in ___ctx->hcr. The two compiler barriers
 * enforce that ordering, as the inline comments explain: an NMI
 * arriving in between must not observe cnt == 1 before TGE is secured,
 * nor overwrite a not-yet-saved hcr.
 */
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;                                                \
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
63*4882a593Smuzhiyun 
/*
 * arch_nmi_exit() - undo arch_nmi_enter() on the way out of an NMI.
 *
 * No-op unless is_kernel_in_hyp_mode(). Reads the saved HCR_EL2 value
 * before dropping the nesting count (first barrier), then decrements
 * cnt; only when this was the outermost exit (cnt reaches 0) and TGE
 * was originally clear does it write the saved value back to HCR_EL2.
 * The second barrier keeps the cnt release visible before the sysreg
 * restore, so a new NMI racing with the restore cannot wrongly assume
 * TGE is still secured — see the inline comments for the exact hazard.
 */
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.		\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
90*4882a593Smuzhiyun 
/*
 * ack_bad_irq - account an interrupt that has no handler.
 *
 * The IRQ number itself is ignored; the event is merely counted in the
 * global irq_err_count (defined elsewhere in the kernel).
 */
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;

	irq_err_count += 1;
}
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun #endif /* __ASM_HARDIRQ_H */
98