xref: /OK3568_Linux_fs/kernel/include/linux/context_tracking.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _LINUX_CONTEXT_TRACKING_H
3*4882a593Smuzhiyun #define _LINUX_CONTEXT_TRACKING_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/sched.h>
6*4882a593Smuzhiyun #include <linux/vtime.h>
7*4882a593Smuzhiyun #include <linux/context_tracking_state.h>
8*4882a593Smuzhiyun #include <linux/instrumentation.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <asm/ptrace.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun 
#ifdef CONFIG_CONTEXT_TRACKING
/* Mark @cpu as participating in context tracking (presumably sets it in the
 * tracking cpumask — implemented in kernel/context_tracking.c; confirm there). */
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled.  */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

/* Irq-safe wrappers around the __context_tracking_*() entry points. */
extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

user_enter(void)25*4882a593Smuzhiyun static inline void user_enter(void)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun 	if (context_tracking_enabled())
28*4882a593Smuzhiyun 		context_tracking_enter(CONTEXT_USER);
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun }
user_exit(void)31*4882a593Smuzhiyun static inline void user_exit(void)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun 	if (context_tracking_enabled())
34*4882a593Smuzhiyun 		context_tracking_exit(CONTEXT_USER);
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun /* Called with interrupts disabled.  */
user_enter_irqoff(void)38*4882a593Smuzhiyun static __always_inline void user_enter_irqoff(void)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun 	if (context_tracking_enabled())
41*4882a593Smuzhiyun 		__context_tracking_enter(CONTEXT_USER);
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun }
user_exit_irqoff(void)44*4882a593Smuzhiyun static __always_inline void user_exit_irqoff(void)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun 	if (context_tracking_enabled())
47*4882a593Smuzhiyun 		__context_tracking_exit(CONTEXT_USER);
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun 
/*
 * Exception entry: leave whatever tracked context (user/guest) the CPU
 * was in and return it, so the matching exception_exit() can restore it.
 *
 * Returns 0 when context tracking is disabled, which exception_exit()
 * then treats as "nothing to restore".
 */
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_enabled())
		return 0;

	/* Snapshot this CPU's state before exiting it to the kernel. */
	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}
63*4882a593Smuzhiyun 
exception_exit(enum ctx_state prev_ctx)64*4882a593Smuzhiyun static inline void exception_exit(enum ctx_state prev_ctx)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun 	if (context_tracking_enabled()) {
67*4882a593Smuzhiyun 		if (prev_ctx != CONTEXT_KERNEL)
68*4882a593Smuzhiyun 			context_tracking_enter(prev_ctx);
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun  * ct_state() - return the current context tracking state if known
75*4882a593Smuzhiyun  *
76*4882a593Smuzhiyun  * Returns the current cpu's context tracking state if context tracking
77*4882a593Smuzhiyun  * is enabled.  If context tracking is disabled, returns
78*4882a593Smuzhiyun  * CONTEXT_DISABLED.  This should be used primarily for debugging.
79*4882a593Smuzhiyun  */
ct_state(void)80*4882a593Smuzhiyun static __always_inline enum ctx_state ct_state(void)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	return context_tracking_enabled() ?
83*4882a593Smuzhiyun 		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
84*4882a593Smuzhiyun }
#else
/* Context tracking compiled out: every hook becomes a no-op and state
 * queries report "disabled". */
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
94*4882a593Smuzhiyun 
/* Warn on @cond, but only when context tracking is actually enabled. */
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
96*4882a593Smuzhiyun 
/* Boot-time init is only needed when tracking is forced on everywhere. */
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * Account the transition to guest time, or just tag the task with
	 * PF_VCPU when fine-grained vtime accounting is off on this CPU.
	 * The instrumentation_begin()/end() brackets mark the only spans
	 * here where instrumentable calls are allowed.
	 */
	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
	instrumentation_end();

	/* Enter guest context before the RCU quiescent-state report below. */
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Lets treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	if (!context_tracking_enabled_this_cpu()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}
131*4882a593Smuzhiyun 
context_tracking_guest_exit(void)132*4882a593Smuzhiyun static __always_inline void context_tracking_guest_exit(void)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun 	if (context_tracking_enabled())
135*4882a593Smuzhiyun 		__context_tracking_exit(CONTEXT_GUEST);
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun 
vtime_account_guest_exit(void)138*4882a593Smuzhiyun static __always_inline void vtime_account_guest_exit(void)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun 	if (vtime_accounting_enabled_this_cpu())
141*4882a593Smuzhiyun 		vtime_guest_exit(current);
142*4882a593Smuzhiyun 	else
143*4882a593Smuzhiyun 		current->flags &= ~PF_VCPU;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun 
static __always_inline void guest_exit_irqoff(void)
{
	/* Leave CONTEXT_GUEST first, outside the instrumentable span. */
	context_tracking_guest_exit();

	/* Accounting may call instrumentable code, so bracket it. */
	instrumentation_begin();
	vtime_account_guest_exit();
	instrumentation_end();
}
154*4882a593Smuzhiyun 
#else
/* !CONFIG_VIRT_CPU_ACCOUNTING_GEN: tick-based accounting variant. */
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so its safe
	 * to assume that it's the stime pending cputime
	 * to flush.
	 */
	instrumentation_begin();
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
	/* Guest mode counts as a quiescent state, as in the GEN variant. */
	rcu_virt_note_context_switch(smp_processor_id());
	instrumentation_end();
}
169*4882a593Smuzhiyun 
/* No context tracking transitions to undo in this configuration. */
static __always_inline void context_tracking_guest_exit(void) { }
171*4882a593Smuzhiyun 
static __always_inline void vtime_account_guest_exit(void)
{
	/* Flush the pending cputime first, then drop the vcpu tag. */
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
}
177*4882a593Smuzhiyun 
static __always_inline void guest_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}
185*4882a593Smuzhiyun #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
186*4882a593Smuzhiyun 
/* Irq-safe wrapper: run guest_exit_irqoff() with interrupts disabled. */
static inline void guest_exit(void)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	guest_exit_irqoff();
	local_irq_restore(irq_state);
}
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun #endif
197