/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
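
/*
 * Illustrative usage sketch for the above counter API (this mirrors what
 * exp_funnel_lock() and synchronize_rcu_expedited() do below; it is not
 * a separate interface):
 *
 *	s = rcu_exp_gp_seq_snap();	// Earliest "done" value of interest.
 *	...				// Start or piggy-back on an expedited GP.
 *	if (rcu_exp_gp_seq_done(s))
 *		...			// A full expedited GP has elapsed since the snapshot.
 */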

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
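	/*
	 * On CONFIG_NO_HZ_FULL kernels, also clear the RCU-expedited tick
	 * dependency for each CPU whose quiescent state is being reported,
	 * undoing the tick_dep_set_cpu() done in synchronize_rcu_expedited_wait().
	 */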
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->exp_deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
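
/*
 * Illustrative caller contract for exp_funnel_lock() (a sketch of what
 * synchronize_rcu_expedited() below actually does, not a separate API):
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;				// Someone else's GP covered us.
 *	...					// Drive the expedited GP for "s".
 *	mutex_unlock(&rcu_state.exp_mutex);	// Let the next expedited GP start.
 */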

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
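	/* CPUs not already seen as idle, offline, or ourself must be IPIed. */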
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.	Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
		WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		panic_on_rcu_stall();
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
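				/*
				 * Flag characters: 'O', 'o', and 'N' mark a CPU
				 * that is respectively not online, not in this
				 * node's ->expmaskinit, and not in its
				 * ->expmaskinitnext; '.' otherwise.
				 */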
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
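		/* Four wait queues per rcu_node, indexed by the low-order GP sequence bits. */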
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->exp_deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->exp_deferred_qs = true;
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    rdp->cpu_no_qs.b.exp) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads and is
 * therefore not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool no_wq;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Don't use workqueue during boot or from an incoming CPU. */
	preempt_disable();
	no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
		!cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
	preempt_enable();

	/* Ensure that load happens before action based on it. */
	if (unlikely(no_wq)) {
		/* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!no_wq))
		destroy_work_on_stack(&rew.rew_work);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
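
/*
 * Illustrative caller pattern (editorial sketch only; "gp", "my_lock",
 * "old_obj", "new_obj", and my_obj_free() are hypothetical):
 *
 *	old_obj = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_obj);
 *	synchronize_rcu_expedited();	// All pre-existing readers are done.
 *	my_obj_free(old_obj);		// Now safe to reclaim the old object.
 */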