xref: /OK3568_Linux_fs/kernel/kernel/sched/fair.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
4  *
5  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6  *
7  *  Interactivity improvements by Mike Galbraith
8  *  (C) 2007 Mike Galbraith <efault@gmx.de>
9  *
10  *  Various enhancements by Dmitry Adamushko.
11  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
12  *
13  *  Group scheduling enhancements by Srivatsa Vaddagiri
14  *  Copyright IBM Corporation, 2007
15  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
16  *
17  *  Scaled math optimizations by Thomas Gleixner
18  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
19  *
20  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
21  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
22  */
23 #include "sched.h"
24 
25 #include <trace/hooks/sched.h>
26 
27 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_runtime);
28 
29 /*
30  * Targeted preemption latency for CPU-bound tasks:
31  *
32  * NOTE: this latency value is not the same as the concept of
33  * 'timeslice length' - timeslices in CFS are of variable length
34  * and have no persistent notion like in traditional, time-slice
35  * based scheduling concepts.
36  *
37  * (to see the precise effective timeslice length of your workload,
38  *  run vmstat and monitor the context-switches (cs) field)
39  *
40  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
41  */
42 unsigned int sysctl_sched_latency			= 6000000ULL;
43 EXPORT_SYMBOL_GPL(sysctl_sched_latency);
44 static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
45 
46 /*
47  * The initial- and re-scaling of tunables is configurable
48  *
49  * Options are:
50  *
51  *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
52  *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
53  *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
54  *
55  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
56  */
57 enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
58 
59 /*
60  * Minimal preemption granularity for CPU-bound tasks:
61  *
62  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
63  */
64 unsigned int sysctl_sched_min_granularity			= 750000ULL;
65 EXPORT_SYMBOL_GPL(sysctl_sched_min_granularity);
66 static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
67 
68 /*
69  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
70  */
71 static unsigned int sched_nr_latency = 8;
72 
73 /*
74  * After fork, child runs first. If set to 0 (default) then
75  * parent will (try to) run first.
76  */
77 unsigned int sysctl_sched_child_runs_first __read_mostly;
78 
79 /*
80  * SCHED_OTHER wake-up granularity.
81  *
82  * This option delays the preemption effects of decoupled workloads
83  * and reduces their over-scheduling. Synchronous workloads will still
84  * have immediate wakeup/sleep latencies.
85  *
86  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
87  */
88 unsigned int sysctl_sched_wakeup_granularity			= 1000000UL;
89 static unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;
90 
91 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
92 
93 int sched_thermal_decay_shift;
setup_sched_thermal_decay_shift(char * str)94 static int __init setup_sched_thermal_decay_shift(char *str)
95 {
96 	int _shift = 0;
97 
98 	if (kstrtoint(str, 0, &_shift))
99 		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
100 
101 	sched_thermal_decay_shift = clamp(_shift, 0, 10);
102 	return 1;
103 }
104 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
105 
106 #ifdef CONFIG_SMP
107 /*
108  * For asym packing, by default the lower numbered CPU has higher priority.
109  */
arch_asym_cpu_priority(int cpu)110 int __weak arch_asym_cpu_priority(int cpu)
111 {
112 	return -cpu;
113 }
114 
115 /*
116  * The margin used when comparing utilization with CPU capacity.
117  *
118  * (default: ~20%)
119  */
120 #define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)
121 
122 #endif
123 
124 #ifdef CONFIG_CFS_BANDWIDTH
125 /*
126  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
127  * each time a cfs_rq requests quota.
128  *
129  * Note: in the case that the slice exceeds the runtime remaining (either due
130  * to consumption or the quota being specified to be smaller than the slice)
131  * we will always only issue the remaining available time.
132  *
133  * (default: 5 msec, units: microseconds)
134  */
135 unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
136 #endif
137 
update_load_add(struct load_weight * lw,unsigned long inc)138 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
139 {
140 	lw->weight += inc;
141 	lw->inv_weight = 0;
142 }
143 
update_load_sub(struct load_weight * lw,unsigned long dec)144 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
145 {
146 	lw->weight -= dec;
147 	lw->inv_weight = 0;
148 }
149 
update_load_set(struct load_weight * lw,unsigned long w)150 static inline void update_load_set(struct load_weight *lw, unsigned long w)
151 {
152 	lw->weight = w;
153 	lw->inv_weight = 0;
154 }
155 
156 /*
157  * Increase the granularity value when there are more CPUs,
158  * because with more CPUs the 'effective latency' as visible
159  * to users decreases. But the relationship is not linear,
160  * so pick a second-best guess by going with the log2 of the
161  * number of CPUs.
162  *
163  * This idea comes from the SD scheduler of Con Kolivas:
164  */
get_update_sysctl_factor(void)165 static unsigned int get_update_sysctl_factor(void)
166 {
167 	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
168 	unsigned int factor;
169 
170 	switch (sysctl_sched_tunable_scaling) {
171 	case SCHED_TUNABLESCALING_NONE:
172 		factor = 1;
173 		break;
174 	case SCHED_TUNABLESCALING_LINEAR:
175 		factor = cpus;
176 		break;
177 	case SCHED_TUNABLESCALING_LOG:
178 	default:
179 		factor = 1 + ilog2(cpus);
180 		break;
181 	}
182 
183 	return factor;
184 }
185 
update_sysctl(void)186 static void update_sysctl(void)
187 {
188 	unsigned int factor = get_update_sysctl_factor();
189 
190 #define SET_SYSCTL(name) \
191 	(sysctl_##name = (factor) * normalized_sysctl_##name)
192 	SET_SYSCTL(sched_min_granularity);
193 	SET_SYSCTL(sched_latency);
194 	SET_SYSCTL(sched_wakeup_granularity);
195 #undef SET_SYSCTL
196 }
197 
sched_init_granularity(void)198 void __init sched_init_granularity(void)
199 {
200 	update_sysctl();
201 }
202 
203 #define WMULT_CONST	(~0U)
204 #define WMULT_SHIFT	32
205 
__update_inv_weight(struct load_weight * lw)206 static void __update_inv_weight(struct load_weight *lw)
207 {
208 	unsigned long w;
209 
210 	if (likely(lw->inv_weight))
211 		return;
212 
213 	w = scale_load_down(lw->weight);
214 
215 	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
216 		lw->inv_weight = 1;
217 	else if (unlikely(!w))
218 		lw->inv_weight = WMULT_CONST;
219 	else
220 		lw->inv_weight = WMULT_CONST / w;
221 }
222 
223 /*
224  * delta_exec * weight / lw.weight
225  *   OR
226  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
227  *
228  * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
229  * we're guaranteed shift stays positive because inv_weight is guaranteed to
230  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
231  *
232  * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
233  * weight/lw.weight <= 1, and therefore our shift will also be positive.
234  */
__calc_delta(u64 delta_exec,unsigned long weight,struct load_weight * lw)235 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
236 {
237 	u64 fact = scale_load_down(weight);
238 	int shift = WMULT_SHIFT;
239 
240 	__update_inv_weight(lw);
241 
242 	if (unlikely(fact >> 32)) {
243 		while (fact >> 32) {
244 			fact >>= 1;
245 			shift--;
246 		}
247 	}
248 
249 	fact = mul_u32_u32(fact, lw->inv_weight);
250 
251 	while (fact >> 32) {
252 		fact >>= 1;
253 		shift--;
254 	}
255 
256 	return mul_u64_u32_shr(delta_exec, fact, shift);
257 }
258 
259 
260 const struct sched_class fair_sched_class;
261 
262 /**************************************************************
263  * CFS operations on generic schedulable entities:
264  */
265 
266 #ifdef CONFIG_FAIR_GROUP_SCHED
task_of(struct sched_entity * se)267 static inline struct task_struct *task_of(struct sched_entity *se)
268 {
269 	SCHED_WARN_ON(!entity_is_task(se));
270 	return container_of(se, struct task_struct, se);
271 }
272 
273 /* Walk up scheduling entities hierarchy */
274 #define for_each_sched_entity(se) \
275 		for (; se; se = se->parent)
276 
task_cfs_rq(struct task_struct * p)277 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
278 {
279 	return p->se.cfs_rq;
280 }
281 
282 /* runqueue on which this entity is (to be) queued */
cfs_rq_of(struct sched_entity * se)283 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
284 {
285 	return se->cfs_rq;
286 }
287 
288 /* runqueue "owned" by this group */
group_cfs_rq(struct sched_entity * grp)289 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
290 {
291 	return grp->my_q;
292 }
293 
cfs_rq_tg_path(struct cfs_rq * cfs_rq,char * path,int len)294 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
295 {
296 	if (!path)
297 		return;
298 
299 	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
300 		autogroup_path(cfs_rq->tg, path, len);
301 	else if (cfs_rq && cfs_rq->tg->css.cgroup)
302 		cgroup_path(cfs_rq->tg->css.cgroup, path, len);
303 	else
304 		strlcpy(path, "(null)", len);
305 }
306 
list_add_leaf_cfs_rq(struct cfs_rq * cfs_rq)307 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
308 {
309 	struct rq *rq = rq_of(cfs_rq);
310 	int cpu = cpu_of(rq);
311 
312 	if (cfs_rq->on_list)
313 		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
314 
315 	cfs_rq->on_list = 1;
316 
317 	/*
318 	 * Ensure we either appear before our parent (if already
319 	 * enqueued) or force our parent to appear after us when it is
320 	 * enqueued. The fact that we always enqueue bottom-up
321 	 * reduces this to two cases and a special case for the root
322 	 * cfs_rq. Furthermore, it also means that we will always reset
323 	 * tmp_alone_branch either when the branch is connected
324 	 * to a tree or when we reach the top of the tree
325 	 */
326 	if (cfs_rq->tg->parent &&
327 	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
328 		/*
329 		 * If parent is already on the list, we add the child
330 		 * just before. Thanks to circular linked property of
331 		 * the list, this means to put the child at the tail
332 		 * of the list that starts by parent.
333 		 */
334 		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
335 			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
336 		/*
337 		 * The branch is now connected to its tree so we can
338 		 * reset tmp_alone_branch to the beginning of the
339 		 * list.
340 		 */
341 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
342 		return true;
343 	}
344 
345 	if (!cfs_rq->tg->parent) {
346 		/*
347 		 * cfs rq without parent should be put
348 		 * at the tail of the list.
349 		 */
350 		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
351 			&rq->leaf_cfs_rq_list);
352 		/*
353 		 * We have reach the top of a tree so we can reset
354 		 * tmp_alone_branch to the beginning of the list.
355 		 */
356 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
357 		return true;
358 	}
359 
360 	/*
361 	 * The parent has not already been added so we want to
362 	 * make sure that it will be put after us.
363 	 * tmp_alone_branch points to the begin of the branch
364 	 * where we will add parent.
365 	 */
366 	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
367 	/*
368 	 * update tmp_alone_branch to points to the new begin
369 	 * of the branch
370 	 */
371 	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
372 	return false;
373 }
374 
list_del_leaf_cfs_rq(struct cfs_rq * cfs_rq)375 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
376 {
377 	if (cfs_rq->on_list) {
378 		struct rq *rq = rq_of(cfs_rq);
379 
380 		/*
381 		 * With cfs_rq being unthrottled/throttled during an enqueue,
382 		 * it can happen the tmp_alone_branch points the a leaf that
383 		 * we finally want to del. In this case, tmp_alone_branch moves
384 		 * to the prev element but it will point to rq->leaf_cfs_rq_list
385 		 * at the end of the enqueue.
386 		 */
387 		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
388 			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
389 
390 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
391 		cfs_rq->on_list = 0;
392 	}
393 }
394 
assert_list_leaf_cfs_rq(struct rq * rq)395 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
396 {
397 	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
398 }
399 
400 /* Iterate thr' all leaf cfs_rq's on a runqueue */
401 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
402 	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
403 				 leaf_cfs_rq_list)
404 
405 /* Do the two (enqueued) entities belong to the same group ? */
406 static inline struct cfs_rq *
is_same_group(struct sched_entity * se,struct sched_entity * pse)407 is_same_group(struct sched_entity *se, struct sched_entity *pse)
408 {
409 	if (se->cfs_rq == pse->cfs_rq)
410 		return se->cfs_rq;
411 
412 	return NULL;
413 }
414 
parent_entity(struct sched_entity * se)415 static inline struct sched_entity *parent_entity(struct sched_entity *se)
416 {
417 	return se->parent;
418 }
419 
420 static void
find_matching_se(struct sched_entity ** se,struct sched_entity ** pse)421 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
422 {
423 	int se_depth, pse_depth;
424 
425 	/*
426 	 * preemption test can be made between sibling entities who are in the
427 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
428 	 * both tasks until we find their ancestors who are siblings of common
429 	 * parent.
430 	 */
431 
432 	/* First walk up until both entities are at same depth */
433 	se_depth = (*se)->depth;
434 	pse_depth = (*pse)->depth;
435 
436 	while (se_depth > pse_depth) {
437 		se_depth--;
438 		*se = parent_entity(*se);
439 	}
440 
441 	while (pse_depth > se_depth) {
442 		pse_depth--;
443 		*pse = parent_entity(*pse);
444 	}
445 
446 	while (!is_same_group(*se, *pse)) {
447 		*se = parent_entity(*se);
448 		*pse = parent_entity(*pse);
449 	}
450 }
451 
452 #else	/* !CONFIG_FAIR_GROUP_SCHED */
453 
task_of(struct sched_entity * se)454 static inline struct task_struct *task_of(struct sched_entity *se)
455 {
456 	return container_of(se, struct task_struct, se);
457 }
458 
459 #define for_each_sched_entity(se) \
460 		for (; se; se = NULL)
461 
task_cfs_rq(struct task_struct * p)462 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
463 {
464 	return &task_rq(p)->cfs;
465 }
466 
cfs_rq_of(struct sched_entity * se)467 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
468 {
469 	struct task_struct *p = task_of(se);
470 	struct rq *rq = task_rq(p);
471 
472 	return &rq->cfs;
473 }
474 
475 /* runqueue "owned" by this group */
group_cfs_rq(struct sched_entity * grp)476 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
477 {
478 	return NULL;
479 }
480 
cfs_rq_tg_path(struct cfs_rq * cfs_rq,char * path,int len)481 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
482 {
483 	if (path)
484 		strlcpy(path, "(null)", len);
485 }
486 
list_add_leaf_cfs_rq(struct cfs_rq * cfs_rq)487 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
488 {
489 	return true;
490 }
491 
list_del_leaf_cfs_rq(struct cfs_rq * cfs_rq)492 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
493 {
494 }
495 
assert_list_leaf_cfs_rq(struct rq * rq)496 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
497 {
498 }
499 
500 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
501 		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
502 
parent_entity(struct sched_entity * se)503 static inline struct sched_entity *parent_entity(struct sched_entity *se)
504 {
505 	return NULL;
506 }
507 
508 static inline void
find_matching_se(struct sched_entity ** se,struct sched_entity ** pse)509 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
510 {
511 }
512 
513 #endif	/* CONFIG_FAIR_GROUP_SCHED */
514 
515 static __always_inline
516 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
517 
518 /**************************************************************
519  * Scheduling class tree data structure manipulation methods:
520  */
521 
max_vruntime(u64 max_vruntime,u64 vruntime)522 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
523 {
524 	s64 delta = (s64)(vruntime - max_vruntime);
525 	if (delta > 0)
526 		max_vruntime = vruntime;
527 
528 	return max_vruntime;
529 }
530 
min_vruntime(u64 min_vruntime,u64 vruntime)531 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
532 {
533 	s64 delta = (s64)(vruntime - min_vruntime);
534 	if (delta < 0)
535 		min_vruntime = vruntime;
536 
537 	return min_vruntime;
538 }
539 
entity_before(struct sched_entity * a,struct sched_entity * b)540 static inline int entity_before(struct sched_entity *a,
541 				struct sched_entity *b)
542 {
543 	return (s64)(a->vruntime - b->vruntime) < 0;
544 }
545 
update_min_vruntime(struct cfs_rq * cfs_rq)546 static void update_min_vruntime(struct cfs_rq *cfs_rq)
547 {
548 	struct sched_entity *curr = cfs_rq->curr;
549 	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
550 
551 	u64 vruntime = cfs_rq->min_vruntime;
552 
553 	if (curr) {
554 		if (curr->on_rq)
555 			vruntime = curr->vruntime;
556 		else
557 			curr = NULL;
558 	}
559 
560 	if (leftmost) { /* non-empty tree */
561 		struct sched_entity *se;
562 		se = rb_entry(leftmost, struct sched_entity, run_node);
563 
564 		if (!curr)
565 			vruntime = se->vruntime;
566 		else
567 			vruntime = min_vruntime(vruntime, se->vruntime);
568 	}
569 
570 	/* ensure we never gain time by being placed backwards. */
571 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
572 #ifndef CONFIG_64BIT
573 	smp_wmb();
574 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
575 #endif
576 }
577 
578 /*
579  * Enqueue an entity into the rb-tree:
580  */
__enqueue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se)581 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
582 {
583 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
584 	struct rb_node *parent = NULL;
585 	struct sched_entity *entry;
586 	bool leftmost = true;
587 
588 	trace_android_rvh_enqueue_entity(cfs_rq, se);
589 	/*
590 	 * Find the right place in the rbtree:
591 	 */
592 	while (*link) {
593 		parent = *link;
594 		entry = rb_entry(parent, struct sched_entity, run_node);
595 		/*
596 		 * We dont care about collisions. Nodes with
597 		 * the same key stay together.
598 		 */
599 		if (entity_before(se, entry)) {
600 			link = &parent->rb_left;
601 		} else {
602 			link = &parent->rb_right;
603 			leftmost = false;
604 		}
605 	}
606 
607 	rb_link_node(&se->run_node, parent, link);
608 	rb_insert_color_cached(&se->run_node,
609 			       &cfs_rq->tasks_timeline, leftmost);
610 }
611 
__dequeue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se)612 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
613 {
614 	trace_android_rvh_dequeue_entity(cfs_rq, se);
615 	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
616 }
617 
__pick_first_entity(struct cfs_rq * cfs_rq)618 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
619 {
620 	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
621 
622 	if (!left)
623 		return NULL;
624 
625 	return rb_entry(left, struct sched_entity, run_node);
626 }
627 
__pick_next_entity(struct sched_entity * se)628 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
629 {
630 	struct rb_node *next = rb_next(&se->run_node);
631 
632 	if (!next)
633 		return NULL;
634 
635 	return rb_entry(next, struct sched_entity, run_node);
636 }
637 
638 #ifdef CONFIG_SCHED_DEBUG
__pick_last_entity(struct cfs_rq * cfs_rq)639 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
640 {
641 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
642 
643 	if (!last)
644 		return NULL;
645 
646 	return rb_entry(last, struct sched_entity, run_node);
647 }
648 
649 /**************************************************************
650  * Scheduling class statistics methods:
651  */
652 
sched_proc_update_handler(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)653 int sched_proc_update_handler(struct ctl_table *table, int write,
654 		void *buffer, size_t *lenp, loff_t *ppos)
655 {
656 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
657 	unsigned int factor = get_update_sysctl_factor();
658 
659 	if (ret || !write)
660 		return ret;
661 
662 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
663 					sysctl_sched_min_granularity);
664 
665 #define WRT_SYSCTL(name) \
666 	(normalized_sysctl_##name = sysctl_##name / (factor))
667 	WRT_SYSCTL(sched_min_granularity);
668 	WRT_SYSCTL(sched_latency);
669 	WRT_SYSCTL(sched_wakeup_granularity);
670 #undef WRT_SYSCTL
671 
672 	return 0;
673 }
674 #endif
675 
676 /*
677  * delta /= w
678  */
calc_delta_fair(u64 delta,struct sched_entity * se)679 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
680 {
681 	if (unlikely(se->load.weight != NICE_0_LOAD))
682 		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
683 
684 	return delta;
685 }
686 
687 /*
688  * The idea is to set a period in which each task runs once.
689  *
690  * When there are too many tasks (sched_nr_latency) we have to stretch
691  * this period because otherwise the slices get too small.
692  *
693  * p = (nr <= nl) ? l : l*nr/nl
694  */
__sched_period(unsigned long nr_running)695 static u64 __sched_period(unsigned long nr_running)
696 {
697 	if (unlikely(nr_running > sched_nr_latency))
698 		return nr_running * sysctl_sched_min_granularity;
699 	else
700 		return sysctl_sched_latency;
701 }
702 
703 /*
704  * We calculate the wall-time slice from the period by taking a part
705  * proportional to the weight.
706  *
707  * s = p*P[w/rw]
708  */
sched_slice(struct cfs_rq * cfs_rq,struct sched_entity * se)709 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
710 {
711 	unsigned int nr_running = cfs_rq->nr_running;
712 	u64 slice;
713 
714 	if (sched_feat(ALT_PERIOD))
715 		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
716 
717 	slice = __sched_period(nr_running + !se->on_rq);
718 
719 	for_each_sched_entity(se) {
720 		struct load_weight *load;
721 		struct load_weight lw;
722 
723 		cfs_rq = cfs_rq_of(se);
724 		load = &cfs_rq->load;
725 
726 		if (unlikely(!se->on_rq)) {
727 			lw = cfs_rq->load;
728 
729 			update_load_add(&lw, se->load.weight);
730 			load = &lw;
731 		}
732 		slice = __calc_delta(slice, se->load.weight, load);
733 	}
734 
735 	if (sched_feat(BASE_SLICE))
736 		slice = max(slice, (u64)sysctl_sched_min_granularity);
737 
738 	return slice;
739 }
740 
741 /*
742  * We calculate the vruntime slice of a to-be-inserted task.
743  *
744  * vs = s/w
745  */
sched_vslice(struct cfs_rq * cfs_rq,struct sched_entity * se)746 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
747 {
748 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
749 }
750 
751 #include "pelt.h"
752 #ifdef CONFIG_SMP
753 
754 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
755 static unsigned long task_h_load(struct task_struct *p);
756 static unsigned long capacity_of(int cpu);
757 
758 /* Give new sched_entity start runnable values to heavy its load in infant time */
init_entity_runnable_average(struct sched_entity * se)759 void init_entity_runnable_average(struct sched_entity *se)
760 {
761 	struct sched_avg *sa = &se->avg;
762 
763 	memset(sa, 0, sizeof(*sa));
764 
765 	/*
766 	 * Tasks are initialized with full load to be seen as heavy tasks until
767 	 * they get a chance to stabilize to their real load level.
768 	 * Group entities are initialized with zero load to reflect the fact that
769 	 * nothing has been attached to the task group yet.
770 	 */
771 	if (entity_is_task(se))
772 		sa->load_avg = scale_load_down(se->load.weight);
773 
774 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
775 }
776 
777 static void attach_entity_cfs_rq(struct sched_entity *se);
778 
779 /*
780  * With new tasks being created, their initial util_avgs are extrapolated
781  * based on the cfs_rq's current util_avg:
782  *
783  *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
784  *
785  * However, in many cases, the above util_avg does not give a desired
786  * value. Moreover, the sum of the util_avgs may be divergent, such
787  * as when the series is a harmonic series.
788  *
789  * To solve this problem, we also cap the util_avg of successive tasks to
790  * only 1/2 of the left utilization budget:
791  *
792  *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
793  *
794  * where n denotes the nth task and cpu_scale the CPU capacity.
795  *
796  * For example, for a CPU with 1024 of capacity, a simplest series from
797  * the beginning would be like:
798  *
799  *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
800  * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
801  *
802  * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
803  * if util_avg > util_avg_cap.
804  */
post_init_entity_util_avg(struct task_struct * p)805 void post_init_entity_util_avg(struct task_struct *p)
806 {
807 	struct sched_entity *se = &p->se;
808 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
809 	struct sched_avg *sa = &se->avg;
810 	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
811 	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
812 
813 	if (cap > 0) {
814 		if (cfs_rq->avg.util_avg != 0) {
815 			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
816 			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
817 
818 			if (sa->util_avg > cap)
819 				sa->util_avg = cap;
820 		} else {
821 			sa->util_avg = cap;
822 		}
823 	}
824 
825 	sa->runnable_avg = sa->util_avg;
826 
827 	if (p->sched_class != &fair_sched_class) {
828 		/*
829 		 * For !fair tasks do:
830 		 *
831 		update_cfs_rq_load_avg(now, cfs_rq);
832 		attach_entity_load_avg(cfs_rq, se);
833 		switched_from_fair(rq, p);
834 		 *
835 		 * such that the next switched_to_fair() has the
836 		 * expected state.
837 		 */
838 		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
839 		return;
840 	}
841 
842 	/* Hook before this se's util is attached to cfs_rq's util */
843 	trace_android_rvh_post_init_entity_util_avg(se);
844 	attach_entity_cfs_rq(se);
845 }
846 
847 #else /* !CONFIG_SMP */
init_entity_runnable_average(struct sched_entity * se)848 void init_entity_runnable_average(struct sched_entity *se)
849 {
850 }
post_init_entity_util_avg(struct task_struct * p)851 void post_init_entity_util_avg(struct task_struct *p)
852 {
853 }
update_tg_load_avg(struct cfs_rq * cfs_rq)854 static void update_tg_load_avg(struct cfs_rq *cfs_rq)
855 {
856 }
857 #endif /* CONFIG_SMP */
858 
859 /*
860  * Update the current task's runtime statistics.
861  */
update_curr(struct cfs_rq * cfs_rq)862 static void update_curr(struct cfs_rq *cfs_rq)
863 {
864 	struct sched_entity *curr = cfs_rq->curr;
865 	u64 now = rq_clock_task(rq_of(cfs_rq));
866 	u64 delta_exec;
867 
868 	if (unlikely(!curr))
869 		return;
870 
871 	delta_exec = now - curr->exec_start;
872 	if (unlikely((s64)delta_exec <= 0))
873 		return;
874 
875 	curr->exec_start = now;
876 
877 	schedstat_set(curr->statistics.exec_max,
878 		      max(delta_exec, curr->statistics.exec_max));
879 
880 	curr->sum_exec_runtime += delta_exec;
881 	schedstat_add(cfs_rq->exec_clock, delta_exec);
882 
883 	curr->vruntime += calc_delta_fair(delta_exec, curr);
884 	update_min_vruntime(cfs_rq);
885 
886 	if (entity_is_task(curr)) {
887 		struct task_struct *curtask = task_of(curr);
888 
889 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
890 		cgroup_account_cputime(curtask, delta_exec);
891 		account_group_exec_runtime(curtask, delta_exec);
892 	}
893 
894 	account_cfs_rq_runtime(cfs_rq, delta_exec);
895 }
896 
update_curr_fair(struct rq * rq)897 static void update_curr_fair(struct rq *rq)
898 {
899 	update_curr(cfs_rq_of(&rq->curr->se));
900 }
901 
902 static inline void
update_stats_wait_start(struct cfs_rq * cfs_rq,struct sched_entity * se)903 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
904 {
905 	u64 wait_start, prev_wait_start;
906 
907 	if (!schedstat_enabled())
908 		return;
909 
910 	wait_start = rq_clock(rq_of(cfs_rq));
911 	prev_wait_start = schedstat_val(se->statistics.wait_start);
912 
913 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
914 	    likely(wait_start > prev_wait_start))
915 		wait_start -= prev_wait_start;
916 
917 	__schedstat_set(se->statistics.wait_start, wait_start);
918 }
919 
920 static inline void
update_stats_wait_end(struct cfs_rq * cfs_rq,struct sched_entity * se)921 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
922 {
923 	struct task_struct *p;
924 	u64 delta;
925 
926 	if (!schedstat_enabled())
927 		return;
928 
929 	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
930 
931 	if (entity_is_task(se)) {
932 		p = task_of(se);
933 		if (task_on_rq_migrating(p)) {
934 			/*
935 			 * Preserve migrating task's wait time so wait_start
936 			 * time stamp can be adjusted to accumulate wait time
937 			 * prior to migration.
938 			 */
939 			__schedstat_set(se->statistics.wait_start, delta);
940 			return;
941 		}
942 		trace_sched_stat_wait(p, delta);
943 	}
944 
945 	__schedstat_set(se->statistics.wait_max,
946 		      max(schedstat_val(se->statistics.wait_max), delta));
947 	__schedstat_inc(se->statistics.wait_count);
948 	__schedstat_add(se->statistics.wait_sum, delta);
949 	__schedstat_set(se->statistics.wait_start, 0);
950 }
951 
952 static inline void
update_stats_enqueue_sleeper(struct cfs_rq * cfs_rq,struct sched_entity * se)953 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
954 {
955 	struct task_struct *tsk = NULL;
956 	u64 sleep_start, block_start;
957 
958 	if (!schedstat_enabled())
959 		return;
960 
961 	sleep_start = schedstat_val(se->statistics.sleep_start);
962 	block_start = schedstat_val(se->statistics.block_start);
963 
964 	if (entity_is_task(se))
965 		tsk = task_of(se);
966 
967 	if (sleep_start) {
968 		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
969 
970 		if ((s64)delta < 0)
971 			delta = 0;
972 
973 		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
974 			__schedstat_set(se->statistics.sleep_max, delta);
975 
976 		__schedstat_set(se->statistics.sleep_start, 0);
977 		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
978 
979 		if (tsk) {
980 			account_scheduler_latency(tsk, delta >> 10, 1);
981 			trace_sched_stat_sleep(tsk, delta);
982 		}
983 	}
984 	if (block_start) {
985 		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
986 
987 		if ((s64)delta < 0)
988 			delta = 0;
989 
990 		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
991 			__schedstat_set(se->statistics.block_max, delta);
992 
993 		__schedstat_set(se->statistics.block_start, 0);
994 		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
995 
996 		if (tsk) {
997 			if (tsk->in_iowait) {
998 				__schedstat_add(se->statistics.iowait_sum, delta);
999 				__schedstat_inc(se->statistics.iowait_count);
1000 				trace_sched_stat_iowait(tsk, delta);
1001 			}
1002 
1003 			trace_sched_stat_blocked(tsk, delta);
1004 
1005 			/*
1006 			 * Blocking time is in units of nanosecs, so shift by
1007 			 * 20 to get a milliseconds-range estimation of the
1008 			 * amount of time that the task spent sleeping:
1009 			 */
1010 			if (unlikely(prof_on == SLEEP_PROFILING)) {
1011 				profile_hits(SLEEP_PROFILING,
1012 						(void *)get_wchan(tsk),
1013 						delta >> 20);
1014 			}
1015 			account_scheduler_latency(tsk, delta >> 10, 0);
1016 		}
1017 	}
1018 }
1019 
1020 /*
1021  * Task is being enqueued - update stats:
1022  */
1023 static inline void
update_stats_enqueue(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags)1024 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1025 {
1026 	if (!schedstat_enabled())
1027 		return;
1028 
1029 	/*
1030 	 * Are we enqueueing a waiting task? (for current tasks
1031 	 * a dequeue/enqueue event is a NOP)
1032 	 */
1033 	if (se != cfs_rq->curr)
1034 		update_stats_wait_start(cfs_rq, se);
1035 
1036 	if (flags & ENQUEUE_WAKEUP)
1037 		update_stats_enqueue_sleeper(cfs_rq, se);
1038 }
1039 
1040 static inline void
update_stats_dequeue(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags)1041 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1042 {
1043 
1044 	if (!schedstat_enabled())
1045 		return;
1046 
1047 	/*
1048 	 * Mark the end of the wait period if dequeueing a
1049 	 * waiting task:
1050 	 */
1051 	if (se != cfs_rq->curr)
1052 		update_stats_wait_end(cfs_rq, se);
1053 
1054 	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1055 		struct task_struct *tsk = task_of(se);
1056 
1057 		if (tsk->state & TASK_INTERRUPTIBLE)
1058 			__schedstat_set(se->statistics.sleep_start,
1059 				      rq_clock(rq_of(cfs_rq)));
1060 		if (tsk->state & TASK_UNINTERRUPTIBLE)
1061 			__schedstat_set(se->statistics.block_start,
1062 				      rq_clock(rq_of(cfs_rq)));
1063 	}
1064 }
1065 
1066 /*
1067  * We are picking a new current task - update its stats:
1068  */
1069 static inline void
update_stats_curr_start(struct cfs_rq * cfs_rq,struct sched_entity * se)1070 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1071 {
1072 	/*
1073 	 * We are starting a new run period:
1074 	 */
1075 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
1076 }
1077 
1078 /**************************************************
1079  * Scheduling class queueing methods:
1080  */
1081 
1082 #ifdef CONFIG_NUMA_BALANCING
1083 /*
1084  * Approximate time to scan a full NUMA task in ms. The task scan period is
1085  * calculated based on the tasks virtual memory size and
1086  * numa_balancing_scan_size.
1087  */
1088 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1089 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1090 
1091 /* Portion of address space to scan in MB */
1092 unsigned int sysctl_numa_balancing_scan_size = 256;
1093 
1094 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1095 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1096 
1097 struct numa_group {
1098 	refcount_t refcount;
1099 
1100 	spinlock_t lock; /* nr_tasks, tasks */
1101 	int nr_tasks;
1102 	pid_t gid;
1103 	int active_nodes;
1104 
1105 	struct rcu_head rcu;
1106 	unsigned long total_faults;
1107 	unsigned long max_faults_cpu;
1108 	/*
1109 	 * Faults_cpu is used to decide whether memory should move
1110 	 * towards the CPU. As a consequence, these stats are weighted
1111 	 * more by CPU use than by memory faults.
1112 	 */
1113 	unsigned long *faults_cpu;
1114 	unsigned long faults[];
1115 };
1116 
1117 /*
1118  * For functions that can be called in multiple contexts that permit reading
1119  * ->numa_group (see struct task_struct for locking rules).
1120  */
deref_task_numa_group(struct task_struct * p)1121 static struct numa_group *deref_task_numa_group(struct task_struct *p)
1122 {
1123 	return rcu_dereference_check(p->numa_group, p == current ||
1124 		(lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
1125 }
1126 
deref_curr_numa_group(struct task_struct * p)1127 static struct numa_group *deref_curr_numa_group(struct task_struct *p)
1128 {
1129 	return rcu_dereference_protected(p->numa_group, p == current);
1130 }
1131 
1132 static inline unsigned long group_faults_priv(struct numa_group *ng);
1133 static inline unsigned long group_faults_shared(struct numa_group *ng);
1134 
task_nr_scan_windows(struct task_struct * p)1135 static unsigned int task_nr_scan_windows(struct task_struct *p)
1136 {
1137 	unsigned long rss = 0;
1138 	unsigned long nr_scan_pages;
1139 
1140 	/*
1141 	 * Calculations based on RSS as non-present and empty pages are skipped
1142 	 * by the PTE scanner and NUMA hinting faults should be trapped based
1143 	 * on resident pages
1144 	 */
1145 	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1146 	rss = get_mm_rss(p->mm);
1147 	if (!rss)
1148 		rss = nr_scan_pages;
1149 
1150 	rss = round_up(rss, nr_scan_pages);
1151 	return rss / nr_scan_pages;
1152 }
1153 
1154 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1155 #define MAX_SCAN_WINDOW 2560
1156 
task_scan_min(struct task_struct * p)1157 static unsigned int task_scan_min(struct task_struct *p)
1158 {
1159 	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1160 	unsigned int scan, floor;
1161 	unsigned int windows = 1;
1162 
1163 	if (scan_size < MAX_SCAN_WINDOW)
1164 		windows = MAX_SCAN_WINDOW / scan_size;
1165 	floor = 1000 / windows;
1166 
1167 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1168 	return max_t(unsigned int, floor, scan);
1169 }
1170 
task_scan_start(struct task_struct * p)1171 static unsigned int task_scan_start(struct task_struct *p)
1172 {
1173 	unsigned long smin = task_scan_min(p);
1174 	unsigned long period = smin;
1175 	struct numa_group *ng;
1176 
1177 	/* Scale the maximum scan period with the amount of shared memory. */
1178 	rcu_read_lock();
1179 	ng = rcu_dereference(p->numa_group);
1180 	if (ng) {
1181 		unsigned long shared = group_faults_shared(ng);
1182 		unsigned long private = group_faults_priv(ng);
1183 
1184 		period *= refcount_read(&ng->refcount);
1185 		period *= shared + 1;
1186 		period /= private + shared + 1;
1187 	}
1188 	rcu_read_unlock();
1189 
1190 	return max(smin, period);
1191 }
1192 
task_scan_max(struct task_struct * p)1193 static unsigned int task_scan_max(struct task_struct *p)
1194 {
1195 	unsigned long smin = task_scan_min(p);
1196 	unsigned long smax;
1197 	struct numa_group *ng;
1198 
1199 	/* Watch for min being lower than max due to floor calculations */
1200 	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1201 
1202 	/* Scale the maximum scan period with the amount of shared memory. */
1203 	ng = deref_curr_numa_group(p);
1204 	if (ng) {
1205 		unsigned long shared = group_faults_shared(ng);
1206 		unsigned long private = group_faults_priv(ng);
1207 		unsigned long period = smax;
1208 
1209 		period *= refcount_read(&ng->refcount);
1210 		period *= shared + 1;
1211 		period /= private + shared + 1;
1212 
1213 		smax = max(smax, period);
1214 	}
1215 
1216 	return max(smin, smax);
1217 }
1218 
account_numa_enqueue(struct rq * rq,struct task_struct * p)1219 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1220 {
1221 	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
1222 	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1223 }
1224 
account_numa_dequeue(struct rq * rq,struct task_struct * p)1225 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1226 {
1227 	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
1228 	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1229 }
1230 
1231 /* Shared or private faults. */
1232 #define NR_NUMA_HINT_FAULT_TYPES 2
1233 
1234 /* Memory and CPU locality */
1235 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1236 
1237 /* Averaged statistics, and temporary buffers. */
1238 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1239 
task_numa_group_id(struct task_struct * p)1240 pid_t task_numa_group_id(struct task_struct *p)
1241 {
1242 	struct numa_group *ng;
1243 	pid_t gid = 0;
1244 
1245 	rcu_read_lock();
1246 	ng = rcu_dereference(p->numa_group);
1247 	if (ng)
1248 		gid = ng->gid;
1249 	rcu_read_unlock();
1250 
1251 	return gid;
1252 }
1253 
1254 /*
1255  * The averaged statistics, shared & private, memory & CPU,
1256  * occupy the first half of the array. The second half of the
1257  * array is for current counters, which are averaged into the
1258  * first set by task_numa_placement.
1259  */
task_faults_idx(enum numa_faults_stats s,int nid,int priv)1260 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1261 {
1262 	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1263 }
1264 
task_faults(struct task_struct * p,int nid)1265 static inline unsigned long task_faults(struct task_struct *p, int nid)
1266 {
1267 	if (!p->numa_faults)
1268 		return 0;
1269 
1270 	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1271 		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1272 }
1273 
group_faults(struct task_struct * p,int nid)1274 static inline unsigned long group_faults(struct task_struct *p, int nid)
1275 {
1276 	struct numa_group *ng = deref_task_numa_group(p);
1277 
1278 	if (!ng)
1279 		return 0;
1280 
1281 	return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1282 		ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1283 }
1284 
group_faults_cpu(struct numa_group * group,int nid)1285 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1286 {
1287 	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1288 		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
1289 }
1290 
group_faults_priv(struct numa_group * ng)1291 static inline unsigned long group_faults_priv(struct numa_group *ng)
1292 {
1293 	unsigned long faults = 0;
1294 	int node;
1295 
1296 	for_each_online_node(node) {
1297 		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
1298 	}
1299 
1300 	return faults;
1301 }
1302 
group_faults_shared(struct numa_group * ng)1303 static inline unsigned long group_faults_shared(struct numa_group *ng)
1304 {
1305 	unsigned long faults = 0;
1306 	int node;
1307 
1308 	for_each_online_node(node) {
1309 		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
1310 	}
1311 
1312 	return faults;
1313 }
1314 
1315 /*
1316  * A node triggering more than 1/3 as many NUMA faults as the maximum is
1317  * considered part of a numa group's pseudo-interleaving set. Migrations
1318  * between these nodes are slowed down, to allow things to settle down.
1319  */
1320 #define ACTIVE_NODE_FRACTION 3
1321 
numa_is_active_node(int nid,struct numa_group * ng)1322 static bool numa_is_active_node(int nid, struct numa_group *ng)
1323 {
1324 	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1325 }
1326 
1327 /* Handle placement on systems where not all nodes are directly connected. */
score_nearby_nodes(struct task_struct * p,int nid,int maxdist,bool task)1328 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1329 					int maxdist, bool task)
1330 {
1331 	unsigned long score = 0;
1332 	int node;
1333 
1334 	/*
1335 	 * All nodes are directly connected, and the same distance
1336 	 * from each other. No need for fancy placement algorithms.
1337 	 */
1338 	if (sched_numa_topology_type == NUMA_DIRECT)
1339 		return 0;
1340 
1341 	/*
1342 	 * This code is called for each node, introducing N^2 complexity,
1343 	 * which should be ok given the number of nodes rarely exceeds 8.
1344 	 */
1345 	for_each_online_node(node) {
1346 		unsigned long faults;
1347 		int dist = node_distance(nid, node);
1348 
1349 		/*
1350 		 * The furthest away nodes in the system are not interesting
1351 		 * for placement; nid was already counted.
1352 		 */
1353 		if (dist == sched_max_numa_distance || node == nid)
1354 			continue;
1355 
1356 		/*
1357 		 * On systems with a backplane NUMA topology, compare groups
1358 		 * of nodes, and move tasks towards the group with the most
1359 		 * memory accesses. When comparing two nodes at distance
1360 		 * "hoplimit", only nodes closer by than "hoplimit" are part
1361 		 * of each group. Skip other nodes.
1362 		 */
1363 		if (sched_numa_topology_type == NUMA_BACKPLANE &&
1364 					dist >= maxdist)
1365 			continue;
1366 
1367 		/* Add up the faults from nearby nodes. */
1368 		if (task)
1369 			faults = task_faults(p, node);
1370 		else
1371 			faults = group_faults(p, node);
1372 
1373 		/*
1374 		 * On systems with a glueless mesh NUMA topology, there are
1375 		 * no fixed "groups of nodes". Instead, nodes that are not
1376 		 * directly connected bounce traffic through intermediate
1377 		 * nodes; a numa_group can occupy any set of nodes.
1378 		 * The further away a node is, the less the faults count.
1379 		 * This seems to result in good task placement.
1380 		 */
1381 		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1382 			faults *= (sched_max_numa_distance - dist);
1383 			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1384 		}
1385 
1386 		score += faults;
1387 	}
1388 
1389 	return score;
1390 }
1391 
1392 /*
1393  * These return the fraction of accesses done by a particular task, or
1394  * task group, on a particular numa node.  The group weight is given a
1395  * larger multiplier, in order to group tasks together that are almost
1396  * evenly spread out between numa nodes.
1397  */
task_weight(struct task_struct * p,int nid,int dist)1398 static inline unsigned long task_weight(struct task_struct *p, int nid,
1399 					int dist)
1400 {
1401 	unsigned long faults, total_faults;
1402 
1403 	if (!p->numa_faults)
1404 		return 0;
1405 
1406 	total_faults = p->total_numa_faults;
1407 
1408 	if (!total_faults)
1409 		return 0;
1410 
1411 	faults = task_faults(p, nid);
1412 	faults += score_nearby_nodes(p, nid, dist, true);
1413 
1414 	return 1000 * faults / total_faults;
1415 }
1416 
group_weight(struct task_struct * p,int nid,int dist)1417 static inline unsigned long group_weight(struct task_struct *p, int nid,
1418 					 int dist)
1419 {
1420 	struct numa_group *ng = deref_task_numa_group(p);
1421 	unsigned long faults, total_faults;
1422 
1423 	if (!ng)
1424 		return 0;
1425 
1426 	total_faults = ng->total_faults;
1427 
1428 	if (!total_faults)
1429 		return 0;
1430 
1431 	faults = group_faults(p, nid);
1432 	faults += score_nearby_nodes(p, nid, dist, false);
1433 
1434 	return 1000 * faults / total_faults;
1435 }
1436 
should_numa_migrate_memory(struct task_struct * p,struct page * page,int src_nid,int dst_cpu)1437 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1438 				int src_nid, int dst_cpu)
1439 {
1440 	struct numa_group *ng = deref_curr_numa_group(p);
1441 	int dst_nid = cpu_to_node(dst_cpu);
1442 	int last_cpupid, this_cpupid;
1443 
1444 	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1445 	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1446 
1447 	/*
1448 	 * Allow first faults or private faults to migrate immediately early in
1449 	 * the lifetime of a task. The magic number 4 is based on waiting for
1450 	 * two full passes of the "multi-stage node selection" test that is
1451 	 * executed below.
1452 	 */
1453 	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
1454 	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
1455 		return true;
1456 
1457 	/*
1458 	 * Multi-stage node selection is used in conjunction with a periodic
1459 	 * migration fault to build a temporal task<->page relation. By using
1460 	 * a two-stage filter we remove short/unlikely relations.
1461 	 *
1462 	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1463 	 * a task's usage of a particular page (n_p) per total usage of this
1464 	 * page (n_t) (in a given time-span) to a probability.
1465 	 *
1466 	 * Our periodic faults will sample this probability and getting the
1467 	 * same result twice in a row, given these samples are fully
1468 	 * independent, is then given by P(n)^2, provided our sample period
1469 	 * is sufficiently short compared to the usage pattern.
1470 	 *
1471 	 * This quadric squishes small probabilities, making it less likely we
1472 	 * act on an unlikely task<->page relation.
1473 	 */
1474 	if (!cpupid_pid_unset(last_cpupid) &&
1475 				cpupid_to_nid(last_cpupid) != dst_nid)
1476 		return false;
1477 
1478 	/* Always allow migrate on private faults */
1479 	if (cpupid_match_pid(p, last_cpupid))
1480 		return true;
1481 
1482 	/* A shared fault, but p->numa_group has not been set up yet. */
1483 	if (!ng)
1484 		return true;
1485 
1486 	/*
1487 	 * Destination node is much more heavily used than the source
1488 	 * node? Allow migration.
1489 	 */
1490 	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1491 					ACTIVE_NODE_FRACTION)
1492 		return true;
1493 
1494 	/*
1495 	 * Distribute memory according to CPU & memory use on each node,
1496 	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1497 	 *
1498 	 * faults_cpu(dst)   3   faults_cpu(src)
1499 	 * --------------- * - > ---------------
1500 	 * faults_mem(dst)   4   faults_mem(src)
1501 	 */
1502 	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1503 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1504 }
1505 
1506 /*
1507  * 'numa_type' describes the node at the moment of load balancing.
1508  */
1509 enum numa_type {
1510 	/* The node has spare capacity that can be used to run more tasks.  */
1511 	node_has_spare = 0,
1512 	/*
1513 	 * The node is fully used and the tasks don't compete for more CPU
1514 	 * cycles. Nevertheless, some tasks might wait before running.
1515 	 */
1516 	node_fully_busy,
1517 	/*
1518 	 * The node is overloaded and can't provide expected CPU cycles to all
1519 	 * tasks.
1520 	 */
1521 	node_overloaded
1522 };
1523 
1524 /* Cached statistics for all CPUs within a node */
1525 struct numa_stats {
1526 	unsigned long load;
1527 	unsigned long runnable;
1528 	unsigned long util;
1529 	/* Total compute capacity of CPUs on a node */
1530 	unsigned long compute_capacity;
1531 	unsigned int nr_running;
1532 	unsigned int weight;
1533 	enum numa_type node_type;
1534 	int idle_cpu;
1535 };
1536 
is_core_idle(int cpu)1537 static inline bool is_core_idle(int cpu)
1538 {
1539 #ifdef CONFIG_SCHED_SMT
1540 	int sibling;
1541 
1542 	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
1543 		if (cpu == sibling)
1544 			continue;
1545 
1546 		if (!idle_cpu(sibling))
1547 			return false;
1548 	}
1549 #endif
1550 
1551 	return true;
1552 }
1553 
1554 struct task_numa_env {
1555 	struct task_struct *p;
1556 
1557 	int src_cpu, src_nid;
1558 	int dst_cpu, dst_nid;
1559 
1560 	struct numa_stats src_stats, dst_stats;
1561 
1562 	int imbalance_pct;
1563 	int dist;
1564 
1565 	struct task_struct *best_task;
1566 	long best_imp;
1567 	int best_cpu;
1568 };
1569 
1570 static unsigned long cpu_load(struct rq *rq);
1571 static unsigned long cpu_runnable(struct rq *rq);
1572 static unsigned long cpu_util(int cpu);
1573 static inline long adjust_numa_imbalance(int imbalance, int nr_running);
1574 
1575 static inline enum
numa_classify(unsigned int imbalance_pct,struct numa_stats * ns)1576 numa_type numa_classify(unsigned int imbalance_pct,
1577 			 struct numa_stats *ns)
1578 {
1579 	if ((ns->nr_running > ns->weight) &&
1580 	    (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
1581 	     ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
1582 		return node_overloaded;
1583 
1584 	if ((ns->nr_running < ns->weight) ||
1585 	    (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
1586 	     ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
1587 		return node_has_spare;
1588 
1589 	return node_fully_busy;
1590 }
1591 
1592 #ifdef CONFIG_SCHED_SMT
1593 /* Forward declarations of select_idle_sibling helpers */
1594 static inline bool test_idle_cores(int cpu, bool def);
numa_idle_core(int idle_core,int cpu)1595 static inline int numa_idle_core(int idle_core, int cpu)
1596 {
1597 	if (!static_branch_likely(&sched_smt_present) ||
1598 	    idle_core >= 0 || !test_idle_cores(cpu, false))
1599 		return idle_core;
1600 
1601 	/*
1602 	 * Prefer cores instead of packing HT siblings
1603 	 * and triggering future load balancing.
1604 	 */
1605 	if (is_core_idle(cpu))
1606 		idle_core = cpu;
1607 
1608 	return idle_core;
1609 }
1610 #else
numa_idle_core(int idle_core,int cpu)1611 static inline int numa_idle_core(int idle_core, int cpu)
1612 {
1613 	return idle_core;
1614 }
1615 #endif
1616 
1617 /*
1618  * Gather all necessary information to make NUMA balancing placement
1619  * decisions that are compatible with standard load balancer. This
1620  * borrows code and logic from update_sg_lb_stats but sharing a
1621  * common implementation is impractical.
1622  */
update_numa_stats(struct task_numa_env * env,struct numa_stats * ns,int nid,bool find_idle)1623 static void update_numa_stats(struct task_numa_env *env,
1624 			      struct numa_stats *ns, int nid,
1625 			      bool find_idle)
1626 {
1627 	int cpu, idle_core = -1;
1628 
1629 	memset(ns, 0, sizeof(*ns));
1630 	ns->idle_cpu = -1;
1631 
1632 	rcu_read_lock();
1633 	for_each_cpu(cpu, cpumask_of_node(nid)) {
1634 		struct rq *rq = cpu_rq(cpu);
1635 
1636 		ns->load += cpu_load(rq);
1637 		ns->runnable += cpu_runnable(rq);
1638 		ns->util += cpu_util(cpu);
1639 		ns->nr_running += rq->cfs.h_nr_running;
1640 		ns->compute_capacity += capacity_of(cpu);
1641 
1642 		if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
1643 			if (READ_ONCE(rq->numa_migrate_on) ||
1644 			    !cpumask_test_cpu(cpu, env->p->cpus_ptr))
1645 				continue;
1646 
1647 			if (ns->idle_cpu == -1)
1648 				ns->idle_cpu = cpu;
1649 
1650 			idle_core = numa_idle_core(idle_core, cpu);
1651 		}
1652 	}
1653 	rcu_read_unlock();
1654 
1655 	ns->weight = cpumask_weight(cpumask_of_node(nid));
1656 
1657 	ns->node_type = numa_classify(env->imbalance_pct, ns);
1658 
1659 	if (idle_core >= 0)
1660 		ns->idle_cpu = idle_core;
1661 }
1662 
task_numa_assign(struct task_numa_env * env,struct task_struct * p,long imp)1663 static void task_numa_assign(struct task_numa_env *env,
1664 			     struct task_struct *p, long imp)
1665 {
1666 	struct rq *rq = cpu_rq(env->dst_cpu);
1667 
1668 	/* Check if run-queue part of active NUMA balance. */
1669 	if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
1670 		int cpu;
1671 		int start = env->dst_cpu;
1672 
1673 		/* Find alternative idle CPU. */
1674 		for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
1675 			if (cpu == env->best_cpu || !idle_cpu(cpu) ||
1676 			    !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1677 				continue;
1678 			}
1679 
1680 			env->dst_cpu = cpu;
1681 			rq = cpu_rq(env->dst_cpu);
1682 			if (!xchg(&rq->numa_migrate_on, 1))
1683 				goto assign;
1684 		}
1685 
1686 		/* Failed to find an alternative idle CPU */
1687 		return;
1688 	}
1689 
1690 assign:
1691 	/*
1692 	 * Clear previous best_cpu/rq numa-migrate flag, since task now
1693 	 * found a better CPU to move/swap.
1694 	 */
1695 	if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
1696 		rq = cpu_rq(env->best_cpu);
1697 		WRITE_ONCE(rq->numa_migrate_on, 0);
1698 	}
1699 
1700 	if (env->best_task)
1701 		put_task_struct(env->best_task);
1702 	if (p)
1703 		get_task_struct(p);
1704 
1705 	env->best_task = p;
1706 	env->best_imp = imp;
1707 	env->best_cpu = env->dst_cpu;
1708 }
1709 
load_too_imbalanced(long src_load,long dst_load,struct task_numa_env * env)1710 static bool load_too_imbalanced(long src_load, long dst_load,
1711 				struct task_numa_env *env)
1712 {
1713 	long imb, old_imb;
1714 	long orig_src_load, orig_dst_load;
1715 	long src_capacity, dst_capacity;
1716 
1717 	/*
1718 	 * The load is corrected for the CPU capacity available on each node.
1719 	 *
1720 	 * src_load        dst_load
1721 	 * ------------ vs ---------
1722 	 * src_capacity    dst_capacity
1723 	 */
1724 	src_capacity = env->src_stats.compute_capacity;
1725 	dst_capacity = env->dst_stats.compute_capacity;
1726 
1727 	imb = abs(dst_load * src_capacity - src_load * dst_capacity);
1728 
1729 	orig_src_load = env->src_stats.load;
1730 	orig_dst_load = env->dst_stats.load;
1731 
1732 	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
1733 
1734 	/* Would this change make things worse? */
1735 	return (imb > old_imb);
1736 }
1737 
1738 /*
1739  * Maximum NUMA importance can be 1998 (2*999);
1740  * SMALLIMP @ 30 would be close to 1998/64.
1741  * Used to deter task migration.
1742  */
1743 #define SMALLIMP	30
1744 
1745 /*
1746  * This checks if the overall compute and NUMA accesses of the system would
1747  * be improved if the source tasks was migrated to the target dst_cpu taking
1748  * into account that it might be best if task running on the dst_cpu should
1749  * be exchanged with the source task
1750  */
task_numa_compare(struct task_numa_env * env,long taskimp,long groupimp,bool maymove)1751 static bool task_numa_compare(struct task_numa_env *env,
1752 			      long taskimp, long groupimp, bool maymove)
1753 {
1754 	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
1755 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
1756 	long imp = p_ng ? groupimp : taskimp;
1757 	struct task_struct *cur;
1758 	long src_load, dst_load;
1759 	int dist = env->dist;
1760 	long moveimp = imp;
1761 	long load;
1762 	bool stopsearch = false;
1763 
1764 	if (READ_ONCE(dst_rq->numa_migrate_on))
1765 		return false;
1766 
1767 	rcu_read_lock();
1768 	cur = rcu_dereference(dst_rq->curr);
1769 	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1770 		cur = NULL;
1771 
1772 	/*
1773 	 * Because we have preemption enabled we can get migrated around and
1774 	 * end try selecting ourselves (current == env->p) as a swap candidate.
1775 	 */
1776 	if (cur == env->p) {
1777 		stopsearch = true;
1778 		goto unlock;
1779 	}
1780 
1781 	if (!cur) {
1782 		if (maymove && moveimp >= env->best_imp)
1783 			goto assign;
1784 		else
1785 			goto unlock;
1786 	}
1787 
1788 	/* Skip this swap candidate if cannot move to the source cpu. */
1789 	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
1790 		goto unlock;
1791 
1792 	/*
1793 	 * Skip this swap candidate if it is not moving to its preferred
1794 	 * node and the best task is.
1795 	 */
1796 	if (env->best_task &&
1797 	    env->best_task->numa_preferred_nid == env->src_nid &&
1798 	    cur->numa_preferred_nid != env->src_nid) {
1799 		goto unlock;
1800 	}
1801 
1802 	/*
1803 	 * "imp" is the fault differential for the source task between the
1804 	 * source and destination node. Calculate the total differential for
1805 	 * the source task and potential destination task. The more negative
1806 	 * the value is, the more remote accesses that would be expected to
1807 	 * be incurred if the tasks were swapped.
1808 	 *
1809 	 * If dst and source tasks are in the same NUMA group, or not
1810 	 * in any group then look only at task weights.
1811 	 */
1812 	cur_ng = rcu_dereference(cur->numa_group);
1813 	if (cur_ng == p_ng) {
1814 		imp = taskimp + task_weight(cur, env->src_nid, dist) -
1815 		      task_weight(cur, env->dst_nid, dist);
1816 		/*
1817 		 * Add some hysteresis to prevent swapping the
1818 		 * tasks within a group over tiny differences.
1819 		 */
1820 		if (cur_ng)
1821 			imp -= imp / 16;
1822 	} else {
1823 		/*
1824 		 * Compare the group weights. If a task is all by itself
1825 		 * (not part of a group), use the task weight instead.
1826 		 */
1827 		if (cur_ng && p_ng)
1828 			imp += group_weight(cur, env->src_nid, dist) -
1829 			       group_weight(cur, env->dst_nid, dist);
1830 		else
1831 			imp += task_weight(cur, env->src_nid, dist) -
1832 			       task_weight(cur, env->dst_nid, dist);
1833 	}
1834 
1835 	/* Discourage picking a task already on its preferred node */
1836 	if (cur->numa_preferred_nid == env->dst_nid)
1837 		imp -= imp / 16;
1838 
1839 	/*
1840 	 * Encourage picking a task that moves to its preferred node.
1841 	 * This potentially makes imp larger than it's maximum of
1842 	 * 1998 (see SMALLIMP and task_weight for why) but in this
1843 	 * case, it does not matter.
1844 	 */
1845 	if (cur->numa_preferred_nid == env->src_nid)
1846 		imp += imp / 8;
1847 
1848 	if (maymove && moveimp > imp && moveimp > env->best_imp) {
1849 		imp = moveimp;
1850 		cur = NULL;
1851 		goto assign;
1852 	}
1853 
1854 	/*
1855 	 * Prefer swapping with a task moving to its preferred node over a
1856 	 * task that is not.
1857 	 */
1858 	if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
1859 	    env->best_task->numa_preferred_nid != env->src_nid) {
1860 		goto assign;
1861 	}
1862 
1863 	/*
1864 	 * If the NUMA importance is less than SMALLIMP,
1865 	 * task migration might only result in ping pong
1866 	 * of tasks and also hurt performance due to cache
1867 	 * misses.
1868 	 */
1869 	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
1870 		goto unlock;
1871 
1872 	/*
1873 	 * In the overloaded case, try and keep the load balanced.
1874 	 */
1875 	load = task_h_load(env->p) - task_h_load(cur);
1876 	if (!load)
1877 		goto assign;
1878 
1879 	dst_load = env->dst_stats.load + load;
1880 	src_load = env->src_stats.load - load;
1881 
1882 	if (load_too_imbalanced(src_load, dst_load, env))
1883 		goto unlock;
1884 
1885 assign:
1886 	/* Evaluate an idle CPU for a task numa move. */
1887 	if (!cur) {
1888 		int cpu = env->dst_stats.idle_cpu;
1889 
1890 		/* Nothing cached so current CPU went idle since the search. */
1891 		if (cpu < 0)
1892 			cpu = env->dst_cpu;
1893 
1894 		/*
1895 		 * If the CPU is no longer truly idle and the previous best CPU
1896 		 * is, keep using it.
1897 		 */
1898 		if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
1899 		    idle_cpu(env->best_cpu)) {
1900 			cpu = env->best_cpu;
1901 		}
1902 
1903 		env->dst_cpu = cpu;
1904 	}
1905 
1906 	task_numa_assign(env, cur, imp);
1907 
1908 	/*
1909 	 * If a move to idle is allowed because there is capacity or load
1910 	 * balance improves then stop the search. While a better swap
1911 	 * candidate may exist, a search is not free.
1912 	 */
1913 	if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
1914 		stopsearch = true;
1915 
1916 	/*
1917 	 * If a swap candidate must be identified and the current best task
1918 	 * moves its preferred node then stop the search.
1919 	 */
1920 	if (!maymove && env->best_task &&
1921 	    env->best_task->numa_preferred_nid == env->src_nid) {
1922 		stopsearch = true;
1923 	}
1924 unlock:
1925 	rcu_read_unlock();
1926 
1927 	return stopsearch;
1928 }
1929 
task_numa_find_cpu(struct task_numa_env * env,long taskimp,long groupimp)1930 static void task_numa_find_cpu(struct task_numa_env *env,
1931 				long taskimp, long groupimp)
1932 {
1933 	bool maymove = false;
1934 	int cpu;
1935 
1936 	/*
1937 	 * If dst node has spare capacity, then check if there is an
1938 	 * imbalance that would be overruled by the load balancer.
1939 	 */
1940 	if (env->dst_stats.node_type == node_has_spare) {
1941 		unsigned int imbalance;
1942 		int src_running, dst_running;
1943 
1944 		/*
1945 		 * Would movement cause an imbalance? Note that if src has
1946 		 * more running tasks that the imbalance is ignored as the
1947 		 * move improves the imbalance from the perspective of the
1948 		 * CPU load balancer.
1949 		 * */
1950 		src_running = env->src_stats.nr_running - 1;
1951 		dst_running = env->dst_stats.nr_running + 1;
1952 		imbalance = max(0, dst_running - src_running);
1953 		imbalance = adjust_numa_imbalance(imbalance, dst_running);
1954 
1955 		/* Use idle CPU if there is no imbalance */
1956 		if (!imbalance) {
1957 			maymove = true;
1958 			if (env->dst_stats.idle_cpu >= 0) {
1959 				env->dst_cpu = env->dst_stats.idle_cpu;
1960 				task_numa_assign(env, NULL, 0);
1961 				return;
1962 			}
1963 		}
1964 	} else {
1965 		long src_load, dst_load, load;
1966 		/*
1967 		 * If the improvement from just moving env->p direction is better
1968 		 * than swapping tasks around, check if a move is possible.
1969 		 */
1970 		load = task_h_load(env->p);
1971 		dst_load = env->dst_stats.load + load;
1972 		src_load = env->src_stats.load - load;
1973 		maymove = !load_too_imbalanced(src_load, dst_load, env);
1974 	}
1975 
1976 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1977 		/* Skip this CPU if the source task cannot migrate */
1978 		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
1979 			continue;
1980 
1981 		env->dst_cpu = cpu;
1982 		if (task_numa_compare(env, taskimp, groupimp, maymove))
1983 			break;
1984 	}
1985 }
1986 
task_numa_migrate(struct task_struct * p)1987 static int task_numa_migrate(struct task_struct *p)
1988 {
1989 	struct task_numa_env env = {
1990 		.p = p,
1991 
1992 		.src_cpu = task_cpu(p),
1993 		.src_nid = task_node(p),
1994 
1995 		.imbalance_pct = 112,
1996 
1997 		.best_task = NULL,
1998 		.best_imp = 0,
1999 		.best_cpu = -1,
2000 	};
2001 	unsigned long taskweight, groupweight;
2002 	struct sched_domain *sd;
2003 	long taskimp, groupimp;
2004 	struct numa_group *ng;
2005 	struct rq *best_rq;
2006 	int nid, ret, dist;
2007 
2008 	/*
2009 	 * Pick the lowest SD_NUMA domain, as that would have the smallest
2010 	 * imbalance and would be the first to start moving tasks about.
2011 	 *
2012 	 * And we want to avoid any moving of tasks about, as that would create
2013 	 * random movement of tasks -- counter the numa conditions we're trying
2014 	 * to satisfy here.
2015 	 */
2016 	rcu_read_lock();
2017 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
2018 	if (sd)
2019 		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
2020 	rcu_read_unlock();
2021 
2022 	/*
2023 	 * Cpusets can break the scheduler domain tree into smaller
2024 	 * balance domains, some of which do not cross NUMA boundaries.
2025 	 * Tasks that are "trapped" in such domains cannot be migrated
2026 	 * elsewhere, so there is no point in (re)trying.
2027 	 */
2028 	if (unlikely(!sd)) {
2029 		sched_setnuma(p, task_node(p));
2030 		return -EINVAL;
2031 	}
2032 
2033 	env.dst_nid = p->numa_preferred_nid;
2034 	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
2035 	taskweight = task_weight(p, env.src_nid, dist);
2036 	groupweight = group_weight(p, env.src_nid, dist);
2037 	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
2038 	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
2039 	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2040 	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2041 
2042 	/* Try to find a spot on the preferred nid. */
2043 	task_numa_find_cpu(&env, taskimp, groupimp);
2044 
2045 	/*
2046 	 * Look at other nodes in these cases:
2047 	 * - there is no space available on the preferred_nid
2048 	 * - the task is part of a numa_group that is interleaved across
2049 	 *   multiple NUMA nodes; in order to better consolidate the group,
2050 	 *   we need to check other locations.
2051 	 */
2052 	ng = deref_curr_numa_group(p);
2053 	if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2054 		for_each_online_node(nid) {
2055 			if (nid == env.src_nid || nid == p->numa_preferred_nid)
2056 				continue;
2057 
2058 			dist = node_distance(env.src_nid, env.dst_nid);
2059 			if (sched_numa_topology_type == NUMA_BACKPLANE &&
2060 						dist != env.dist) {
2061 				taskweight = task_weight(p, env.src_nid, dist);
2062 				groupweight = group_weight(p, env.src_nid, dist);
2063 			}
2064 
2065 			/* Only consider nodes where both task and groups benefit */
2066 			taskimp = task_weight(p, nid, dist) - taskweight;
2067 			groupimp = group_weight(p, nid, dist) - groupweight;
2068 			if (taskimp < 0 && groupimp < 0)
2069 				continue;
2070 
2071 			env.dist = dist;
2072 			env.dst_nid = nid;
2073 			update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2074 			task_numa_find_cpu(&env, taskimp, groupimp);
2075 		}
2076 	}
2077 
2078 	/*
2079 	 * If the task is part of a workload that spans multiple NUMA nodes,
2080 	 * and is migrating into one of the workload's active nodes, remember
2081 	 * this node as the task's preferred numa node, so the workload can
2082 	 * settle down.
2083 	 * A task that migrated to a second choice node will be better off
2084 	 * trying for a better one later. Do not set the preferred node here.
2085 	 */
2086 	if (ng) {
2087 		if (env.best_cpu == -1)
2088 			nid = env.src_nid;
2089 		else
2090 			nid = cpu_to_node(env.best_cpu);
2091 
2092 		if (nid != p->numa_preferred_nid)
2093 			sched_setnuma(p, nid);
2094 	}
2095 
2096 	/* No better CPU than the current one was found. */
2097 	if (env.best_cpu == -1) {
2098 		trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
2099 		return -EAGAIN;
2100 	}
2101 
2102 	best_rq = cpu_rq(env.best_cpu);
2103 	if (env.best_task == NULL) {
2104 		ret = migrate_task_to(p, env.best_cpu);
2105 		WRITE_ONCE(best_rq->numa_migrate_on, 0);
2106 		if (ret != 0)
2107 			trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
2108 		return ret;
2109 	}
2110 
2111 	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
2112 	WRITE_ONCE(best_rq->numa_migrate_on, 0);
2113 
2114 	if (ret != 0)
2115 		trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
2116 	put_task_struct(env.best_task);
2117 	return ret;
2118 }
2119 
2120 /* Attempt to migrate a task to a CPU on the preferred node. */
numa_migrate_preferred(struct task_struct * p)2121 static void numa_migrate_preferred(struct task_struct *p)
2122 {
2123 	unsigned long interval = HZ;
2124 
2125 	/* This task has no NUMA fault statistics yet */
2126 	if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
2127 		return;
2128 
2129 	/* Periodically retry migrating the task to the preferred node */
2130 	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
2131 	p->numa_migrate_retry = jiffies + interval;
2132 
2133 	/* Success if task is already running on preferred CPU */
2134 	if (task_node(p) == p->numa_preferred_nid)
2135 		return;
2136 
2137 	/* Otherwise, try migrate to a CPU on the preferred node */
2138 	task_numa_migrate(p);
2139 }
2140 
2141 /*
2142  * Find out how many nodes on the workload is actively running on. Do this by
2143  * tracking the nodes from which NUMA hinting faults are triggered. This can
2144  * be different from the set of nodes where the workload's memory is currently
2145  * located.
2146  */
numa_group_count_active_nodes(struct numa_group * numa_group)2147 static void numa_group_count_active_nodes(struct numa_group *numa_group)
2148 {
2149 	unsigned long faults, max_faults = 0;
2150 	int nid, active_nodes = 0;
2151 
2152 	for_each_online_node(nid) {
2153 		faults = group_faults_cpu(numa_group, nid);
2154 		if (faults > max_faults)
2155 			max_faults = faults;
2156 	}
2157 
2158 	for_each_online_node(nid) {
2159 		faults = group_faults_cpu(numa_group, nid);
2160 		if (faults * ACTIVE_NODE_FRACTION > max_faults)
2161 			active_nodes++;
2162 	}
2163 
2164 	numa_group->max_faults_cpu = max_faults;
2165 	numa_group->active_nodes = active_nodes;
2166 }
2167 
2168 /*
2169  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
2170  * increments. The more local the fault statistics are, the higher the scan
2171  * period will be for the next scan window. If local/(local+remote) ratio is
2172  * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
2173  * the scan period will decrease. Aim for 70% local accesses.
2174  */
2175 #define NUMA_PERIOD_SLOTS 10
2176 #define NUMA_PERIOD_THRESHOLD 7
2177 
2178 /*
2179  * Increase the scan period (slow down scanning) if the majority of
2180  * our memory is already on our local node, or if the majority of
2181  * the page accesses are shared with other processes.
2182  * Otherwise, decrease the scan period.
2183  */
update_task_scan_period(struct task_struct * p,unsigned long shared,unsigned long private)2184 static void update_task_scan_period(struct task_struct *p,
2185 			unsigned long shared, unsigned long private)
2186 {
2187 	unsigned int period_slot;
2188 	int lr_ratio, ps_ratio;
2189 	int diff;
2190 
2191 	unsigned long remote = p->numa_faults_locality[0];
2192 	unsigned long local = p->numa_faults_locality[1];
2193 
2194 	/*
2195 	 * If there were no record hinting faults then either the task is
2196 	 * completely idle or all activity is areas that are not of interest
2197 	 * to automatic numa balancing. Related to that, if there were failed
2198 	 * migration then it implies we are migrating too quickly or the local
2199 	 * node is overloaded. In either case, scan slower
2200 	 */
2201 	if (local + shared == 0 || p->numa_faults_locality[2]) {
2202 		p->numa_scan_period = min(p->numa_scan_period_max,
2203 			p->numa_scan_period << 1);
2204 
2205 		p->mm->numa_next_scan = jiffies +
2206 			msecs_to_jiffies(p->numa_scan_period);
2207 
2208 		return;
2209 	}
2210 
2211 	/*
2212 	 * Prepare to scale scan period relative to the current period.
2213 	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
2214 	 *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
2215 	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
2216 	 */
2217 	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
2218 	lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
2219 	ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
2220 
2221 	if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
2222 		/*
2223 		 * Most memory accesses are local. There is no need to
2224 		 * do fast NUMA scanning, since memory is already local.
2225 		 */
2226 		int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
2227 		if (!slot)
2228 			slot = 1;
2229 		diff = slot * period_slot;
2230 	} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
2231 		/*
2232 		 * Most memory accesses are shared with other tasks.
2233 		 * There is no point in continuing fast NUMA scanning,
2234 		 * since other tasks may just move the memory elsewhere.
2235 		 */
2236 		int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
2237 		if (!slot)
2238 			slot = 1;
2239 		diff = slot * period_slot;
2240 	} else {
2241 		/*
2242 		 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
2243 		 * yet they are not on the local NUMA node. Speed up
2244 		 * NUMA scanning to get the memory moved over.
2245 		 */
2246 		int ratio = max(lr_ratio, ps_ratio);
2247 		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
2248 	}
2249 
2250 	p->numa_scan_period = clamp(p->numa_scan_period + diff,
2251 			task_scan_min(p), task_scan_max(p));
2252 	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2253 }
2254 
2255 /*
2256  * Get the fraction of time the task has been running since the last
2257  * NUMA placement cycle. The scheduler keeps similar statistics, but
2258  * decays those on a 32ms period, which is orders of magnitude off
2259  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2260  * stats only if the task is so new there are no NUMA statistics yet.
2261  */
numa_get_avg_runtime(struct task_struct * p,u64 * period)2262 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2263 {
2264 	u64 runtime, delta, now;
2265 	/* Use the start of this time slice to avoid calculations. */
2266 	now = p->se.exec_start;
2267 	runtime = p->se.sum_exec_runtime;
2268 
2269 	if (p->last_task_numa_placement) {
2270 		delta = runtime - p->last_sum_exec_runtime;
2271 		*period = now - p->last_task_numa_placement;
2272 
2273 		/* Avoid time going backwards, prevent potential divide error: */
2274 		if (unlikely((s64)*period < 0))
2275 			*period = 0;
2276 	} else {
2277 		delta = p->se.avg.load_sum;
2278 		*period = LOAD_AVG_MAX;
2279 	}
2280 
2281 	p->last_sum_exec_runtime = runtime;
2282 	p->last_task_numa_placement = now;
2283 
2284 	return delta;
2285 }
2286 
2287 /*
2288  * Determine the preferred nid for a task in a numa_group. This needs to
2289  * be done in a way that produces consistent results with group_weight,
2290  * otherwise workloads might not converge.
2291  */
preferred_group_nid(struct task_struct * p,int nid)2292 static int preferred_group_nid(struct task_struct *p, int nid)
2293 {
2294 	nodemask_t nodes;
2295 	int dist;
2296 
2297 	/* Direct connections between all NUMA nodes. */
2298 	if (sched_numa_topology_type == NUMA_DIRECT)
2299 		return nid;
2300 
2301 	/*
2302 	 * On a system with glueless mesh NUMA topology, group_weight
2303 	 * scores nodes according to the number of NUMA hinting faults on
2304 	 * both the node itself, and on nearby nodes.
2305 	 */
2306 	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2307 		unsigned long score, max_score = 0;
2308 		int node, max_node = nid;
2309 
2310 		dist = sched_max_numa_distance;
2311 
2312 		for_each_online_node(node) {
2313 			score = group_weight(p, node, dist);
2314 			if (score > max_score) {
2315 				max_score = score;
2316 				max_node = node;
2317 			}
2318 		}
2319 		return max_node;
2320 	}
2321 
2322 	/*
2323 	 * Finding the preferred nid in a system with NUMA backplane
2324 	 * interconnect topology is more involved. The goal is to locate
2325 	 * tasks from numa_groups near each other in the system, and
2326 	 * untangle workloads from different sides of the system. This requires
2327 	 * searching down the hierarchy of node groups, recursively searching
2328 	 * inside the highest scoring group of nodes. The nodemask tricks
2329 	 * keep the complexity of the search down.
2330 	 */
2331 	nodes = node_online_map;
2332 	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2333 		unsigned long max_faults = 0;
2334 		nodemask_t max_group = NODE_MASK_NONE;
2335 		int a, b;
2336 
2337 		/* Are there nodes at this distance from each other? */
2338 		if (!find_numa_distance(dist))
2339 			continue;
2340 
2341 		for_each_node_mask(a, nodes) {
2342 			unsigned long faults = 0;
2343 			nodemask_t this_group;
2344 			nodes_clear(this_group);
2345 
2346 			/* Sum group's NUMA faults; includes a==b case. */
2347 			for_each_node_mask(b, nodes) {
2348 				if (node_distance(a, b) < dist) {
2349 					faults += group_faults(p, b);
2350 					node_set(b, this_group);
2351 					node_clear(b, nodes);
2352 				}
2353 			}
2354 
2355 			/* Remember the top group. */
2356 			if (faults > max_faults) {
2357 				max_faults = faults;
2358 				max_group = this_group;
2359 				/*
2360 				 * subtle: at the smallest distance there is
2361 				 * just one node left in each "group", the
2362 				 * winner is the preferred nid.
2363 				 */
2364 				nid = a;
2365 			}
2366 		}
2367 		/* Next round, evaluate the nodes within max_group. */
2368 		if (!max_faults)
2369 			break;
2370 		nodes = max_group;
2371 	}
2372 	return nid;
2373 }
2374 
task_numa_placement(struct task_struct * p)2375 static void task_numa_placement(struct task_struct *p)
2376 {
2377 	int seq, nid, max_nid = NUMA_NO_NODE;
2378 	unsigned long max_faults = 0;
2379 	unsigned long fault_types[2] = { 0, 0 };
2380 	unsigned long total_faults;
2381 	u64 runtime, period;
2382 	spinlock_t *group_lock = NULL;
2383 	struct numa_group *ng;
2384 
2385 	/*
2386 	 * The p->mm->numa_scan_seq field gets updated without
2387 	 * exclusive access. Use READ_ONCE() here to ensure
2388 	 * that the field is read in a single access:
2389 	 */
2390 	seq = READ_ONCE(p->mm->numa_scan_seq);
2391 	if (p->numa_scan_seq == seq)
2392 		return;
2393 	p->numa_scan_seq = seq;
2394 	p->numa_scan_period_max = task_scan_max(p);
2395 
2396 	total_faults = p->numa_faults_locality[0] +
2397 		       p->numa_faults_locality[1];
2398 	runtime = numa_get_avg_runtime(p, &period);
2399 
2400 	/* If the task is part of a group prevent parallel updates to group stats */
2401 	ng = deref_curr_numa_group(p);
2402 	if (ng) {
2403 		group_lock = &ng->lock;
2404 		spin_lock_irq(group_lock);
2405 	}
2406 
2407 	/* Find the node with the highest number of faults */
2408 	for_each_online_node(nid) {
2409 		/* Keep track of the offsets in numa_faults array */
2410 		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2411 		unsigned long faults = 0, group_faults = 0;
2412 		int priv;
2413 
2414 		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2415 			long diff, f_diff, f_weight;
2416 
2417 			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2418 			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2419 			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2420 			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2421 
2422 			/* Decay existing window, copy faults since last scan */
2423 			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2424 			fault_types[priv] += p->numa_faults[membuf_idx];
2425 			p->numa_faults[membuf_idx] = 0;
2426 
2427 			/*
2428 			 * Normalize the faults_from, so all tasks in a group
2429 			 * count according to CPU use, instead of by the raw
2430 			 * number of faults. Tasks with little runtime have
2431 			 * little over-all impact on throughput, and thus their
2432 			 * faults are less important.
2433 			 */
2434 			f_weight = div64_u64(runtime << 16, period + 1);
2435 			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2436 				   (total_faults + 1);
2437 			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2438 			p->numa_faults[cpubuf_idx] = 0;
2439 
2440 			p->numa_faults[mem_idx] += diff;
2441 			p->numa_faults[cpu_idx] += f_diff;
2442 			faults += p->numa_faults[mem_idx];
2443 			p->total_numa_faults += diff;
2444 			if (ng) {
2445 				/*
2446 				 * safe because we can only change our own group
2447 				 *
2448 				 * mem_idx represents the offset for a given
2449 				 * nid and priv in a specific region because it
2450 				 * is at the beginning of the numa_faults array.
2451 				 */
2452 				ng->faults[mem_idx] += diff;
2453 				ng->faults_cpu[mem_idx] += f_diff;
2454 				ng->total_faults += diff;
2455 				group_faults += ng->faults[mem_idx];
2456 			}
2457 		}
2458 
2459 		if (!ng) {
2460 			if (faults > max_faults) {
2461 				max_faults = faults;
2462 				max_nid = nid;
2463 			}
2464 		} else if (group_faults > max_faults) {
2465 			max_faults = group_faults;
2466 			max_nid = nid;
2467 		}
2468 	}
2469 
2470 	if (ng) {
2471 		numa_group_count_active_nodes(ng);
2472 		spin_unlock_irq(group_lock);
2473 		max_nid = preferred_group_nid(p, max_nid);
2474 	}
2475 
2476 	if (max_faults) {
2477 		/* Set the new preferred node */
2478 		if (max_nid != p->numa_preferred_nid)
2479 			sched_setnuma(p, max_nid);
2480 	}
2481 
2482 	update_task_scan_period(p, fault_types[0], fault_types[1]);
2483 }
2484 
get_numa_group(struct numa_group * grp)2485 static inline int get_numa_group(struct numa_group *grp)
2486 {
2487 	return refcount_inc_not_zero(&grp->refcount);
2488 }
2489 
put_numa_group(struct numa_group * grp)2490 static inline void put_numa_group(struct numa_group *grp)
2491 {
2492 	if (refcount_dec_and_test(&grp->refcount))
2493 		kfree_rcu(grp, rcu);
2494 }
2495 
task_numa_group(struct task_struct * p,int cpupid,int flags,int * priv)2496 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2497 			int *priv)
2498 {
2499 	struct numa_group *grp, *my_grp;
2500 	struct task_struct *tsk;
2501 	bool join = false;
2502 	int cpu = cpupid_to_cpu(cpupid);
2503 	int i;
2504 
2505 	if (unlikely(!deref_curr_numa_group(p))) {
2506 		unsigned int size = sizeof(struct numa_group) +
2507 				    4*nr_node_ids*sizeof(unsigned long);
2508 
2509 		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2510 		if (!grp)
2511 			return;
2512 
2513 		refcount_set(&grp->refcount, 1);
2514 		grp->active_nodes = 1;
2515 		grp->max_faults_cpu = 0;
2516 		spin_lock_init(&grp->lock);
2517 		grp->gid = p->pid;
2518 		/* Second half of the array tracks nids where faults happen */
2519 		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2520 						nr_node_ids;
2521 
2522 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2523 			grp->faults[i] = p->numa_faults[i];
2524 
2525 		grp->total_faults = p->total_numa_faults;
2526 
2527 		grp->nr_tasks++;
2528 		rcu_assign_pointer(p->numa_group, grp);
2529 	}
2530 
2531 	rcu_read_lock();
2532 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
2533 
2534 	if (!cpupid_match_pid(tsk, cpupid))
2535 		goto no_join;
2536 
2537 	grp = rcu_dereference(tsk->numa_group);
2538 	if (!grp)
2539 		goto no_join;
2540 
2541 	my_grp = deref_curr_numa_group(p);
2542 	if (grp == my_grp)
2543 		goto no_join;
2544 
2545 	/*
2546 	 * Only join the other group if its bigger; if we're the bigger group,
2547 	 * the other task will join us.
2548 	 */
2549 	if (my_grp->nr_tasks > grp->nr_tasks)
2550 		goto no_join;
2551 
2552 	/*
2553 	 * Tie-break on the grp address.
2554 	 */
2555 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2556 		goto no_join;
2557 
2558 	/* Always join threads in the same process. */
2559 	if (tsk->mm == current->mm)
2560 		join = true;
2561 
2562 	/* Simple filter to avoid false positives due to PID collisions */
2563 	if (flags & TNF_SHARED)
2564 		join = true;
2565 
2566 	/* Update priv based on whether false sharing was detected */
2567 	*priv = !join;
2568 
2569 	if (join && !get_numa_group(grp))
2570 		goto no_join;
2571 
2572 	rcu_read_unlock();
2573 
2574 	if (!join)
2575 		return;
2576 
2577 	BUG_ON(irqs_disabled());
2578 	double_lock_irq(&my_grp->lock, &grp->lock);
2579 
2580 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2581 		my_grp->faults[i] -= p->numa_faults[i];
2582 		grp->faults[i] += p->numa_faults[i];
2583 	}
2584 	my_grp->total_faults -= p->total_numa_faults;
2585 	grp->total_faults += p->total_numa_faults;
2586 
2587 	my_grp->nr_tasks--;
2588 	grp->nr_tasks++;
2589 
2590 	spin_unlock(&my_grp->lock);
2591 	spin_unlock_irq(&grp->lock);
2592 
2593 	rcu_assign_pointer(p->numa_group, grp);
2594 
2595 	put_numa_group(my_grp);
2596 	return;
2597 
2598 no_join:
2599 	rcu_read_unlock();
2600 	return;
2601 }
2602 
2603 /*
2604  * Get rid of NUMA staticstics associated with a task (either current or dead).
2605  * If @final is set, the task is dead and has reached refcount zero, so we can
2606  * safely free all relevant data structures. Otherwise, there might be
2607  * concurrent reads from places like load balancing and procfs, and we should
2608  * reset the data back to default state without freeing ->numa_faults.
2609  */
task_numa_free(struct task_struct * p,bool final)2610 void task_numa_free(struct task_struct *p, bool final)
2611 {
2612 	/* safe: p either is current or is being freed by current */
2613 	struct numa_group *grp = rcu_dereference_raw(p->numa_group);
2614 	unsigned long *numa_faults = p->numa_faults;
2615 	unsigned long flags;
2616 	int i;
2617 
2618 	if (!numa_faults)
2619 		return;
2620 
2621 	if (grp) {
2622 		spin_lock_irqsave(&grp->lock, flags);
2623 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2624 			grp->faults[i] -= p->numa_faults[i];
2625 		grp->total_faults -= p->total_numa_faults;
2626 
2627 		grp->nr_tasks--;
2628 		spin_unlock_irqrestore(&grp->lock, flags);
2629 		RCU_INIT_POINTER(p->numa_group, NULL);
2630 		put_numa_group(grp);
2631 	}
2632 
2633 	if (final) {
2634 		p->numa_faults = NULL;
2635 		kfree(numa_faults);
2636 	} else {
2637 		p->total_numa_faults = 0;
2638 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2639 			numa_faults[i] = 0;
2640 	}
2641 }
2642 
2643 /*
2644  * Got a PROT_NONE fault for a page on @node.
2645  */
task_numa_fault(int last_cpupid,int mem_node,int pages,int flags)2646 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2647 {
2648 	struct task_struct *p = current;
2649 	bool migrated = flags & TNF_MIGRATED;
2650 	int cpu_node = task_node(current);
2651 	int local = !!(flags & TNF_FAULT_LOCAL);
2652 	struct numa_group *ng;
2653 	int priv;
2654 
2655 	if (!static_branch_likely(&sched_numa_balancing))
2656 		return;
2657 
2658 	/* for example, ksmd faulting in a user's mm */
2659 	if (!p->mm)
2660 		return;
2661 
2662 	/* Allocate buffer to track faults on a per-node basis */
2663 	if (unlikely(!p->numa_faults)) {
2664 		int size = sizeof(*p->numa_faults) *
2665 			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2666 
2667 		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2668 		if (!p->numa_faults)
2669 			return;
2670 
2671 		p->total_numa_faults = 0;
2672 		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2673 	}
2674 
2675 	/*
2676 	 * First accesses are treated as private, otherwise consider accesses
2677 	 * to be private if the accessing pid has not changed
2678 	 */
2679 	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2680 		priv = 1;
2681 	} else {
2682 		priv = cpupid_match_pid(p, last_cpupid);
2683 		if (!priv && !(flags & TNF_NO_GROUP))
2684 			task_numa_group(p, last_cpupid, flags, &priv);
2685 	}
2686 
2687 	/*
2688 	 * If a workload spans multiple NUMA nodes, a shared fault that
2689 	 * occurs wholly within the set of nodes that the workload is
2690 	 * actively using should be counted as local. This allows the
2691 	 * scan rate to slow down when a workload has settled down.
2692 	 */
2693 	ng = deref_curr_numa_group(p);
2694 	if (!priv && !local && ng && ng->active_nodes > 1 &&
2695 				numa_is_active_node(cpu_node, ng) &&
2696 				numa_is_active_node(mem_node, ng))
2697 		local = 1;
2698 
2699 	/*
2700 	 * Retry to migrate task to preferred node periodically, in case it
2701 	 * previously failed, or the scheduler moved us.
2702 	 */
2703 	if (time_after(jiffies, p->numa_migrate_retry)) {
2704 		task_numa_placement(p);
2705 		numa_migrate_preferred(p);
2706 	}
2707 
2708 	if (migrated)
2709 		p->numa_pages_migrated += pages;
2710 	if (flags & TNF_MIGRATE_FAIL)
2711 		p->numa_faults_locality[2] += pages;
2712 
2713 	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2714 	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2715 	p->numa_faults_locality[local] += pages;
2716 }
2717 
reset_ptenuma_scan(struct task_struct * p)2718 static void reset_ptenuma_scan(struct task_struct *p)
2719 {
2720 	/*
2721 	 * We only did a read acquisition of the mmap sem, so
2722 	 * p->mm->numa_scan_seq is written to without exclusive access
2723 	 * and the update is not guaranteed to be atomic. That's not
2724 	 * much of an issue though, since this is just used for
2725 	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2726 	 * expensive, to avoid any form of compiler optimizations:
2727 	 */
2728 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2729 	p->mm->numa_scan_offset = 0;
2730 }
2731 
2732 /*
2733  * The expensive part of numa migration is done from task_work context.
2734  * Triggered from task_tick_numa().
2735  */
task_numa_work(struct callback_head * work)2736 static void task_numa_work(struct callback_head *work)
2737 {
2738 	unsigned long migrate, next_scan, now = jiffies;
2739 	struct task_struct *p = current;
2740 	struct mm_struct *mm = p->mm;
2741 	u64 runtime = p->se.sum_exec_runtime;
2742 	struct vm_area_struct *vma;
2743 	unsigned long start, end;
2744 	unsigned long nr_pte_updates = 0;
2745 	long pages, virtpages;
2746 
2747 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2748 
2749 	work->next = work;
2750 	/*
2751 	 * Who cares about NUMA placement when they're dying.
2752 	 *
2753 	 * NOTE: make sure not to dereference p->mm before this check,
2754 	 * exit_task_work() happens _after_ exit_mm() so we could be called
2755 	 * without p->mm even though we still had it when we enqueued this
2756 	 * work.
2757 	 */
2758 	if (p->flags & PF_EXITING)
2759 		return;
2760 
2761 	if (!mm->numa_next_scan) {
2762 		mm->numa_next_scan = now +
2763 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2764 	}
2765 
2766 	/*
2767 	 * Enforce maximal scan/migration frequency..
2768 	 */
2769 	migrate = mm->numa_next_scan;
2770 	if (time_before(now, migrate))
2771 		return;
2772 
2773 	if (p->numa_scan_period == 0) {
2774 		p->numa_scan_period_max = task_scan_max(p);
2775 		p->numa_scan_period = task_scan_start(p);
2776 	}
2777 
2778 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2779 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2780 		return;
2781 
2782 	/*
2783 	 * Delay this task enough that another task of this mm will likely win
2784 	 * the next time around.
2785 	 */
2786 	p->node_stamp += 2 * TICK_NSEC;
2787 
2788 	start = mm->numa_scan_offset;
2789 	pages = sysctl_numa_balancing_scan_size;
2790 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2791 	virtpages = pages * 8;	   /* Scan up to this much virtual space */
2792 	if (!pages)
2793 		return;
2794 
2795 
2796 	if (!mmap_read_trylock(mm))
2797 		return;
2798 	vma = find_vma(mm, start);
2799 	if (!vma) {
2800 		reset_ptenuma_scan(p);
2801 		start = 0;
2802 		vma = mm->mmap;
2803 	}
2804 	for (; vma; vma = vma->vm_next) {
2805 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2806 			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2807 			continue;
2808 		}
2809 
2810 		/*
2811 		 * Shared library pages mapped by multiple processes are not
2812 		 * migrated as it is expected they are cache replicated. Avoid
2813 		 * hinting faults in read-only file-backed mappings or the vdso
2814 		 * as migrating the pages will be of marginal benefit.
2815 		 */
2816 		if (!vma->vm_mm ||
2817 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2818 			continue;
2819 
2820 		/*
2821 		 * Skip inaccessible VMAs to avoid any confusion between
2822 		 * PROT_NONE and NUMA hinting ptes
2823 		 */
2824 		if (!vma_is_accessible(vma))
2825 			continue;
2826 
2827 		do {
2828 			start = max(start, vma->vm_start);
2829 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2830 			end = min(end, vma->vm_end);
2831 			nr_pte_updates = change_prot_numa(vma, start, end);
2832 
2833 			/*
2834 			 * Try to scan sysctl_numa_balancing_size worth of
2835 			 * hpages that have at least one present PTE that
2836 			 * is not already pte-numa. If the VMA contains
2837 			 * areas that are unused or already full of prot_numa
2838 			 * PTEs, scan up to virtpages, to skip through those
2839 			 * areas faster.
2840 			 */
2841 			if (nr_pte_updates)
2842 				pages -= (end - start) >> PAGE_SHIFT;
2843 			virtpages -= (end - start) >> PAGE_SHIFT;
2844 
2845 			start = end;
2846 			if (pages <= 0 || virtpages <= 0)
2847 				goto out;
2848 
2849 			cond_resched();
2850 		} while (end != vma->vm_end);
2851 	}
2852 
2853 out:
2854 	/*
2855 	 * It is possible to reach the end of the VMA list but the last few
2856 	 * VMAs are not guaranteed to the vma_migratable. If they are not, we
2857 	 * would find the !migratable VMA on the next scan but not reset the
2858 	 * scanner to the start so check it now.
2859 	 */
2860 	if (vma)
2861 		mm->numa_scan_offset = start;
2862 	else
2863 		reset_ptenuma_scan(p);
2864 	mmap_read_unlock(mm);
2865 
2866 	/*
2867 	 * Make sure tasks use at least 32x as much time to run other code
2868 	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2869 	 * Usually update_task_scan_period slows down scanning enough; on an
2870 	 * overloaded system we need to limit overhead on a per task basis.
2871 	 */
2872 	if (unlikely(p->se.sum_exec_runtime != runtime)) {
2873 		u64 diff = p->se.sum_exec_runtime - runtime;
2874 		p->node_stamp += 32 * diff;
2875 	}
2876 }
2877 
init_numa_balancing(unsigned long clone_flags,struct task_struct * p)2878 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
2879 {
2880 	int mm_users = 0;
2881 	struct mm_struct *mm = p->mm;
2882 
2883 	if (mm) {
2884 		mm_users = atomic_read(&mm->mm_users);
2885 		if (mm_users == 1) {
2886 			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2887 			mm->numa_scan_seq = 0;
2888 		}
2889 	}
2890 	p->node_stamp			= 0;
2891 	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
2892 	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
2893 	/* Protect against double add, see task_tick_numa and task_numa_work */
2894 	p->numa_work.next		= &p->numa_work;
2895 	p->numa_faults			= NULL;
2896 	RCU_INIT_POINTER(p->numa_group, NULL);
2897 	p->last_task_numa_placement	= 0;
2898 	p->last_sum_exec_runtime	= 0;
2899 
2900 	init_task_work(&p->numa_work, task_numa_work);
2901 
2902 	/* New address space, reset the preferred nid */
2903 	if (!(clone_flags & CLONE_VM)) {
2904 		p->numa_preferred_nid = NUMA_NO_NODE;
2905 		return;
2906 	}
2907 
2908 	/*
2909 	 * New thread, keep existing numa_preferred_nid which should be copied
2910 	 * already by arch_dup_task_struct but stagger when scans start.
2911 	 */
2912 	if (mm) {
2913 		unsigned int delay;
2914 
2915 		delay = min_t(unsigned int, task_scan_max(current),
2916 			current->numa_scan_period * mm_users * NSEC_PER_MSEC);
2917 		delay += 2 * TICK_NSEC;
2918 		p->node_stamp = delay;
2919 	}
2920 }
2921 
2922 /*
2923  * Drive the periodic memory faults..
2924  */
task_tick_numa(struct rq * rq,struct task_struct * curr)2925 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2926 {
2927 	struct callback_head *work = &curr->numa_work;
2928 	u64 period, now;
2929 
2930 	/*
2931 	 * We don't care about NUMA placement if we don't have memory.
2932 	 */
2933 	if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
2934 		return;
2935 
2936 	/*
2937 	 * Using runtime rather than walltime has the dual advantage that
2938 	 * we (mostly) drive the selection from busy threads and that the
2939 	 * task needs to have done some actual work before we bother with
2940 	 * NUMA placement.
2941 	 */
2942 	now = curr->se.sum_exec_runtime;
2943 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2944 
2945 	if (now > curr->node_stamp + period) {
2946 		if (!curr->node_stamp)
2947 			curr->numa_scan_period = task_scan_start(curr);
2948 		curr->node_stamp += period;
2949 
2950 		if (!time_before(jiffies, curr->mm->numa_next_scan))
2951 			task_work_add(curr, work, TWA_RESUME);
2952 	}
2953 }
2954 
update_scan_period(struct task_struct * p,int new_cpu)2955 static void update_scan_period(struct task_struct *p, int new_cpu)
2956 {
2957 	int src_nid = cpu_to_node(task_cpu(p));
2958 	int dst_nid = cpu_to_node(new_cpu);
2959 
2960 	if (!static_branch_likely(&sched_numa_balancing))
2961 		return;
2962 
2963 	if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
2964 		return;
2965 
2966 	if (src_nid == dst_nid)
2967 		return;
2968 
2969 	/*
2970 	 * Allow resets if faults have been trapped before one scan
2971 	 * has completed. This is most likely due to a new task that
2972 	 * is pulled cross-node due to wakeups or load balancing.
2973 	 */
2974 	if (p->numa_scan_seq) {
2975 		/*
2976 		 * Avoid scan adjustments if moving to the preferred
2977 		 * node or if the task was not previously running on
2978 		 * the preferred node.
2979 		 */
2980 		if (dst_nid == p->numa_preferred_nid ||
2981 		    (p->numa_preferred_nid != NUMA_NO_NODE &&
2982 			src_nid != p->numa_preferred_nid))
2983 			return;
2984 	}
2985 
2986 	p->numa_scan_period = task_scan_start(p);
2987 }
2988 
2989 #else
task_tick_numa(struct rq * rq,struct task_struct * curr)2990 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2991 {
2992 }
2993 
account_numa_enqueue(struct rq * rq,struct task_struct * p)2994 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2995 {
2996 }
2997 
account_numa_dequeue(struct rq * rq,struct task_struct * p)2998 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2999 {
3000 }
3001 
update_scan_period(struct task_struct * p,int new_cpu)3002 static inline void update_scan_period(struct task_struct *p, int new_cpu)
3003 {
3004 }
3005 
3006 #endif /* CONFIG_NUMA_BALANCING */
3007 
3008 static void
account_entity_enqueue(struct cfs_rq * cfs_rq,struct sched_entity * se)3009 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3010 {
3011 	update_load_add(&cfs_rq->load, se->load.weight);
3012 #ifdef CONFIG_SMP
3013 	if (entity_is_task(se)) {
3014 		struct rq *rq = rq_of(cfs_rq);
3015 
3016 		account_numa_enqueue(rq, task_of(se));
3017 		list_add(&se->group_node, &rq->cfs_tasks);
3018 	}
3019 #endif
3020 	cfs_rq->nr_running++;
3021 }
3022 
3023 static void
account_entity_dequeue(struct cfs_rq * cfs_rq,struct sched_entity * se)3024 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3025 {
3026 	update_load_sub(&cfs_rq->load, se->load.weight);
3027 #ifdef CONFIG_SMP
3028 	if (entity_is_task(se)) {
3029 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3030 		list_del_init(&se->group_node);
3031 	}
3032 #endif
3033 	cfs_rq->nr_running--;
3034 }
3035 
3036 /*
3037  * Signed add and clamp on underflow.
3038  *
3039  * Explicitly do a load-store to ensure the intermediate value never hits
3040  * memory. This allows lockless observations without ever seeing the negative
3041  * values.
3042  */
3043 #define add_positive(_ptr, _val) do {                           \
3044 	typeof(_ptr) ptr = (_ptr);                              \
3045 	typeof(_val) val = (_val);                              \
3046 	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
3047 								\
3048 	res = var + val;                                        \
3049 								\
3050 	if (val < 0 && res > var)                               \
3051 		res = 0;                                        \
3052 								\
3053 	WRITE_ONCE(*ptr, res);                                  \
3054 } while (0)
3055 
3056 /*
3057  * Unsigned subtract and clamp on underflow.
3058  *
3059  * Explicitly do a load-store to ensure the intermediate value never hits
3060  * memory. This allows lockless observations without ever seeing the negative
3061  * values.
3062  */
3063 #define sub_positive(_ptr, _val) do {				\
3064 	typeof(_ptr) ptr = (_ptr);				\
3065 	typeof(*ptr) val = (_val);				\
3066 	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
3067 	res = var - val;					\
3068 	if (res > var)						\
3069 		res = 0;					\
3070 	WRITE_ONCE(*ptr, res);					\
3071 } while (0)
3072 
3073 /*
3074  * Remove and clamp on negative, from a local variable.
3075  *
3076  * A variant of sub_positive(), which does not use explicit load-store
3077  * and is thus optimized for local variable updates.
3078  */
3079 #define lsub_positive(_ptr, _val) do {				\
3080 	typeof(_ptr) ptr = (_ptr);				\
3081 	*ptr -= min_t(typeof(*ptr), *ptr, _val);		\
3082 } while (0)
3083 
3084 #ifdef CONFIG_SMP
3085 static inline void
enqueue_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)3086 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3087 {
3088 	cfs_rq->avg.load_avg += se->avg.load_avg;
3089 	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3090 }
3091 
3092 static inline void
dequeue_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)3093 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3094 {
3095 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3096 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3097 }
3098 #else
3099 static inline void
enqueue_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)3100 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3101 static inline void
dequeue_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)3102 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3103 #endif
3104 
reweight_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,unsigned long weight)3105 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3106 			    unsigned long weight)
3107 {
3108 	if (se->on_rq) {
3109 		/* commit outstanding execution time */
3110 		if (cfs_rq->curr == se)
3111 			update_curr(cfs_rq);
3112 		update_load_sub(&cfs_rq->load, se->load.weight);
3113 	}
3114 	dequeue_load_avg(cfs_rq, se);
3115 
3116 	update_load_set(&se->load, weight);
3117 
3118 #ifdef CONFIG_SMP
3119 	do {
3120 		u32 divider = get_pelt_divider(&se->avg);
3121 
3122 		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3123 	} while (0);
3124 #endif
3125 
3126 	enqueue_load_avg(cfs_rq, se);
3127 	if (se->on_rq)
3128 		update_load_add(&cfs_rq->load, se->load.weight);
3129 
3130 }
3131 
reweight_task(struct task_struct * p,int prio)3132 void reweight_task(struct task_struct *p, int prio)
3133 {
3134 	struct sched_entity *se = &p->se;
3135 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3136 	struct load_weight *load = &se->load;
3137 	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
3138 
3139 	reweight_entity(cfs_rq, se, weight);
3140 	load->inv_weight = sched_prio_to_wmult[prio];
3141 }
3142 
3143 #ifdef CONFIG_FAIR_GROUP_SCHED
3144 #ifdef CONFIG_SMP
3145 /*
3146  * All this does is approximate the hierarchical proportion which includes that
3147  * global sum we all love to hate.
3148  *
3149  * That is, the weight of a group entity, is the proportional share of the
3150  * group weight based on the group runqueue weights. That is:
3151  *
3152  *                     tg->weight * grq->load.weight
3153  *   ge->load.weight = -----------------------------               (1)
3154  *                       \Sum grq->load.weight
3155  *
3156  * Now, because computing that sum is prohibitively expensive to compute (been
3157  * there, done that) we approximate it with this average stuff. The average
3158  * moves slower and therefore the approximation is cheaper and more stable.
3159  *
3160  * So instead of the above, we substitute:
3161  *
3162  *   grq->load.weight -> grq->avg.load_avg                         (2)
3163  *
3164  * which yields the following:
3165  *
3166  *                     tg->weight * grq->avg.load_avg
3167  *   ge->load.weight = ------------------------------              (3)
3168  *                             tg->load_avg
3169  *
3170  * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3171  *
3172  * That is shares_avg, and it is right (given the approximation (2)).
3173  *
3174  * The problem with it is that because the average is slow -- it was designed
3175  * to be exactly that of course -- this leads to transients in boundary
3176  * conditions. In specific, the case where the group was idle and we start the
3177  * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3178  * yielding bad latency etc..
3179  *
3180  * Now, in that special case (1) reduces to:
3181  *
3182  *                     tg->weight * grq->load.weight
3183  *   ge->load.weight = ----------------------------- = tg->weight   (4)
3184  *                         grp->load.weight
3185  *
3186  * That is, the sum collapses because all other CPUs are idle; the UP scenario.
3187  *
3188  * So what we do is modify our approximation (3) to approach (4) in the (near)
3189  * UP case, like:
3190  *
3191  *   ge->load.weight =
3192  *
3193  *              tg->weight * grq->load.weight
3194  *     ---------------------------------------------------         (5)
3195  *     tg->load_avg - grq->avg.load_avg + grq->load.weight
3196  *
3197  * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3198  * we need to use grq->avg.load_avg as its lower bound, which then gives:
3199  *
3200  *
3201  *                     tg->weight * grq->load.weight
3202  *   ge->load.weight = -----------------------------		   (6)
3203  *                             tg_load_avg'
3204  *
3205  * Where:
3206  *
3207  *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3208  *                  max(grq->load.weight, grq->avg.load_avg)
3209  *
3210  * And that is shares_weight and is icky. In the (near) UP case it approaches
3211  * (4) while in the normal case it approaches (3). It consistently
3212  * overestimates the ge->load.weight and therefore:
3213  *
3214  *   \Sum ge->load.weight >= tg->weight
3215  *
3216  * hence icky!
3217  */
calc_group_shares(struct cfs_rq * cfs_rq)3218 static long calc_group_shares(struct cfs_rq *cfs_rq)
3219 {
3220 	long tg_weight, tg_shares, load, shares;
3221 	struct task_group *tg = cfs_rq->tg;
3222 
3223 	tg_shares = READ_ONCE(tg->shares);
3224 
3225 	load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3226 
3227 	tg_weight = atomic_long_read(&tg->load_avg);
3228 
3229 	/* Ensure tg_weight >= load */
3230 	tg_weight -= cfs_rq->tg_load_avg_contrib;
3231 	tg_weight += load;
3232 
3233 	shares = (tg_shares * load);
3234 	if (tg_weight)
3235 		shares /= tg_weight;
3236 
3237 	/*
3238 	 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
3239 	 * of a group with small tg->shares value. It is a floor value which is
3240 	 * assigned as a minimum load.weight to the sched_entity representing
3241 	 * the group on a CPU.
3242 	 *
3243 	 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
3244 	 * on an 8-core system with 8 tasks each runnable on one CPU shares has
3245 	 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
3246 	 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
3247 	 * instead of 0.
3248 	 */
3249 	return clamp_t(long, shares, MIN_SHARES, tg_shares);
3250 }
3251 #endif /* CONFIG_SMP */
3252 
3253 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3254 
3255 /*
3256  * Recomputes the group entity based on the current state of its group
3257  * runqueue.
3258  */
update_cfs_group(struct sched_entity * se)3259 static void update_cfs_group(struct sched_entity *se)
3260 {
3261 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3262 	long shares;
3263 
3264 	if (!gcfs_rq)
3265 		return;
3266 
3267 	if (throttled_hierarchy(gcfs_rq))
3268 		return;
3269 
3270 #ifndef CONFIG_SMP
3271 	shares = READ_ONCE(gcfs_rq->tg->shares);
3272 
3273 	if (likely(se->load.weight == shares))
3274 		return;
3275 #else
3276 	shares   = calc_group_shares(gcfs_rq);
3277 #endif
3278 
3279 	reweight_entity(cfs_rq_of(se), se, shares);
3280 }
3281 
3282 #else /* CONFIG_FAIR_GROUP_SCHED */
update_cfs_group(struct sched_entity * se)3283 static inline void update_cfs_group(struct sched_entity *se)
3284 {
3285 }
3286 #endif /* CONFIG_FAIR_GROUP_SCHED */
3287 
cfs_rq_util_change(struct cfs_rq * cfs_rq,int flags)3288 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3289 {
3290 	struct rq *rq = rq_of(cfs_rq);
3291 
3292 	if (&rq->cfs == cfs_rq) {
3293 		/*
3294 		 * There are a few boundary cases this might miss but it should
3295 		 * get called often enough that that should (hopefully) not be
3296 		 * a real problem.
3297 		 *
3298 		 * It will not get called when we go idle, because the idle
3299 		 * thread is a different class (!fair), nor will the utilization
3300 		 * number include things like RT tasks.
3301 		 *
3302 		 * As is, the util number is not freq-invariant (we'd have to
3303 		 * implement arch_scale_freq_capacity() for that).
3304 		 *
3305 		 * See cpu_util().
3306 		 */
3307 		cpufreq_update_util(rq, flags);
3308 	}
3309 }
3310 
3311 #ifdef CONFIG_SMP
3312 #ifdef CONFIG_FAIR_GROUP_SCHED
3313 /**
3314  * update_tg_load_avg - update the tg's load avg
3315  * @cfs_rq: the cfs_rq whose avg changed
3316  *
3317  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3318  * However, because tg->load_avg is a global value there are performance
3319  * considerations.
3320  *
3321  * In order to avoid having to look at the other cfs_rq's, we use a
3322  * differential update where we store the last value we propagated. This in
3323  * turn allows skipping updates if the differential is 'small'.
3324  *
3325  * Updating tg's load_avg is necessary before update_cfs_share().
3326  */
update_tg_load_avg(struct cfs_rq * cfs_rq)3327 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
3328 {
3329 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3330 
3331 	/*
3332 	 * No need to update load_avg for root_task_group as it is not used.
3333 	 */
3334 	if (cfs_rq->tg == &root_task_group)
3335 		return;
3336 
3337 	if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3338 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
3339 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3340 	}
3341 }
3342 
3343 /*
3344  * Called within set_task_rq() right before setting a task's CPU. The
3345  * caller only guarantees p->pi_lock is held; no other assumptions,
3346  * including the state of rq->lock, should be made.
3347  */
set_task_rq_fair(struct sched_entity * se,struct cfs_rq * prev,struct cfs_rq * next)3348 void set_task_rq_fair(struct sched_entity *se,
3349 		      struct cfs_rq *prev, struct cfs_rq *next)
3350 {
3351 	u64 p_last_update_time;
3352 	u64 n_last_update_time;
3353 
3354 	if (!sched_feat(ATTACH_AGE_LOAD))
3355 		return;
3356 
3357 	/*
3358 	 * We are supposed to update the task to "current" time, then its up to
3359 	 * date and ready to go to new CPU/cfs_rq. But we have difficulty in
3360 	 * getting what current time is, so simply throw away the out-of-date
3361 	 * time. This will result in the wakee task is less decayed, but giving
3362 	 * the wakee more load sounds not bad.
3363 	 */
3364 	if (!(se->avg.last_update_time && prev))
3365 		return;
3366 
3367 #ifndef CONFIG_64BIT
3368 	{
3369 		u64 p_last_update_time_copy;
3370 		u64 n_last_update_time_copy;
3371 
3372 		do {
3373 			p_last_update_time_copy = prev->load_last_update_time_copy;
3374 			n_last_update_time_copy = next->load_last_update_time_copy;
3375 
3376 			smp_rmb();
3377 
3378 			p_last_update_time = prev->avg.last_update_time;
3379 			n_last_update_time = next->avg.last_update_time;
3380 
3381 		} while (p_last_update_time != p_last_update_time_copy ||
3382 			 n_last_update_time != n_last_update_time_copy);
3383 	}
3384 #else
3385 	p_last_update_time = prev->avg.last_update_time;
3386 	n_last_update_time = next->avg.last_update_time;
3387 #endif
3388 	__update_load_avg_blocked_se(p_last_update_time, se);
3389 	se->avg.last_update_time = n_last_update_time;
3390 }
3391 
3392 /*
3393  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3394  * propagate its contribution. The key to this propagation is the invariant
3395  * that for each group:
3396  *
3397  *   ge->avg == grq->avg						(1)
3398  *
3399  * _IFF_ we look at the pure running and runnable sums. Because they
3400  * represent the very same entity, just at different points in the hierarchy.
3401  *
3402  * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
3403  * and simply copies the running/runnable sum over (but still wrong, because
3404  * the group entity and group rq do not have their PELT windows aligned).
3405  *
3406  * However, update_tg_cfs_load() is more complex. So we have:
3407  *
3408  *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg		(2)
3409  *
3410  * And since, like util, the runnable part should be directly transferable,
3411  * the following would _appear_ to be the straight forward approach:
3412  *
3413  *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg	(3)
3414  *
3415  * And per (1) we have:
3416  *
3417  *   ge->avg.runnable_avg == grq->avg.runnable_avg
3418  *
3419  * Which gives:
3420  *
3421  *                      ge->load.weight * grq->avg.load_avg
3422  *   ge->avg.load_avg = -----------------------------------		(4)
3423  *                               grq->load.weight
3424  *
3425  * Except that is wrong!
3426  *
3427  * Because while for entities historical weight is not important and we
3428  * really only care about our future and therefore can consider a pure
3429  * runnable sum, runqueues can NOT do this.
3430  *
3431  * We specifically want runqueues to have a load_avg that includes
3432  * historical weights. Those represent the blocked load, the load we expect
3433  * to (shortly) return to us. This only works by keeping the weights as
3434  * integral part of the sum. We therefore cannot decompose as per (3).
3435  *
3436  * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3437  * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3438  * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3439  * runnable section of these tasks overlap (or not). If they were to perfectly
3440  * align the rq as a whole would be runnable 2/3 of the time. If however we
3441  * always have at least 1 runnable task, the rq as a whole is always runnable.
3442  *
3443  * So we'll have to approximate.. :/
3444  *
3445  * Given the constraint:
3446  *
3447  *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
3448  *
3449  * We can construct a rule that adds runnable to a rq by assuming minimal
3450  * overlap.
3451  *
3452  * On removal, we'll assume each task is equally runnable; which yields:
3453  *
3454  *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3455  *
3456  * XXX: only do this for the part of runnable > running ?
3457  *
3458  */
3459 static inline void
update_tg_cfs_util(struct cfs_rq * cfs_rq,struct sched_entity * se,struct cfs_rq * gcfs_rq)3460 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3461 {
3462 	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3463 	u32 divider;
3464 
3465 	/* Nothing to update */
3466 	if (!delta)
3467 		return;
3468 
3469 	/*
3470 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3471 	 * See ___update_load_avg() for details.
3472 	 */
3473 	divider = get_pelt_divider(&cfs_rq->avg);
3474 
3475 	/* Set new sched_entity's utilization */
3476 	se->avg.util_avg = gcfs_rq->avg.util_avg;
3477 	se->avg.util_sum = se->avg.util_avg * divider;
3478 
3479 	/* Update parent cfs_rq utilization */
3480 	add_positive(&cfs_rq->avg.util_avg, delta);
3481 	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3482 }
3483 
3484 static inline void
update_tg_cfs_runnable(struct cfs_rq * cfs_rq,struct sched_entity * se,struct cfs_rq * gcfs_rq)3485 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3486 {
3487 	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
3488 	u32 divider;
3489 
3490 	/* Nothing to update */
3491 	if (!delta)
3492 		return;
3493 
3494 	/*
3495 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3496 	 * See ___update_load_avg() for details.
3497 	 */
3498 	divider = get_pelt_divider(&cfs_rq->avg);
3499 
3500 	/* Set new sched_entity's runnable */
3501 	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
3502 	se->avg.runnable_sum = se->avg.runnable_avg * divider;
3503 
3504 	/* Update parent cfs_rq runnable */
3505 	add_positive(&cfs_rq->avg.runnable_avg, delta);
3506 	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3507 }
3508 
3509 static inline void
update_tg_cfs_load(struct cfs_rq * cfs_rq,struct sched_entity * se,struct cfs_rq * gcfs_rq)3510 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3511 {
3512 	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3513 	unsigned long load_avg;
3514 	u64 load_sum = 0;
3515 	s64 delta_sum;
3516 	u32 divider;
3517 
3518 	if (!runnable_sum)
3519 		return;
3520 
3521 	gcfs_rq->prop_runnable_sum = 0;
3522 
3523 	/*
3524 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3525 	 * See ___update_load_avg() for details.
3526 	 */
3527 	divider = get_pelt_divider(&cfs_rq->avg);
3528 
3529 	if (runnable_sum >= 0) {
3530 		/*
3531 		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3532 		 * the CPU is saturated running == runnable.
3533 		 */
3534 		runnable_sum += se->avg.load_sum;
3535 		runnable_sum = min_t(long, runnable_sum, divider);
3536 	} else {
3537 		/*
3538 		 * Estimate the new unweighted runnable_sum of the gcfs_rq by
3539 		 * assuming all tasks are equally runnable.
3540 		 */
3541 		if (scale_load_down(gcfs_rq->load.weight)) {
3542 			load_sum = div_s64(gcfs_rq->avg.load_sum,
3543 				scale_load_down(gcfs_rq->load.weight));
3544 		}
3545 
3546 		/* But make sure to not inflate se's runnable */
3547 		runnable_sum = min(se->avg.load_sum, load_sum);
3548 	}
3549 
3550 	/*
3551 	 * runnable_sum can't be lower than running_sum
3552 	 * Rescale running sum to be in the same range as runnable sum
3553 	 * running_sum is in [0 : LOAD_AVG_MAX <<  SCHED_CAPACITY_SHIFT]
3554 	 * runnable_sum is in [0 : LOAD_AVG_MAX]
3555 	 */
3556 	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
3557 	runnable_sum = max(runnable_sum, running_sum);
3558 
3559 	load_sum = (s64)se_weight(se) * runnable_sum;
3560 	load_avg = div_s64(load_sum, divider);
3561 
3562 	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
3563 	delta_avg = load_avg - se->avg.load_avg;
3564 
3565 	se->avg.load_sum = runnable_sum;
3566 	se->avg.load_avg = load_avg;
3567 	add_positive(&cfs_rq->avg.load_avg, delta_avg);
3568 	add_positive(&cfs_rq->avg.load_sum, delta_sum);
3569 }
3570 
add_tg_cfs_propagate(struct cfs_rq * cfs_rq,long runnable_sum)3571 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
3572 {
3573 	cfs_rq->propagate = 1;
3574 	cfs_rq->prop_runnable_sum += runnable_sum;
3575 }
3576 
3577 /* Update task and its cfs_rq load average */
propagate_entity_load_avg(struct sched_entity * se)3578 static inline int propagate_entity_load_avg(struct sched_entity *se)
3579 {
3580 	struct cfs_rq *cfs_rq, *gcfs_rq;
3581 
3582 	if (entity_is_task(se))
3583 		return 0;
3584 
3585 	gcfs_rq = group_cfs_rq(se);
3586 	if (!gcfs_rq->propagate)
3587 		return 0;
3588 
3589 	gcfs_rq->propagate = 0;
3590 
3591 	cfs_rq = cfs_rq_of(se);
3592 
3593 	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
3594 
3595 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3596 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
3597 	update_tg_cfs_load(cfs_rq, se, gcfs_rq);
3598 
3599 	trace_pelt_cfs_tp(cfs_rq);
3600 	trace_pelt_se_tp(se);
3601 
3602 	return 1;
3603 }
3604 
3605 /*
3606  * Check if we need to update the load and the utilization of a blocked
3607  * group_entity:
3608  */
skip_blocked_update(struct sched_entity * se)3609 static inline bool skip_blocked_update(struct sched_entity *se)
3610 {
3611 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3612 
3613 	/*
3614 	 * If sched_entity still have not zero load or utilization, we have to
3615 	 * decay it:
3616 	 */
3617 	if (se->avg.load_avg || se->avg.util_avg)
3618 		return false;
3619 
3620 	/*
3621 	 * If there is a pending propagation, we have to update the load and
3622 	 * the utilization of the sched_entity:
3623 	 */
3624 	if (gcfs_rq->propagate)
3625 		return false;
3626 
3627 	/*
3628 	 * Otherwise, the load and the utilization of the sched_entity is
3629 	 * already zero and there is no pending propagation, so it will be a
3630 	 * waste of time to try to decay it:
3631 	 */
3632 	return true;
3633 }
3634 
3635 #else /* CONFIG_FAIR_GROUP_SCHED */
3636 
update_tg_load_avg(struct cfs_rq * cfs_rq)3637 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
3638 
propagate_entity_load_avg(struct sched_entity * se)3639 static inline int propagate_entity_load_avg(struct sched_entity *se)
3640 {
3641 	return 0;
3642 }
3643 
add_tg_cfs_propagate(struct cfs_rq * cfs_rq,long runnable_sum)3644 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
3645 
3646 #endif /* CONFIG_FAIR_GROUP_SCHED */
3647 
3648 /**
3649  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3650  * @now: current time, as per cfs_rq_clock_pelt()
3651  * @cfs_rq: cfs_rq to update
3652  *
3653  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3654  * avg. The immediate corollary is that all (fair) tasks must be attached, see
3655  * post_init_entity_util_avg().
3656  *
3657  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3658  *
3659  * Returns true if the load decayed or we removed load.
3660  *
3661  * Since both these conditions indicate a changed cfs_rq->avg.load we should
3662  * call update_tg_load_avg() when this function returns true.
3663  */
3664 static inline int
update_cfs_rq_load_avg(u64 now,struct cfs_rq * cfs_rq)3665 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
3666 {
3667 	unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
3668 	struct sched_avg *sa = &cfs_rq->avg;
3669 	int decayed = 0;
3670 
3671 	if (cfs_rq->removed.nr) {
3672 		unsigned long r;
3673 		u32 divider = get_pelt_divider(&cfs_rq->avg);
3674 
3675 		raw_spin_lock(&cfs_rq->removed.lock);
3676 		swap(cfs_rq->removed.util_avg, removed_util);
3677 		swap(cfs_rq->removed.load_avg, removed_load);
3678 		swap(cfs_rq->removed.runnable_avg, removed_runnable);
3679 		cfs_rq->removed.nr = 0;
3680 		raw_spin_unlock(&cfs_rq->removed.lock);
3681 
3682 		r = removed_load;
3683 		sub_positive(&sa->load_avg, r);
3684 		sub_positive(&sa->load_sum, r * divider);
3685 
3686 		r = removed_util;
3687 		sub_positive(&sa->util_avg, r);
3688 		sub_positive(&sa->util_sum, r * divider);
3689 		/*
3690 		 * Because of rounding, se->util_sum might ends up being +1 more than
3691 		 * cfs->util_sum. Although this is not a problem by itself, detaching
3692 		 * a lot of tasks with the rounding problem between 2 updates of
3693 		 * util_avg (~1ms) can make cfs->util_sum becoming null whereas
3694 		 * cfs_util_avg is not.
3695 		 * Check that util_sum is still above its lower bound for the new
3696 		 * util_avg. Given that period_contrib might have moved since the last
3697 		 * sync, we are only sure that util_sum must be above or equal to
3698 		 *    util_avg * minimum possible divider
3699 		 */
3700 		sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
3701 
3702 		r = removed_runnable;
3703 		sub_positive(&sa->runnable_avg, r);
3704 		sub_positive(&sa->runnable_sum, r * divider);
3705 
3706 		/*
3707 		 * removed_runnable is the unweighted version of removed_load so we
3708 		 * can use it to estimate removed_load_sum.
3709 		 */
3710 		add_tg_cfs_propagate(cfs_rq,
3711 			-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
3712 
3713 		decayed = 1;
3714 	}
3715 
3716 	decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
3717 
3718 #ifndef CONFIG_64BIT
3719 	smp_wmb();
3720 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
3721 #endif
3722 
3723 	return decayed;
3724 }
3725 
3726 /**
3727  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3728  * @cfs_rq: cfs_rq to attach to
3729  * @se: sched_entity to attach
3730  *
3731  * Must call update_cfs_rq_load_avg() before this, since we rely on
3732  * cfs_rq->avg.last_update_time being current.
3733  */
attach_entity_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)3734 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3735 {
3736 	/*
3737 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3738 	 * See ___update_load_avg() for details.
3739 	 */
3740 	u32 divider = get_pelt_divider(&cfs_rq->avg);
3741 
3742 	/*
3743 	 * When we attach the @se to the @cfs_rq, we must align the decay
3744 	 * window because without that, really weird and wonderful things can
3745 	 * happen.
3746 	 *
3747 	 * XXX illustrate
3748 	 */
3749 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
3750 	se->avg.period_contrib = cfs_rq->avg.period_contrib;
3751 
3752 	/*
3753 	 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
3754 	 * period_contrib. This isn't strictly correct, but since we're
3755 	 * entirely outside of the PELT hierarchy, nobody cares if we truncate
3756 	 * _sum a little.
3757 	 */
3758 	se->avg.util_sum = se->avg.util_avg * divider;
3759 
3760 	se->avg.runnable_sum = se->avg.runnable_avg * divider;
3761 
3762 	se->avg.load_sum = se->avg.load_avg * divider;
3763 	if (se_weight(se) < se->avg.load_sum)
3764 		se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3765 	else
3766 		se->avg.load_sum = 1;
3767 
3768 	enqueue_load_avg(cfs_rq, se);
3769 	cfs_rq->avg.util_avg += se->avg.util_avg;
3770 	cfs_rq->avg.util_sum += se->avg.util_sum;
3771 	cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
3772 	cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
3773 
3774 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
3775 
3776 	cfs_rq_util_change(cfs_rq, 0);
3777 
3778 	trace_pelt_cfs_tp(cfs_rq);
3779 }
3780 
3781 /**
3782  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3783  * @cfs_rq: cfs_rq to detach from
3784  * @se: sched_entity to detach
3785  *
3786  * Must call update_cfs_rq_load_avg() before this, since we rely on
3787  * cfs_rq->avg.last_update_time being current.
3788  */
detach_entity_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)3789 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3790 {
3791 	dequeue_load_avg(cfs_rq, se);
3792 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3793 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3794 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3795 	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
3796 
3797 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
3798 
3799 	cfs_rq_util_change(cfs_rq, 0);
3800 
3801 	trace_pelt_cfs_tp(cfs_rq);
3802 }
3803 
3804 /*
3805  * Optional action to be done while updating the load average
3806  */
3807 #define UPDATE_TG	0x1
3808 #define SKIP_AGE_LOAD	0x2
3809 #define DO_ATTACH	0x4
3810 
3811 /* Update task and its cfs_rq load average */
update_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags)3812 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3813 {
3814 	u64 now = cfs_rq_clock_pelt(cfs_rq);
3815 	int decayed;
3816 
3817 	trace_android_vh_prepare_update_load_avg_se(se, flags);
3818 	/*
3819 	 * Track task load average for carrying it to new CPU after migrated, and
3820 	 * track group sched_entity load average for task_h_load calc in migration
3821 	 */
3822 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
3823 		__update_load_avg_se(now, cfs_rq, se);
3824 
3825 	trace_android_vh_finish_update_load_avg_se(se, flags);
3826 
3827 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
3828 	decayed |= propagate_entity_load_avg(se);
3829 
3830 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3831 
3832 		/*
3833 		 * DO_ATTACH means we're here from enqueue_entity().
3834 		 * !last_update_time means we've passed through
3835 		 * migrate_task_rq_fair() indicating we migrated.
3836 		 *
3837 		 * IOW we're enqueueing a task on a new CPU.
3838 		 */
3839 		attach_entity_load_avg(cfs_rq, se);
3840 		update_tg_load_avg(cfs_rq);
3841 
3842 	} else if (decayed) {
3843 		cfs_rq_util_change(cfs_rq, 0);
3844 
3845 		if (flags & UPDATE_TG)
3846 			update_tg_load_avg(cfs_rq);
3847 	}
3848 }
3849 
3850 #ifndef CONFIG_64BIT
cfs_rq_last_update_time(struct cfs_rq * cfs_rq)3851 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3852 {
3853 	u64 last_update_time_copy;
3854 	u64 last_update_time;
3855 
3856 	do {
3857 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
3858 		smp_rmb();
3859 		last_update_time = cfs_rq->avg.last_update_time;
3860 	} while (last_update_time != last_update_time_copy);
3861 
3862 	return last_update_time;
3863 }
3864 #else
cfs_rq_last_update_time(struct cfs_rq * cfs_rq)3865 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3866 {
3867 	return cfs_rq->avg.last_update_time;
3868 }
3869 #endif
3870 
3871 /*
3872  * Synchronize entity load avg of dequeued entity without locking
3873  * the previous rq.
3874  */
sync_entity_load_avg(struct sched_entity * se)3875 static void sync_entity_load_avg(struct sched_entity *se)
3876 {
3877 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3878 	u64 last_update_time;
3879 
3880 	last_update_time = cfs_rq_last_update_time(cfs_rq);
3881 	trace_android_vh_prepare_update_load_avg_se(se, 0);
3882 	__update_load_avg_blocked_se(last_update_time, se);
3883 	trace_android_vh_finish_update_load_avg_se(se, 0);
3884 }
3885 
3886 /*
3887  * Task first catches up with cfs_rq, and then subtract
3888  * itself from the cfs_rq (task must be off the queue now).
3889  */
remove_entity_load_avg(struct sched_entity * se)3890 static void remove_entity_load_avg(struct sched_entity *se)
3891 {
3892 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3893 	unsigned long flags;
3894 
3895 	/*
3896 	 * tasks cannot exit without having gone through wake_up_new_task() ->
3897 	 * post_init_entity_util_avg() which will have added things to the
3898 	 * cfs_rq, so we can remove unconditionally.
3899 	 */
3900 
3901 	sync_entity_load_avg(se);
3902 
3903 	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
3904 	++cfs_rq->removed.nr;
3905 	cfs_rq->removed.util_avg	+= se->avg.util_avg;
3906 	cfs_rq->removed.load_avg	+= se->avg.load_avg;
3907 	cfs_rq->removed.runnable_avg	+= se->avg.runnable_avg;
3908 	raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
3909 }
3910 
cfs_rq_runnable_avg(struct cfs_rq * cfs_rq)3911 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
3912 {
3913 	return cfs_rq->avg.runnable_avg;
3914 }
3915 
cfs_rq_load_avg(struct cfs_rq * cfs_rq)3916 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3917 {
3918 	return cfs_rq->avg.load_avg;
3919 }
3920 
3921 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
3922 
task_util(struct task_struct * p)3923 static inline unsigned long task_util(struct task_struct *p)
3924 {
3925 	return READ_ONCE(p->se.avg.util_avg);
3926 }
3927 
_task_util_est(struct task_struct * p)3928 static inline unsigned long _task_util_est(struct task_struct *p)
3929 {
3930 	struct util_est ue = READ_ONCE(p->se.avg.util_est);
3931 
3932 	return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
3933 }
3934 
task_util_est(struct task_struct * p)3935 static inline unsigned long task_util_est(struct task_struct *p)
3936 {
3937 	return max(task_util(p), _task_util_est(p));
3938 }
3939 
3940 #ifdef CONFIG_UCLAMP_TASK
uclamp_task_util(struct task_struct * p)3941 static inline unsigned long uclamp_task_util(struct task_struct *p)
3942 {
3943 	return clamp(task_util_est(p),
3944 		     uclamp_eff_value(p, UCLAMP_MIN),
3945 		     uclamp_eff_value(p, UCLAMP_MAX));
3946 }
3947 #else
uclamp_task_util(struct task_struct * p)3948 static inline unsigned long uclamp_task_util(struct task_struct *p)
3949 {
3950 	return task_util_est(p);
3951 }
3952 #endif
3953 
util_est_enqueue(struct cfs_rq * cfs_rq,struct task_struct * p)3954 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
3955 				    struct task_struct *p)
3956 {
3957 	unsigned int enqueued;
3958 
3959 	if (!sched_feat(UTIL_EST))
3960 		return;
3961 
3962 	/* Update root cfs_rq's estimated utilization */
3963 	enqueued  = cfs_rq->avg.util_est.enqueued;
3964 	enqueued += _task_util_est(p);
3965 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
3966 
3967 	trace_sched_util_est_cfs_tp(cfs_rq);
3968 }
3969 
util_est_dequeue(struct cfs_rq * cfs_rq,struct task_struct * p)3970 static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
3971 				    struct task_struct *p)
3972 {
3973 	unsigned int enqueued;
3974 
3975 	if (!sched_feat(UTIL_EST))
3976 		return;
3977 
3978 	/* Update root cfs_rq's estimated utilization */
3979 	enqueued  = cfs_rq->avg.util_est.enqueued;
3980 	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
3981 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
3982 
3983 	trace_sched_util_est_cfs_tp(cfs_rq);
3984 }
3985 
3986 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
3987 
3988 /*
3989  * Check if a (signed) value is within a specified (unsigned) margin,
3990  * based on the observation that:
3991  *
3992  *     abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
3993  *
3994  * NOTE: this only works when value + maring < INT_MAX.
3995  */
within_margin(int value,int margin)3996 static inline bool within_margin(int value, int margin)
3997 {
3998 	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
3999 }
4000 
util_est_update(struct cfs_rq * cfs_rq,struct task_struct * p,bool task_sleep)4001 static inline void util_est_update(struct cfs_rq *cfs_rq,
4002 				   struct task_struct *p,
4003 				   bool task_sleep)
4004 {
4005 	long last_ewma_diff, last_enqueued_diff;
4006 	struct util_est ue;
4007 	int ret = 0;
4008 
4009 	trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret);
4010 	if (ret)
4011 		return;
4012 
4013 	if (!sched_feat(UTIL_EST))
4014 		return;
4015 
4016 	/*
4017 	 * Skip update of task's estimated utilization when the task has not
4018 	 * yet completed an activation, e.g. being migrated.
4019 	 */
4020 	if (!task_sleep)
4021 		return;
4022 
4023 	/*
4024 	 * If the PELT values haven't changed since enqueue time,
4025 	 * skip the util_est update.
4026 	 */
4027 	ue = p->se.avg.util_est;
4028 	if (ue.enqueued & UTIL_AVG_UNCHANGED)
4029 		return;
4030 
4031 	last_enqueued_diff = ue.enqueued;
4032 
4033 	/*
4034 	 * Reset EWMA on utilization increases, the moving average is used only
4035 	 * to smooth utilization decreases.
4036 	 */
4037 	ue.enqueued = task_util(p);
4038 	if (sched_feat(UTIL_EST_FASTUP)) {
4039 		if (ue.ewma < ue.enqueued) {
4040 			ue.ewma = ue.enqueued;
4041 			goto done;
4042 		}
4043 	}
4044 
4045 	/*
4046 	 * Skip update of task's estimated utilization when its members are
4047 	 * already ~1% close to its last activation value.
4048 	 */
4049 	last_ewma_diff = ue.enqueued - ue.ewma;
4050 	last_enqueued_diff -= ue.enqueued;
4051 	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
4052 		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
4053 			goto done;
4054 
4055 		return;
4056 	}
4057 
4058 	/*
4059 	 * To avoid overestimation of actual task utilization, skip updates if
4060 	 * we cannot grant there is idle time in this CPU.
4061 	 */
4062 	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
4063 		return;
4064 
4065 	/*
4066 	 * Update Task's estimated utilization
4067 	 *
4068 	 * When *p completes an activation we can consolidate another sample
4069 	 * of the task size. This is done by storing the current PELT value
4070 	 * as ue.enqueued and by using this value to update the Exponential
4071 	 * Weighted Moving Average (EWMA):
4072 	 *
4073 	 *  ewma(t) = w *  task_util(p) + (1-w) * ewma(t-1)
4074 	 *          = w *  task_util(p) +         ewma(t-1)  - w * ewma(t-1)
4075 	 *          = w * (task_util(p) -         ewma(t-1)) +     ewma(t-1)
4076 	 *          = w * (      last_ewma_diff            ) +     ewma(t-1)
4077 	 *          = w * (last_ewma_diff  +  ewma(t-1) / w)
4078 	 *
4079 	 * Where 'w' is the weight of new samples, which is configured to be
4080 	 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
4081 	 */
4082 	ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
4083 	ue.ewma  += last_ewma_diff;
4084 	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
4085 done:
4086 	ue.enqueued |= UTIL_AVG_UNCHANGED;
4087 	WRITE_ONCE(p->se.avg.util_est, ue);
4088 
4089 	trace_sched_util_est_se_tp(&p->se);
4090 }
4091 
task_fits_capacity(struct task_struct * p,long capacity)4092 static inline int task_fits_capacity(struct task_struct *p, long capacity)
4093 {
4094 	return fits_capacity(uclamp_task_util(p), capacity);
4095 }
4096 
update_misfit_status(struct task_struct * p,struct rq * rq)4097 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4098 {
4099 	bool need_update = true;
4100 
4101 	trace_android_rvh_update_misfit_status(p, rq, &need_update);
4102 	if (!static_branch_unlikely(&sched_asym_cpucapacity) || !need_update)
4103 		return;
4104 
4105 	if (!p || p->nr_cpus_allowed == 1) {
4106 		rq->misfit_task_load = 0;
4107 		return;
4108 	}
4109 
4110 	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
4111 		rq->misfit_task_load = 0;
4112 		return;
4113 	}
4114 
4115 	/*
4116 	 * Make sure that misfit_task_load will not be null even if
4117 	 * task_h_load() returns 0.
4118 	 */
4119 	rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
4120 }
4121 
4122 #else /* CONFIG_SMP */
4123 
4124 #define UPDATE_TG	0x0
4125 #define SKIP_AGE_LOAD	0x0
4126 #define DO_ATTACH	0x0
4127 
update_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se,int not_used1)4128 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
4129 {
4130 	cfs_rq_util_change(cfs_rq, 0);
4131 }
4132 
remove_entity_load_avg(struct sched_entity * se)4133 static inline void remove_entity_load_avg(struct sched_entity *se) {}
4134 
4135 static inline void
attach_entity_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)4136 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
4137 static inline void
detach_entity_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se)4138 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
4139 
newidle_balance(struct rq * rq,struct rq_flags * rf)4140 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
4141 {
4142 	return 0;
4143 }
4144 
4145 static inline void
util_est_enqueue(struct cfs_rq * cfs_rq,struct task_struct * p)4146 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
4147 
4148 static inline void
util_est_dequeue(struct cfs_rq * cfs_rq,struct task_struct * p)4149 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
4150 
4151 static inline void
util_est_update(struct cfs_rq * cfs_rq,struct task_struct * p,bool task_sleep)4152 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
4153 		bool task_sleep) {}
update_misfit_status(struct task_struct * p,struct rq * rq)4154 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
4155 
4156 #endif /* CONFIG_SMP */
4157 
check_spread(struct cfs_rq * cfs_rq,struct sched_entity * se)4158 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
4159 {
4160 #ifdef CONFIG_SCHED_DEBUG
4161 	s64 d = se->vruntime - cfs_rq->min_vruntime;
4162 
4163 	if (d < 0)
4164 		d = -d;
4165 
4166 	if (d > 3*sysctl_sched_latency)
4167 		schedstat_inc(cfs_rq->nr_spread_over);
4168 #endif
4169 }
4170 
4171 static void
place_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,int initial)4172 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
4173 {
4174 	u64 vruntime = cfs_rq->min_vruntime;
4175 
4176 	/*
4177 	 * The 'current' period is already promised to the current tasks,
4178 	 * however the extra weight of the new task will slow them down a
4179 	 * little, place the new task so that it fits in the slot that
4180 	 * stays open at the end.
4181 	 */
4182 	if (initial && sched_feat(START_DEBIT))
4183 		vruntime += sched_vslice(cfs_rq, se);
4184 
4185 	/* sleeps up to a single latency don't count. */
4186 	if (!initial) {
4187 		unsigned long thresh = sysctl_sched_latency;
4188 
4189 		/*
4190 		 * Halve their sleep time's effect, to allow
4191 		 * for a gentler effect of sleepers:
4192 		 */
4193 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
4194 			thresh >>= 1;
4195 
4196 		vruntime -= thresh;
4197 	}
4198 
4199 	/* ensure we never gain time by being placed backwards. */
4200 	se->vruntime = max_vruntime(se->vruntime, vruntime);
4201 	trace_android_rvh_place_entity(cfs_rq, se, initial, vruntime);
4202 }
4203 
4204 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4205 
check_schedstat_required(void)4206 static inline void check_schedstat_required(void)
4207 {
4208 #ifdef CONFIG_SCHEDSTATS
4209 	if (schedstat_enabled())
4210 		return;
4211 
4212 	/* Force schedstat enabled if a dependent tracepoint is active */
4213 	if (trace_sched_stat_wait_enabled()    ||
4214 			trace_sched_stat_sleep_enabled()   ||
4215 			trace_sched_stat_iowait_enabled()  ||
4216 			trace_sched_stat_blocked_enabled() ||
4217 			trace_sched_stat_runtime_enabled())  {
4218 		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
4219 			     "stat_blocked and stat_runtime require the "
4220 			     "kernel parameter schedstats=enable or "
4221 			     "kernel.sched_schedstats=1\n");
4222 	}
4223 #endif
4224 }
4225 
4226 static inline bool cfs_bandwidth_used(void);
4227 
4228 /*
4229  * MIGRATION
4230  *
4231  *	dequeue
4232  *	  update_curr()
4233  *	    update_min_vruntime()
4234  *	  vruntime -= min_vruntime
4235  *
4236  *	enqueue
4237  *	  update_curr()
4238  *	    update_min_vruntime()
4239  *	  vruntime += min_vruntime
4240  *
4241  * this way the vruntime transition between RQs is done when both
4242  * min_vruntime are up-to-date.
4243  *
4244  * WAKEUP (remote)
4245  *
4246  *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
4247  *	  vruntime -= min_vruntime
4248  *
4249  *	enqueue
4250  *	  update_curr()
4251  *	    update_min_vruntime()
4252  *	  vruntime += min_vruntime
4253  *
4254  * this way we don't have the most up-to-date min_vruntime on the originating
4255  * CPU and an up-to-date min_vruntime on the destination CPU.
4256  */
4257 
4258 static void
enqueue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags)4259 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4260 {
4261 	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
4262 	bool curr = cfs_rq->curr == se;
4263 
4264 	/*
4265 	 * If we're the current task, we must renormalise before calling
4266 	 * update_curr().
4267 	 */
4268 	if (renorm && curr)
4269 		se->vruntime += cfs_rq->min_vruntime;
4270 
4271 	update_curr(cfs_rq);
4272 
4273 	/*
4274 	 * Otherwise, renormalise after, such that we're placed at the current
4275 	 * moment in time, instead of some random moment in the past. Being
4276 	 * placed in the past could significantly boost this task to the
4277 	 * fairness detriment of existing tasks.
4278 	 */
4279 	if (renorm && !curr)
4280 		se->vruntime += cfs_rq->min_vruntime;
4281 
4282 	/*
4283 	 * When enqueuing a sched_entity, we must:
4284 	 *   - Update loads to have both entity and cfs_rq synced with now.
4285 	 *   - Add its load to cfs_rq->runnable_avg
4286 	 *   - For group_entity, update its weight to reflect the new share of
4287 	 *     its group cfs_rq
4288 	 *   - Add its new weight to cfs_rq->load.weight
4289 	 */
4290 	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
4291 	se_update_runnable(se);
4292 	update_cfs_group(se);
4293 	account_entity_enqueue(cfs_rq, se);
4294 
4295 	if (flags & ENQUEUE_WAKEUP)
4296 		place_entity(cfs_rq, se, 0);
4297 
4298 	check_schedstat_required();
4299 	update_stats_enqueue(cfs_rq, se, flags);
4300 	check_spread(cfs_rq, se);
4301 	if (!curr)
4302 		__enqueue_entity(cfs_rq, se);
4303 	se->on_rq = 1;
4304 
4305 	/*
4306 	 * When bandwidth control is enabled, cfs might have been removed
4307 	 * because of a parent been throttled but cfs->nr_running > 1. Try to
4308 	 * add it unconditionnally.
4309 	 */
4310 	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
4311 		list_add_leaf_cfs_rq(cfs_rq);
4312 
4313 	if (cfs_rq->nr_running == 1)
4314 		check_enqueue_throttle(cfs_rq);
4315 }
4316 
__clear_buddies_last(struct sched_entity * se)4317 static void __clear_buddies_last(struct sched_entity *se)
4318 {
4319 	for_each_sched_entity(se) {
4320 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
4321 		if (cfs_rq->last != se)
4322 			break;
4323 
4324 		cfs_rq->last = NULL;
4325 	}
4326 }
4327 
__clear_buddies_next(struct sched_entity * se)4328 static void __clear_buddies_next(struct sched_entity *se)
4329 {
4330 	for_each_sched_entity(se) {
4331 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
4332 		if (cfs_rq->next != se)
4333 			break;
4334 
4335 		cfs_rq->next = NULL;
4336 	}
4337 }
4338 
__clear_buddies_skip(struct sched_entity * se)4339 static void __clear_buddies_skip(struct sched_entity *se)
4340 {
4341 	for_each_sched_entity(se) {
4342 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
4343 		if (cfs_rq->skip != se)
4344 			break;
4345 
4346 		cfs_rq->skip = NULL;
4347 	}
4348 }
4349 
clear_buddies(struct cfs_rq * cfs_rq,struct sched_entity * se)4350 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
4351 {
4352 	if (cfs_rq->last == se)
4353 		__clear_buddies_last(se);
4354 
4355 	if (cfs_rq->next == se)
4356 		__clear_buddies_next(se);
4357 
4358 	if (cfs_rq->skip == se)
4359 		__clear_buddies_skip(se);
4360 }
4361 
4362 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4363 
4364 static void
dequeue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags)4365 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4366 {
4367 	/*
4368 	 * Update run-time statistics of the 'current'.
4369 	 */
4370 	update_curr(cfs_rq);
4371 
4372 	/*
4373 	 * When dequeuing a sched_entity, we must:
4374 	 *   - Update loads to have both entity and cfs_rq synced with now.
4375 	 *   - Subtract its load from the cfs_rq->runnable_avg.
4376 	 *   - Subtract its previous weight from cfs_rq->load.weight.
4377 	 *   - For group entity, update its weight to reflect the new share
4378 	 *     of its group cfs_rq.
4379 	 */
4380 	update_load_avg(cfs_rq, se, UPDATE_TG);
4381 	se_update_runnable(se);
4382 
4383 	update_stats_dequeue(cfs_rq, se, flags);
4384 
4385 	clear_buddies(cfs_rq, se);
4386 
4387 	if (se != cfs_rq->curr)
4388 		__dequeue_entity(cfs_rq, se);
4389 	se->on_rq = 0;
4390 	account_entity_dequeue(cfs_rq, se);
4391 
4392 	/*
4393 	 * Normalize after update_curr(); which will also have moved
4394 	 * min_vruntime if @se is the one holding it back. But before doing
4395 	 * update_min_vruntime() again, which will discount @se's position and
4396 	 * can move min_vruntime forward still more.
4397 	 */
4398 	if (!(flags & DEQUEUE_SLEEP))
4399 		se->vruntime -= cfs_rq->min_vruntime;
4400 
4401 	/* return excess runtime on last dequeue */
4402 	return_cfs_rq_runtime(cfs_rq);
4403 
4404 	update_cfs_group(se);
4405 
4406 	/*
4407 	 * Now advance min_vruntime if @se was the entity holding it back,
4408 	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
4409 	 * put back on, and if we advance min_vruntime, we'll be placed back
4410 	 * further than we started -- ie. we'll be penalized.
4411 	 */
4412 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
4413 		update_min_vruntime(cfs_rq);
4414 }
4415 
4416 /*
4417  * Preempt the current task with a newly woken task if needed:
4418  */
4419 static void
check_preempt_tick(struct cfs_rq * cfs_rq,struct sched_entity * curr)4420 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4421 {
4422 	unsigned long ideal_runtime, delta_exec;
4423 	struct sched_entity *se;
4424 	s64 delta;
4425 	bool skip_preempt = false;
4426 
4427 	ideal_runtime = sched_slice(cfs_rq, curr);
4428 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
4429 	trace_android_rvh_check_preempt_tick(current, &ideal_runtime, &skip_preempt,
4430 			delta_exec, cfs_rq, curr, sysctl_sched_min_granularity);
4431 	if (skip_preempt)
4432 		return;
4433 	if (delta_exec > ideal_runtime) {
4434 		resched_curr(rq_of(cfs_rq));
4435 		/*
4436 		 * The current task ran long enough, ensure it doesn't get
4437 		 * re-elected due to buddy favours.
4438 		 */
4439 		clear_buddies(cfs_rq, curr);
4440 		return;
4441 	}
4442 
4443 	/*
4444 	 * Ensure that a task that missed wakeup preemption by a
4445 	 * narrow margin doesn't have to wait for a full slice.
4446 	 * This also mitigates buddy induced latencies under load.
4447 	 */
4448 	if (delta_exec < sysctl_sched_min_granularity)
4449 		return;
4450 
4451 	se = __pick_first_entity(cfs_rq);
4452 	delta = curr->vruntime - se->vruntime;
4453 
4454 	if (delta < 0)
4455 		return;
4456 
4457 	if (delta > ideal_runtime)
4458 		resched_curr(rq_of(cfs_rq));
4459 }
4460 
set_next_entity(struct cfs_rq * cfs_rq,struct sched_entity * se)4461 void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
4462 {
4463 	/* 'current' is not kept within the tree. */
4464 	if (se->on_rq) {
4465 		/*
4466 		 * Any task has to be enqueued before it get to execute on
4467 		 * a CPU. So account for the time it spent waiting on the
4468 		 * runqueue.
4469 		 */
4470 		update_stats_wait_end(cfs_rq, se);
4471 		__dequeue_entity(cfs_rq, se);
4472 		update_load_avg(cfs_rq, se, UPDATE_TG);
4473 	}
4474 
4475 	update_stats_curr_start(cfs_rq, se);
4476 	cfs_rq->curr = se;
4477 
4478 	/*
4479 	 * Track our maximum slice length, if the CPU's load is at
4480 	 * least twice that of our own weight (i.e. dont track it
4481 	 * when there are only lesser-weight tasks around):
4482 	 */
4483 	if (schedstat_enabled() &&
4484 	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
4485 		schedstat_set(se->statistics.slice_max,
4486 			max((u64)schedstat_val(se->statistics.slice_max),
4487 			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
4488 	}
4489 
4490 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
4491 }
4492 EXPORT_SYMBOL_GPL(set_next_entity);
4493 
4494 
4495 static int
4496 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4497 
4498 /*
4499  * Pick the next process, keeping these things in mind, in this order:
4500  * 1) keep things fair between processes/task groups
4501  * 2) pick the "next" process, since someone really wants that to run
4502  * 3) pick the "last" process, for cache locality
4503  * 4) do not run the "skip" process, if something else is available
4504  */
4505 static struct sched_entity *
pick_next_entity(struct cfs_rq * cfs_rq,struct sched_entity * curr)4506 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4507 {
4508 	struct sched_entity *left = __pick_first_entity(cfs_rq);
4509 	struct sched_entity *se = NULL;
4510 
4511 	trace_android_rvh_pick_next_entity(cfs_rq, curr, &se);
4512 	if (se)
4513 		goto done;
4514 
4515 	/*
4516 	 * If curr is set we have to see if its left of the leftmost entity
4517 	 * still in the tree, provided there was anything in the tree at all.
4518 	 */
4519 	if (!left || (curr && entity_before(curr, left)))
4520 		left = curr;
4521 
4522 	se = left; /* ideally we run the leftmost entity */
4523 
4524 	/*
4525 	 * Avoid running the skip buddy, if running something else can
4526 	 * be done without getting too unfair.
4527 	 */
4528 	if (cfs_rq->skip == se) {
4529 		struct sched_entity *second;
4530 
4531 		if (se == curr) {
4532 			second = __pick_first_entity(cfs_rq);
4533 		} else {
4534 			second = __pick_next_entity(se);
4535 			if (!second || (curr && entity_before(curr, second)))
4536 				second = curr;
4537 		}
4538 
4539 		if (second && wakeup_preempt_entity(second, left) < 1)
4540 			se = second;
4541 	}
4542 
4543 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
4544 		/*
4545 		 * Someone really wants this to run. If it's not unfair, run it.
4546 		 */
4547 		se = cfs_rq->next;
4548 	} else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
4549 		/*
4550 		 * Prefer last buddy, try to return the CPU to a preempted task.
4551 		 */
4552 		se = cfs_rq->last;
4553 	}
4554 
4555 done:
4556 	clear_buddies(cfs_rq, se);
4557 
4558 	return se;
4559 }
4560 
4561 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4562 
put_prev_entity(struct cfs_rq * cfs_rq,struct sched_entity * prev)4563 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
4564 {
4565 	/*
4566 	 * If still on the runqueue then deactivate_task()
4567 	 * was not called and update_curr() has to be done:
4568 	 */
4569 	if (prev->on_rq)
4570 		update_curr(cfs_rq);
4571 
4572 	/* throttle cfs_rqs exceeding runtime */
4573 	check_cfs_rq_runtime(cfs_rq);
4574 
4575 	check_spread(cfs_rq, prev);
4576 
4577 	if (prev->on_rq) {
4578 		update_stats_wait_start(cfs_rq, prev);
4579 		/* Put 'current' back into the tree. */
4580 		__enqueue_entity(cfs_rq, prev);
4581 		/* in !on_rq case, update occurred at dequeue */
4582 		update_load_avg(cfs_rq, prev, 0);
4583 	}
4584 	cfs_rq->curr = NULL;
4585 }
4586 
4587 static void
entity_tick(struct cfs_rq * cfs_rq,struct sched_entity * curr,int queued)4588 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
4589 {
4590 	/*
4591 	 * Update run-time statistics of the 'current'.
4592 	 */
4593 	update_curr(cfs_rq);
4594 
4595 	/*
4596 	 * Ensure that runnable average is periodically updated.
4597 	 */
4598 	update_load_avg(cfs_rq, curr, UPDATE_TG);
4599 	update_cfs_group(curr);
4600 
4601 #ifdef CONFIG_SCHED_HRTICK
4602 	/*
4603 	 * queued ticks are scheduled to match the slice, so don't bother
4604 	 * validating it and just reschedule.
4605 	 */
4606 	if (queued) {
4607 		resched_curr(rq_of(cfs_rq));
4608 		return;
4609 	}
4610 	/*
4611 	 * don't let the period tick interfere with the hrtick preemption
4612 	 */
4613 	if (!sched_feat(DOUBLE_TICK) &&
4614 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
4615 		return;
4616 #endif
4617 
4618 	if (cfs_rq->nr_running > 1)
4619 		check_preempt_tick(cfs_rq, curr);
4620 }
4621 
4622 
4623 /**************************************************
4624  * CFS bandwidth control machinery
4625  */
4626 
4627 #ifdef CONFIG_CFS_BANDWIDTH
4628 
4629 #ifdef CONFIG_JUMP_LABEL
4630 static struct static_key __cfs_bandwidth_used;
4631 
cfs_bandwidth_used(void)4632 static inline bool cfs_bandwidth_used(void)
4633 {
4634 	return static_key_false(&__cfs_bandwidth_used);
4635 }
4636 
cfs_bandwidth_usage_inc(void)4637 void cfs_bandwidth_usage_inc(void)
4638 {
4639 	static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
4640 }
4641 
cfs_bandwidth_usage_dec(void)4642 void cfs_bandwidth_usage_dec(void)
4643 {
4644 	static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
4645 }
4646 #else /* CONFIG_JUMP_LABEL */
cfs_bandwidth_used(void)4647 static bool cfs_bandwidth_used(void)
4648 {
4649 	return true;
4650 }
4651 
cfs_bandwidth_usage_inc(void)4652 void cfs_bandwidth_usage_inc(void) {}
cfs_bandwidth_usage_dec(void)4653 void cfs_bandwidth_usage_dec(void) {}
4654 #endif /* CONFIG_JUMP_LABEL */
4655 
4656 /*
4657  * default period for cfs group bandwidth.
4658  * default: 0.1s, units: nanoseconds
4659  */
default_cfs_period(void)4660 static inline u64 default_cfs_period(void)
4661 {
4662 	return 100000000ULL;
4663 }
4664 
sched_cfs_bandwidth_slice(void)4665 static inline u64 sched_cfs_bandwidth_slice(void)
4666 {
4667 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4668 }
4669 
4670 /*
4671  * Replenish runtime according to assigned quota. We use sched_clock_cpu
4672  * directly instead of rq->clock to avoid adding additional synchronization
4673  * around rq->lock.
4674  *
4675  * requires cfs_b->lock
4676  */
__refill_cfs_bandwidth_runtime(struct cfs_bandwidth * cfs_b)4677 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
4678 {
4679 	if (cfs_b->quota != RUNTIME_INF)
4680 		cfs_b->runtime = cfs_b->quota;
4681 }
4682 
tg_cfs_bandwidth(struct task_group * tg)4683 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4684 {
4685 	return &tg->cfs_bandwidth;
4686 }
4687 
4688 /* returns 0 on failure to allocate runtime */
__assign_cfs_rq_runtime(struct cfs_bandwidth * cfs_b,struct cfs_rq * cfs_rq,u64 target_runtime)4689 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
4690 				   struct cfs_rq *cfs_rq, u64 target_runtime)
4691 {
4692 	u64 min_amount, amount = 0;
4693 
4694 	lockdep_assert_held(&cfs_b->lock);
4695 
4696 	/* note: this is a positive sum as runtime_remaining <= 0 */
4697 	min_amount = target_runtime - cfs_rq->runtime_remaining;
4698 
4699 	if (cfs_b->quota == RUNTIME_INF)
4700 		amount = min_amount;
4701 	else {
4702 		start_cfs_bandwidth(cfs_b);
4703 
4704 		if (cfs_b->runtime > 0) {
4705 			amount = min(cfs_b->runtime, min_amount);
4706 			cfs_b->runtime -= amount;
4707 			cfs_b->idle = 0;
4708 		}
4709 	}
4710 
4711 	cfs_rq->runtime_remaining += amount;
4712 
4713 	return cfs_rq->runtime_remaining > 0;
4714 }
4715 
4716 /* returns 0 on failure to allocate runtime */
assign_cfs_rq_runtime(struct cfs_rq * cfs_rq)4717 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4718 {
4719 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4720 	int ret;
4721 
4722 	raw_spin_lock(&cfs_b->lock);
4723 	ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
4724 	raw_spin_unlock(&cfs_b->lock);
4725 
4726 	return ret;
4727 }
4728 
__account_cfs_rq_runtime(struct cfs_rq * cfs_rq,u64 delta_exec)4729 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4730 {
4731 	/* dock delta_exec before expiring quota (as it could span periods) */
4732 	cfs_rq->runtime_remaining -= delta_exec;
4733 
4734 	if (likely(cfs_rq->runtime_remaining > 0))
4735 		return;
4736 
4737 	if (cfs_rq->throttled)
4738 		return;
4739 	/*
4740 	 * if we're unable to extend our runtime we resched so that the active
4741 	 * hierarchy can be throttled
4742 	 */
4743 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
4744 		resched_curr(rq_of(cfs_rq));
4745 }
4746 
4747 static __always_inline
account_cfs_rq_runtime(struct cfs_rq * cfs_rq,u64 delta_exec)4748 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4749 {
4750 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
4751 		return;
4752 
4753 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
4754 }
4755 
cfs_rq_throttled(struct cfs_rq * cfs_rq)4756 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4757 {
4758 	return cfs_bandwidth_used() && cfs_rq->throttled;
4759 }
4760 
4761 /* check whether cfs_rq, or any parent, is throttled */
throttled_hierarchy(struct cfs_rq * cfs_rq)4762 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4763 {
4764 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
4765 }
4766 
4767 /*
4768  * Ensure that neither of the group entities corresponding to src_cpu or
4769  * dest_cpu are members of a throttled hierarchy when performing group
4770  * load-balance operations.
4771  */
throttled_lb_pair(struct task_group * tg,int src_cpu,int dest_cpu)4772 static inline int throttled_lb_pair(struct task_group *tg,
4773 				    int src_cpu, int dest_cpu)
4774 {
4775 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4776 
4777 	src_cfs_rq = tg->cfs_rq[src_cpu];
4778 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
4779 
4780 	return throttled_hierarchy(src_cfs_rq) ||
4781 	       throttled_hierarchy(dest_cfs_rq);
4782 }
4783 
tg_unthrottle_up(struct task_group * tg,void * data)4784 static int tg_unthrottle_up(struct task_group *tg, void *data)
4785 {
4786 	struct rq *rq = data;
4787 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4788 
4789 	cfs_rq->throttle_count--;
4790 	if (!cfs_rq->throttle_count) {
4791 		cfs_rq->throttled_clock_pelt_time += rq_clock_task_mult(rq) -
4792 					     cfs_rq->throttled_clock_pelt;
4793 
4794 		/* Add cfs_rq with already running entity in the list */
4795 		if (cfs_rq->nr_running >= 1)
4796 			list_add_leaf_cfs_rq(cfs_rq);
4797 	}
4798 
4799 	return 0;
4800 }
4801 
tg_throttle_down(struct task_group * tg,void * data)4802 static int tg_throttle_down(struct task_group *tg, void *data)
4803 {
4804 	struct rq *rq = data;
4805 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4806 
4807 	/* group is entering throttled state, stop time */
4808 	if (!cfs_rq->throttle_count) {
4809 		cfs_rq->throttled_clock_pelt = rq_clock_task_mult(rq);
4810 		list_del_leaf_cfs_rq(cfs_rq);
4811 	}
4812 	cfs_rq->throttle_count++;
4813 
4814 	return 0;
4815 }
4816 
throttle_cfs_rq(struct cfs_rq * cfs_rq)4817 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
4818 {
4819 	struct rq *rq = rq_of(cfs_rq);
4820 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4821 	struct sched_entity *se;
4822 	long task_delta, idle_task_delta, dequeue = 1;
4823 
4824 	raw_spin_lock(&cfs_b->lock);
4825 	/* This will start the period timer if necessary */
4826 	if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
4827 		/*
4828 		 * We have raced with bandwidth becoming available, and if we
4829 		 * actually throttled the timer might not unthrottle us for an
4830 		 * entire period. We additionally needed to make sure that any
4831 		 * subsequent check_cfs_rq_runtime calls agree not to throttle
4832 		 * us, as we may commit to do cfs put_prev+pick_next, so we ask
4833 		 * for 1ns of runtime rather than just check cfs_b.
4834 		 */
4835 		dequeue = 0;
4836 	} else {
4837 		list_add_tail_rcu(&cfs_rq->throttled_list,
4838 				  &cfs_b->throttled_cfs_rq);
4839 	}
4840 	raw_spin_unlock(&cfs_b->lock);
4841 
4842 	if (!dequeue)
4843 		return false;  /* Throttle no longer required. */
4844 
4845 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
4846 
4847 	/* freeze hierarchy runnable averages while throttled */
4848 	rcu_read_lock();
4849 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
4850 	rcu_read_unlock();
4851 
4852 	task_delta = cfs_rq->h_nr_running;
4853 	idle_task_delta = cfs_rq->idle_h_nr_running;
4854 	for_each_sched_entity(se) {
4855 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
4856 		/* throttled entity or throttle-on-deactivate */
4857 		if (!se->on_rq)
4858 			break;
4859 
4860 		if (dequeue) {
4861 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
4862 		} else {
4863 			update_load_avg(qcfs_rq, se, 0);
4864 			se_update_runnable(se);
4865 		}
4866 
4867 		qcfs_rq->h_nr_running -= task_delta;
4868 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
4869 
4870 		if (qcfs_rq->load.weight)
4871 			dequeue = 0;
4872 	}
4873 
4874 	if (!se)
4875 		sub_nr_running(rq, task_delta);
4876 
4877 	/*
4878 	 * Note: distribution will already see us throttled via the
4879 	 * throttled-list.  rq->lock protects completion.
4880 	 */
4881 	cfs_rq->throttled = 1;
4882 	cfs_rq->throttled_clock = rq_clock(rq);
4883 	return true;
4884 }
4885 
unthrottle_cfs_rq(struct cfs_rq * cfs_rq)4886 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
4887 {
4888 	struct rq *rq = rq_of(cfs_rq);
4889 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4890 	struct sched_entity *se;
4891 	long task_delta, idle_task_delta;
4892 
4893 	se = cfs_rq->tg->se[cpu_of(rq)];
4894 
4895 	cfs_rq->throttled = 0;
4896 
4897 	update_rq_clock(rq);
4898 
4899 	raw_spin_lock(&cfs_b->lock);
4900 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
4901 	list_del_rcu(&cfs_rq->throttled_list);
4902 	raw_spin_unlock(&cfs_b->lock);
4903 
4904 	/* update hierarchical throttle state */
4905 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4906 
4907 	if (!cfs_rq->load.weight)
4908 		return;
4909 
4910 	task_delta = cfs_rq->h_nr_running;
4911 	idle_task_delta = cfs_rq->idle_h_nr_running;
4912 	for_each_sched_entity(se) {
4913 		if (se->on_rq)
4914 			break;
4915 		cfs_rq = cfs_rq_of(se);
4916 		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4917 
4918 		cfs_rq->h_nr_running += task_delta;
4919 		cfs_rq->idle_h_nr_running += idle_task_delta;
4920 
4921 		/* end evaluation on encountering a throttled cfs_rq */
4922 		if (cfs_rq_throttled(cfs_rq))
4923 			goto unthrottle_throttle;
4924 	}
4925 
4926 	for_each_sched_entity(se) {
4927 		cfs_rq = cfs_rq_of(se);
4928 
4929 		update_load_avg(cfs_rq, se, UPDATE_TG);
4930 		se_update_runnable(se);
4931 
4932 		cfs_rq->h_nr_running += task_delta;
4933 		cfs_rq->idle_h_nr_running += idle_task_delta;
4934 
4935 
4936 		/* end evaluation on encountering a throttled cfs_rq */
4937 		if (cfs_rq_throttled(cfs_rq))
4938 			goto unthrottle_throttle;
4939 
4940 		/*
4941 		 * One parent has been throttled and cfs_rq removed from the
4942 		 * list. Add it back to not break the leaf list.
4943 		 */
4944 		if (throttled_hierarchy(cfs_rq))
4945 			list_add_leaf_cfs_rq(cfs_rq);
4946 	}
4947 
4948 	/* At this point se is NULL and we are at root level*/
4949 	add_nr_running(rq, task_delta);
4950 
4951 unthrottle_throttle:
4952 	/*
4953 	 * The cfs_rq_throttled() breaks in the above iteration can result in
4954 	 * incomplete leaf list maintenance, resulting in triggering the
4955 	 * assertion below.
4956 	 */
4957 	for_each_sched_entity(se) {
4958 		cfs_rq = cfs_rq_of(se);
4959 
4960 		if (list_add_leaf_cfs_rq(cfs_rq))
4961 			break;
4962 	}
4963 
4964 	assert_list_leaf_cfs_rq(rq);
4965 
4966 	/* Determine whether we need to wake up potentially idle CPU: */
4967 	if (rq->curr == rq->idle && rq->cfs.nr_running)
4968 		resched_curr(rq);
4969 }
4970 
distribute_cfs_runtime(struct cfs_bandwidth * cfs_b)4971 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
4972 {
4973 	struct cfs_rq *cfs_rq;
4974 	u64 runtime, remaining = 1;
4975 
4976 	rcu_read_lock();
4977 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4978 				throttled_list) {
4979 		struct rq *rq = rq_of(cfs_rq);
4980 		struct rq_flags rf;
4981 
4982 		rq_lock_irqsave(rq, &rf);
4983 		if (!cfs_rq_throttled(cfs_rq))
4984 			goto next;
4985 
4986 		/* By the above check, this should never be true */
4987 		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
4988 
4989 		raw_spin_lock(&cfs_b->lock);
4990 		runtime = -cfs_rq->runtime_remaining + 1;
4991 		if (runtime > cfs_b->runtime)
4992 			runtime = cfs_b->runtime;
4993 		cfs_b->runtime -= runtime;
4994 		remaining = cfs_b->runtime;
4995 		raw_spin_unlock(&cfs_b->lock);
4996 
4997 		cfs_rq->runtime_remaining += runtime;
4998 
4999 		/* we check whether we're throttled above */
5000 		if (cfs_rq->runtime_remaining > 0)
5001 			unthrottle_cfs_rq(cfs_rq);
5002 
5003 next:
5004 		rq_unlock_irqrestore(rq, &rf);
5005 
5006 		if (!remaining)
5007 			break;
5008 	}
5009 	rcu_read_unlock();
5010 }
5011 
5012 /*
5013  * Responsible for refilling a task_group's bandwidth and unthrottling its
5014  * cfs_rqs as appropriate. If there has been no activity within the last
5015  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5016  * used to track this state.
5017  */
do_sched_cfs_period_timer(struct cfs_bandwidth * cfs_b,int overrun,unsigned long flags)5018 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
5019 {
5020 	int throttled;
5021 
5022 	/* no need to continue the timer with no bandwidth constraint */
5023 	if (cfs_b->quota == RUNTIME_INF)
5024 		goto out_deactivate;
5025 
5026 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5027 	cfs_b->nr_periods += overrun;
5028 
5029 	/*
5030 	 * idle depends on !throttled (for the case of a large deficit), and if
5031 	 * we're going inactive then everything else can be deferred
5032 	 */
5033 	if (cfs_b->idle && !throttled)
5034 		goto out_deactivate;
5035 
5036 	__refill_cfs_bandwidth_runtime(cfs_b);
5037 
5038 	if (!throttled) {
5039 		/* mark as potentially idle for the upcoming period */
5040 		cfs_b->idle = 1;
5041 		return 0;
5042 	}
5043 
5044 	/* account preceding periods in which throttling occurred */
5045 	cfs_b->nr_throttled += overrun;
5046 
5047 	/*
5048 	 * This check is repeated as we release cfs_b->lock while we unthrottle.
5049 	 */
5050 	while (throttled && cfs_b->runtime > 0) {
5051 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5052 		/* we can't nest cfs_b->lock while distributing bandwidth */
5053 		distribute_cfs_runtime(cfs_b);
5054 		raw_spin_lock_irqsave(&cfs_b->lock, flags);
5055 
5056 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5057 	}
5058 
5059 	/*
5060 	 * While we are ensured activity in the period following an
5061 	 * unthrottle, this also covers the case in which the new bandwidth is
5062 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
5063 	 * timer to remain active while there are any throttled entities.)
5064 	 */
5065 	cfs_b->idle = 0;
5066 
5067 	return 0;
5068 
5069 out_deactivate:
5070 	return 1;
5071 }
5072 
5073 /* a cfs_rq won't donate quota below this amount */
5074 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
5075 /* minimum remaining period time to redistribute slack quota */
5076 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
5077 /* how long we wait to gather additional slack before distributing */
5078 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
5079 
5080 /*
5081  * Are we near the end of the current quota period?
5082  *
5083  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
5084  * hrtimer base being cleared by hrtimer_start. In the case of
5085  * migrate_hrtimers, base is never cleared, so we are fine.
5086  */
runtime_refresh_within(struct cfs_bandwidth * cfs_b,u64 min_expire)5087 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
5088 {
5089 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
5090 	s64 remaining;
5091 
5092 	/* if the call-back is running a quota refresh is already occurring */
5093 	if (hrtimer_callback_running(refresh_timer))
5094 		return 1;
5095 
5096 	/* is a quota refresh about to occur? */
5097 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
5098 	if (remaining < (s64)min_expire)
5099 		return 1;
5100 
5101 	return 0;
5102 }
5103 
start_cfs_slack_bandwidth(struct cfs_bandwidth * cfs_b)5104 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
5105 {
5106 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
5107 
5108 	/* if there's a quota refresh soon don't bother with slack */
5109 	if (runtime_refresh_within(cfs_b, min_left))
5110 		return;
5111 
5112 	/* don't push forwards an existing deferred unthrottle */
5113 	if (cfs_b->slack_started)
5114 		return;
5115 	cfs_b->slack_started = true;
5116 
5117 	hrtimer_start(&cfs_b->slack_timer,
5118 			ns_to_ktime(cfs_bandwidth_slack_period),
5119 			HRTIMER_MODE_REL);
5120 }
5121 
5122 /* we know any runtime found here is valid as update_curr() precedes return */
__return_cfs_rq_runtime(struct cfs_rq * cfs_rq)5123 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5124 {
5125 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5126 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
5127 
5128 	if (slack_runtime <= 0)
5129 		return;
5130 
5131 	raw_spin_lock(&cfs_b->lock);
5132 	if (cfs_b->quota != RUNTIME_INF) {
5133 		cfs_b->runtime += slack_runtime;
5134 
5135 		/* we are under rq->lock, defer unthrottling using a timer */
5136 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
5137 		    !list_empty(&cfs_b->throttled_cfs_rq))
5138 			start_cfs_slack_bandwidth(cfs_b);
5139 	}
5140 	raw_spin_unlock(&cfs_b->lock);
5141 
5142 	/* even if it's not valid for return we don't want to try again */
5143 	cfs_rq->runtime_remaining -= slack_runtime;
5144 }
5145 
return_cfs_rq_runtime(struct cfs_rq * cfs_rq)5146 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5147 {
5148 	if (!cfs_bandwidth_used())
5149 		return;
5150 
5151 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
5152 		return;
5153 
5154 	__return_cfs_rq_runtime(cfs_rq);
5155 }
5156 
5157 /*
5158  * This is done with a timer (instead of inline with bandwidth return) since
5159  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
5160  */
do_sched_cfs_slack_timer(struct cfs_bandwidth * cfs_b)5161 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
5162 {
5163 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
5164 	unsigned long flags;
5165 
5166 	/* confirm we're still not at a refresh boundary */
5167 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
5168 	cfs_b->slack_started = false;
5169 
5170 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
5171 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5172 		return;
5173 	}
5174 
5175 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
5176 		runtime = cfs_b->runtime;
5177 
5178 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5179 
5180 	if (!runtime)
5181 		return;
5182 
5183 	distribute_cfs_runtime(cfs_b);
5184 
5185 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
5186 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5187 }
5188 
5189 /*
5190  * When a group wakes up we want to make sure that its quota is not already
5191  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
5192  * runtime as update_curr() throttling can not trigger until it's on-rq.
5193  */
check_enqueue_throttle(struct cfs_rq * cfs_rq)5194 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
5195 {
5196 	if (!cfs_bandwidth_used())
5197 		return;
5198 
5199 	/* an active group must be handled by the update_curr()->put() path */
5200 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
5201 		return;
5202 
5203 	/* ensure the group is not already throttled */
5204 	if (cfs_rq_throttled(cfs_rq))
5205 		return;
5206 
5207 	/* update runtime allocation */
5208 	account_cfs_rq_runtime(cfs_rq, 0);
5209 	if (cfs_rq->runtime_remaining <= 0)
5210 		throttle_cfs_rq(cfs_rq);
5211 }
5212 
sync_throttle(struct task_group * tg,int cpu)5213 static void sync_throttle(struct task_group *tg, int cpu)
5214 {
5215 	struct cfs_rq *pcfs_rq, *cfs_rq;
5216 
5217 	if (!cfs_bandwidth_used())
5218 		return;
5219 
5220 	if (!tg->parent)
5221 		return;
5222 
5223 	cfs_rq = tg->cfs_rq[cpu];
5224 	pcfs_rq = tg->parent->cfs_rq[cpu];
5225 
5226 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
5227 	cfs_rq->throttled_clock_pelt = rq_clock_task_mult(cpu_rq(cpu));
5228 }
5229 
5230 /* conditionally throttle active cfs_rq's from put_prev_entity() */
check_cfs_rq_runtime(struct cfs_rq * cfs_rq)5231 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5232 {
5233 	if (!cfs_bandwidth_used())
5234 		return false;
5235 
5236 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
5237 		return false;
5238 
5239 	/*
5240 	 * it's possible for a throttled entity to be forced into a running
5241 	 * state (e.g. set_curr_task), in this case we're finished.
5242 	 */
5243 	if (cfs_rq_throttled(cfs_rq))
5244 		return true;
5245 
5246 	return throttle_cfs_rq(cfs_rq);
5247 }
5248 
sched_cfs_slack_timer(struct hrtimer * timer)5249 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
5250 {
5251 	struct cfs_bandwidth *cfs_b =
5252 		container_of(timer, struct cfs_bandwidth, slack_timer);
5253 
5254 	do_sched_cfs_slack_timer(cfs_b);
5255 
5256 	return HRTIMER_NORESTART;
5257 }
5258 
5259 extern const u64 max_cfs_quota_period;
5260 
sched_cfs_period_timer(struct hrtimer * timer)5261 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
5262 {
5263 	struct cfs_bandwidth *cfs_b =
5264 		container_of(timer, struct cfs_bandwidth, period_timer);
5265 	unsigned long flags;
5266 	int overrun;
5267 	int idle = 0;
5268 	int count = 0;
5269 
5270 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
5271 	for (;;) {
5272 		overrun = hrtimer_forward_now(timer, cfs_b->period);
5273 		if (!overrun)
5274 			break;
5275 
5276 		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
5277 
5278 		if (++count > 3) {
5279 			u64 new, old = ktime_to_ns(cfs_b->period);
5280 
5281 			/*
5282 			 * Grow period by a factor of 2 to avoid losing precision.
5283 			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
5284 			 * to fail.
5285 			 */
5286 			new = old * 2;
5287 			if (new < max_cfs_quota_period) {
5288 				cfs_b->period = ns_to_ktime(new);
5289 				cfs_b->quota *= 2;
5290 
5291 				pr_warn_ratelimited(
5292 	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5293 					smp_processor_id(),
5294 					div_u64(new, NSEC_PER_USEC),
5295 					div_u64(cfs_b->quota, NSEC_PER_USEC));
5296 			} else {
5297 				pr_warn_ratelimited(
5298 	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5299 					smp_processor_id(),
5300 					div_u64(old, NSEC_PER_USEC),
5301 					div_u64(cfs_b->quota, NSEC_PER_USEC));
5302 			}
5303 
5304 			/* reset count so we don't come right back in here */
5305 			count = 0;
5306 		}
5307 	}
5308 	if (idle)
5309 		cfs_b->period_active = 0;
5310 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5311 
5312 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
5313 }
5314 
init_cfs_bandwidth(struct cfs_bandwidth * cfs_b)5315 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5316 {
5317 	raw_spin_lock_init(&cfs_b->lock);
5318 	cfs_b->runtime = 0;
5319 	cfs_b->quota = RUNTIME_INF;
5320 	cfs_b->period = ns_to_ktime(default_cfs_period());
5321 
5322 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
5323 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
5324 	cfs_b->period_timer.function = sched_cfs_period_timer;
5325 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5326 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
5327 	cfs_b->slack_started = false;
5328 }
5329 
init_cfs_rq_runtime(struct cfs_rq * cfs_rq)5330 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5331 {
5332 	cfs_rq->runtime_enabled = 0;
5333 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
5334 }
5335 
start_cfs_bandwidth(struct cfs_bandwidth * cfs_b)5336 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5337 {
5338 	lockdep_assert_held(&cfs_b->lock);
5339 
5340 	if (cfs_b->period_active)
5341 		return;
5342 
5343 	cfs_b->period_active = 1;
5344 	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
5345 	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
5346 }
5347 
destroy_cfs_bandwidth(struct cfs_bandwidth * cfs_b)5348 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5349 {
5350 	/* init_cfs_bandwidth() was not called */
5351 	if (!cfs_b->throttled_cfs_rq.next)
5352 		return;
5353 
5354 	hrtimer_cancel(&cfs_b->period_timer);
5355 	hrtimer_cancel(&cfs_b->slack_timer);
5356 }
5357 
5358 /*
5359  * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
5360  *
5361  * The race is harmless, since modifying bandwidth settings of unhooked group
5362  * bits doesn't do much.
5363  */
5364 
5365 /* cpu online calback */
update_runtime_enabled(struct rq * rq)5366 static void __maybe_unused update_runtime_enabled(struct rq *rq)
5367 {
5368 	struct task_group *tg;
5369 
5370 	lockdep_assert_held(&rq->lock);
5371 
5372 	rcu_read_lock();
5373 	list_for_each_entry_rcu(tg, &task_groups, list) {
5374 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
5375 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5376 
5377 		raw_spin_lock(&cfs_b->lock);
5378 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
5379 		raw_spin_unlock(&cfs_b->lock);
5380 	}
5381 	rcu_read_unlock();
5382 }
5383 
5384 /* cpu offline callback */
unthrottle_offline_cfs_rqs(struct rq * rq)5385 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
5386 {
5387 	struct task_group *tg;
5388 
5389 	lockdep_assert_held(&rq->lock);
5390 
5391 	rcu_read_lock();
5392 	list_for_each_entry_rcu(tg, &task_groups, list) {
5393 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5394 
5395 		if (!cfs_rq->runtime_enabled)
5396 			continue;
5397 
5398 		/*
5399 		 * clock_task is not advancing so we just need to make sure
5400 		 * there's some valid quota amount
5401 		 */
5402 		cfs_rq->runtime_remaining = 1;
5403 		/*
5404 		 * Offline rq is schedulable till CPU is completely disabled
5405 		 * in take_cpu_down(), so we prevent new cfs throttling here.
5406 		 */
5407 		cfs_rq->runtime_enabled = 0;
5408 
5409 		if (cfs_rq_throttled(cfs_rq))
5410 			unthrottle_cfs_rq(cfs_rq);
5411 	}
5412 	rcu_read_unlock();
5413 }
5414 
5415 #else /* CONFIG_CFS_BANDWIDTH */
5416 
cfs_bandwidth_used(void)5417 static inline bool cfs_bandwidth_used(void)
5418 {
5419 	return false;
5420 }
5421 
account_cfs_rq_runtime(struct cfs_rq * cfs_rq,u64 delta_exec)5422 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
check_cfs_rq_runtime(struct cfs_rq * cfs_rq)5423 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
check_enqueue_throttle(struct cfs_rq * cfs_rq)5424 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
sync_throttle(struct task_group * tg,int cpu)5425 static inline void sync_throttle(struct task_group *tg, int cpu) {}
return_cfs_rq_runtime(struct cfs_rq * cfs_rq)5426 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
5427 
cfs_rq_throttled(struct cfs_rq * cfs_rq)5428 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5429 {
5430 	return 0;
5431 }
5432 
throttled_hierarchy(struct cfs_rq * cfs_rq)5433 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5434 {
5435 	return 0;
5436 }
5437 
throttled_lb_pair(struct task_group * tg,int src_cpu,int dest_cpu)5438 static inline int throttled_lb_pair(struct task_group *tg,
5439 				    int src_cpu, int dest_cpu)
5440 {
5441 	return 0;
5442 }
5443 
init_cfs_bandwidth(struct cfs_bandwidth * cfs_b)5444 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
5445 
5446 #ifdef CONFIG_FAIR_GROUP_SCHED
init_cfs_rq_runtime(struct cfs_rq * cfs_rq)5447 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
5448 #endif
5449 
tg_cfs_bandwidth(struct task_group * tg)5450 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5451 {
5452 	return NULL;
5453 }
destroy_cfs_bandwidth(struct cfs_bandwidth * cfs_b)5454 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
update_runtime_enabled(struct rq * rq)5455 static inline void update_runtime_enabled(struct rq *rq) {}
unthrottle_offline_cfs_rqs(struct rq * rq)5456 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
5457 
5458 #endif /* CONFIG_CFS_BANDWIDTH */
5459 
5460 /**************************************************
5461  * CFS operations on tasks:
5462  */
5463 
5464 #ifdef CONFIG_SCHED_HRTICK
hrtick_start_fair(struct rq * rq,struct task_struct * p)5465 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5466 {
5467 	struct sched_entity *se = &p->se;
5468 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
5469 
5470 	SCHED_WARN_ON(task_rq(p) != rq);
5471 
5472 	if (rq->cfs.h_nr_running > 1) {
5473 		u64 slice = sched_slice(cfs_rq, se);
5474 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
5475 		s64 delta = slice - ran;
5476 
5477 		if (delta < 0) {
5478 			if (rq->curr == p)
5479 				resched_curr(rq);
5480 			return;
5481 		}
5482 		hrtick_start(rq, delta);
5483 	}
5484 }
5485 
5486 /*
5487  * called from enqueue/dequeue and updates the hrtick when the
5488  * current task is from our class and nr_running is low enough
5489  * to matter.
5490  */
hrtick_update(struct rq * rq)5491 static void hrtick_update(struct rq *rq)
5492 {
5493 	struct task_struct *curr = rq->curr;
5494 
5495 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
5496 		return;
5497 
5498 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
5499 		hrtick_start_fair(rq, curr);
5500 }
5501 #else /* !CONFIG_SCHED_HRTICK */
5502 static inline void
hrtick_start_fair(struct rq * rq,struct task_struct * p)5503 hrtick_start_fair(struct rq *rq, struct task_struct *p)
5504 {
5505 }
5506 
hrtick_update(struct rq * rq)5507 static inline void hrtick_update(struct rq *rq)
5508 {
5509 }
5510 #endif
5511 
5512 #ifdef CONFIG_SMP
5513 static inline unsigned long cpu_util(int cpu);
5514 
cpu_overutilized(int cpu)5515 static inline bool cpu_overutilized(int cpu)
5516 {
5517 	int overutilized = -1;
5518 
5519 	trace_android_rvh_cpu_overutilized(cpu, &overutilized);
5520 	if (overutilized != -1)
5521 		return overutilized;
5522 
5523 	return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
5524 }
5525 
update_overutilized_status(struct rq * rq)5526 static inline void update_overutilized_status(struct rq *rq)
5527 {
5528 	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
5529 		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
5530 		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
5531 	}
5532 }
5533 #else
update_overutilized_status(struct rq * rq)5534 static inline void update_overutilized_status(struct rq *rq) { }
5535 #endif
5536 
5537 /* Runqueue only has SCHED_IDLE tasks enqueued */
sched_idle_rq(struct rq * rq)5538 static int sched_idle_rq(struct rq *rq)
5539 {
5540 	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
5541 			rq->nr_running);
5542 }
5543 
5544 #ifdef CONFIG_SMP
sched_idle_cpu(int cpu)5545 static int sched_idle_cpu(int cpu)
5546 {
5547 	return sched_idle_rq(cpu_rq(cpu));
5548 }
5549 #endif
5550 
5551 /*
5552  * The enqueue_task method is called before nr_running is
5553  * increased. Here we update the fair scheduling stats and
5554  * then put the task into the rbtree:
5555  */
5556 static void
enqueue_task_fair(struct rq * rq,struct task_struct * p,int flags)5557 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5558 {
5559 	struct cfs_rq *cfs_rq;
5560 	struct sched_entity *se = &p->se;
5561 	int idle_h_nr_running = task_has_idle_policy(p);
5562 	int task_new = !(flags & ENQUEUE_WAKEUP);
5563 	int should_iowait_boost;
5564 
5565 	/*
5566 	 * The code below (indirectly) updates schedutil which looks at
5567 	 * the cfs_rq utilization to select a frequency.
5568 	 * Let's add the task's estimated utilization to the cfs_rq's
5569 	 * estimated utilization, before we update schedutil.
5570 	 */
5571 	util_est_enqueue(&rq->cfs, p);
5572 
5573 	/*
5574 	 * If in_iowait is set, the code below may not trigger any cpufreq
5575 	 * utilization updates, so do it here explicitly with the IOWAIT flag
5576 	 * passed.
5577 	 */
5578 	should_iowait_boost = p->in_iowait;
5579 	trace_android_rvh_set_iowait(p, &should_iowait_boost);
5580 	if (should_iowait_boost)
5581 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
5582 
5583 	for_each_sched_entity(se) {
5584 		if (se->on_rq)
5585 			break;
5586 		cfs_rq = cfs_rq_of(se);
5587 		enqueue_entity(cfs_rq, se, flags);
5588 
5589 		cfs_rq->h_nr_running++;
5590 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
5591 
5592 		/* end evaluation on encountering a throttled cfs_rq */
5593 		if (cfs_rq_throttled(cfs_rq))
5594 			goto enqueue_throttle;
5595 
5596 		flags = ENQUEUE_WAKEUP;
5597 	}
5598 
5599 	trace_android_rvh_enqueue_task_fair(rq, p, flags);
5600 	for_each_sched_entity(se) {
5601 		cfs_rq = cfs_rq_of(se);
5602 
5603 		update_load_avg(cfs_rq, se, UPDATE_TG);
5604 		se_update_runnable(se);
5605 		update_cfs_group(se);
5606 
5607 		cfs_rq->h_nr_running++;
5608 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
5609 
5610 		/* end evaluation on encountering a throttled cfs_rq */
5611 		if (cfs_rq_throttled(cfs_rq))
5612 			goto enqueue_throttle;
5613 
5614                /*
5615                 * One parent has been throttled and cfs_rq removed from the
5616                 * list. Add it back to not break the leaf list.
5617                 */
5618                if (throttled_hierarchy(cfs_rq))
5619                        list_add_leaf_cfs_rq(cfs_rq);
5620 	}
5621 
5622 	/* At this point se is NULL and we are at root level*/
5623 	add_nr_running(rq, 1);
5624 
5625 	/*
5626 	 * Since new tasks are assigned an initial util_avg equal to
5627 	 * half of the spare capacity of their CPU, tiny tasks have the
5628 	 * ability to cross the overutilized threshold, which will
5629 	 * result in the load balancer ruining all the task placement
5630 	 * done by EAS. As a way to mitigate that effect, do not account
5631 	 * for the first enqueue operation of new tasks during the
5632 	 * overutilized flag detection.
5633 	 *
5634 	 * A better way of solving this problem would be to wait for
5635 	 * the PELT signals of tasks to converge before taking them
5636 	 * into account, but that is not straightforward to implement,
5637 	 * and the following generally works well enough in practice.
5638 	 */
5639 	if (!task_new)
5640 		update_overutilized_status(rq);
5641 
5642 enqueue_throttle:
5643 	if (cfs_bandwidth_used()) {
5644 		/*
5645 		 * When bandwidth control is enabled; the cfs_rq_throttled()
5646 		 * breaks in the above iteration can result in incomplete
5647 		 * leaf list maintenance, resulting in triggering the assertion
5648 		 * below.
5649 		 */
5650 		for_each_sched_entity(se) {
5651 			cfs_rq = cfs_rq_of(se);
5652 
5653 			if (list_add_leaf_cfs_rq(cfs_rq))
5654 				break;
5655 		}
5656 	}
5657 
5658 	assert_list_leaf_cfs_rq(rq);
5659 
5660 	hrtick_update(rq);
5661 }
5662 
5663 static void set_next_buddy(struct sched_entity *se);
5664 
5665 /*
5666  * The dequeue_task method is called before nr_running is
5667  * decreased. We remove the task from the rbtree and
5668  * update the fair scheduling stats:
5669  */
dequeue_task_fair(struct rq * rq,struct task_struct * p,int flags)5670 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5671 {
5672 	struct cfs_rq *cfs_rq;
5673 	struct sched_entity *se = &p->se;
5674 	int task_sleep = flags & DEQUEUE_SLEEP;
5675 	int idle_h_nr_running = task_has_idle_policy(p);
5676 	bool was_sched_idle = sched_idle_rq(rq);
5677 
5678 	util_est_dequeue(&rq->cfs, p);
5679 
5680 	for_each_sched_entity(se) {
5681 		cfs_rq = cfs_rq_of(se);
5682 		dequeue_entity(cfs_rq, se, flags);
5683 
5684 		cfs_rq->h_nr_running--;
5685 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
5686 
5687 		/* end evaluation on encountering a throttled cfs_rq */
5688 		if (cfs_rq_throttled(cfs_rq))
5689 			goto dequeue_throttle;
5690 
5691 		/* Don't dequeue parent if it has other entities besides us */
5692 		if (cfs_rq->load.weight) {
5693 			/* Avoid re-evaluating load for this entity: */
5694 			se = parent_entity(se);
5695 			/*
5696 			 * Bias pick_next to pick a task from this cfs_rq, as
5697 			 * p is sleeping when it is within its sched_slice.
5698 			 */
5699 			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
5700 				set_next_buddy(se);
5701 			break;
5702 		}
5703 		flags |= DEQUEUE_SLEEP;
5704 	}
5705 
5706 	trace_android_rvh_dequeue_task_fair(rq, p, flags);
5707 	for_each_sched_entity(se) {
5708 		cfs_rq = cfs_rq_of(se);
5709 
5710 		update_load_avg(cfs_rq, se, UPDATE_TG);
5711 		se_update_runnable(se);
5712 		update_cfs_group(se);
5713 
5714 		cfs_rq->h_nr_running--;
5715 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
5716 
5717 		/* end evaluation on encountering a throttled cfs_rq */
5718 		if (cfs_rq_throttled(cfs_rq))
5719 			goto dequeue_throttle;
5720 
5721 	}
5722 
5723 	/* At this point se is NULL and we are at root level*/
5724 	sub_nr_running(rq, 1);
5725 
5726 	/* balance early to pull high priority tasks */
5727 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
5728 		rq->next_balance = jiffies;
5729 
5730 dequeue_throttle:
5731 	util_est_update(&rq->cfs, p, task_sleep);
5732 	hrtick_update(rq);
5733 }
5734 
5735 #ifdef CONFIG_SMP
5736 
5737 /* Working cpumask for: load_balance, load_balance_newidle. */
5738 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5739 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
5740 
5741 #ifdef CONFIG_NO_HZ_COMMON
5742 
5743 static struct {
5744 	cpumask_var_t idle_cpus_mask;
5745 	atomic_t nr_cpus;
5746 	int has_blocked;		/* Idle CPUS has blocked load */
5747 	unsigned long next_balance;     /* in jiffy units */
5748 	unsigned long next_blocked;	/* Next update of blocked load in jiffies */
5749 } nohz ____cacheline_aligned;
5750 
5751 #endif /* CONFIG_NO_HZ_COMMON */
5752 
cpu_load(struct rq * rq)5753 static unsigned long cpu_load(struct rq *rq)
5754 {
5755 	return cfs_rq_load_avg(&rq->cfs);
5756 }
5757 
5758 /*
5759  * cpu_load_without - compute CPU load without any contributions from *p
5760  * @cpu: the CPU which load is requested
5761  * @p: the task which load should be discounted
5762  *
5763  * The load of a CPU is defined by the load of tasks currently enqueued on that
5764  * CPU as well as tasks which are currently sleeping after an execution on that
5765  * CPU.
5766  *
5767  * This method returns the load of the specified CPU by discounting the load of
5768  * the specified task, whenever the task is currently contributing to the CPU
5769  * load.
5770  */
cpu_load_without(struct rq * rq,struct task_struct * p)5771 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
5772 {
5773 	struct cfs_rq *cfs_rq;
5774 	unsigned int load;
5775 
5776 	/* Task has no contribution or is new */
5777 	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
5778 		return cpu_load(rq);
5779 
5780 	cfs_rq = &rq->cfs;
5781 	load = READ_ONCE(cfs_rq->avg.load_avg);
5782 
5783 	/* Discount task's util from CPU's util */
5784 	lsub_positive(&load, task_h_load(p));
5785 
5786 	return load;
5787 }
5788 
cpu_runnable(struct rq * rq)5789 static unsigned long cpu_runnable(struct rq *rq)
5790 {
5791 	return cfs_rq_runnable_avg(&rq->cfs);
5792 }
5793 
cpu_runnable_without(struct rq * rq,struct task_struct * p)5794 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
5795 {
5796 	struct cfs_rq *cfs_rq;
5797 	unsigned int runnable;
5798 
5799 	/* Task has no contribution or is new */
5800 	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
5801 		return cpu_runnable(rq);
5802 
5803 	cfs_rq = &rq->cfs;
5804 	runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
5805 
5806 	/* Discount task's runnable from CPU's runnable */
5807 	lsub_positive(&runnable, p->se.avg.runnable_avg);
5808 
5809 	return runnable;
5810 }
5811 
capacity_of(int cpu)5812 static unsigned long capacity_of(int cpu)
5813 {
5814 	return cpu_rq(cpu)->cpu_capacity;
5815 }
5816 
record_wakee(struct task_struct * p)5817 static void record_wakee(struct task_struct *p)
5818 {
5819 	/*
5820 	 * Only decay a single time; tasks that have less then 1 wakeup per
5821 	 * jiffy will not have built up many flips.
5822 	 */
5823 	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5824 		current->wakee_flips >>= 1;
5825 		current->wakee_flip_decay_ts = jiffies;
5826 	}
5827 
5828 	if (current->last_wakee != p) {
5829 		current->last_wakee = p;
5830 		current->wakee_flips++;
5831 	}
5832 }
5833 
5834 /*
5835  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5836  *
5837  * A waker of many should wake a different task than the one last awakened
5838  * at a frequency roughly N times higher than one of its wakees.
5839  *
5840  * In order to determine whether we should let the load spread vs consolidating
5841  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5842  * partner, and a factor of lls_size higher frequency in the other.
5843  *
5844  * With both conditions met, we can be relatively sure that the relationship is
5845  * non-monogamous, with partner count exceeding socket size.
5846  *
5847  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5848  * whatever is irrelevant, spread criteria is apparent partner count exceeds
5849  * socket size.
5850  */
wake_wide(struct task_struct * p)5851 static int wake_wide(struct task_struct *p)
5852 {
5853 	unsigned int master = current->wakee_flips;
5854 	unsigned int slave = p->wakee_flips;
5855 	int factor = __this_cpu_read(sd_llc_size);
5856 
5857 	if (master < slave)
5858 		swap(master, slave);
5859 	if (slave < factor || master < slave * factor)
5860 		return 0;
5861 	return 1;
5862 }
5863 
5864 /*
5865  * The purpose of wake_affine() is to quickly determine on which CPU we can run
5866  * soonest. For the purpose of speed we only consider the waking and previous
5867  * CPU.
5868  *
5869  * wake_affine_idle() - only considers 'now', it check if the waking CPU is
5870  *			cache-affine and is (or	will be) idle.
5871  *
5872  * wake_affine_weight() - considers the weight to reflect the average
5873  *			  scheduling latency of the CPUs. This seems to work
5874  *			  for the overloaded case.
5875  */
5876 static int
wake_affine_idle(int this_cpu,int prev_cpu,int sync)5877 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
5878 {
5879 	/*
5880 	 * If this_cpu is idle, it implies the wakeup is from interrupt
5881 	 * context. Only allow the move if cache is shared. Otherwise an
5882 	 * interrupt intensive workload could force all tasks onto one
5883 	 * node depending on the IO topology or IRQ affinity settings.
5884 	 *
5885 	 * If the prev_cpu is idle and cache affine then avoid a migration.
5886 	 * There is no guarantee that the cache hot data from an interrupt
5887 	 * is more important than cache hot data on the prev_cpu and from
5888 	 * a cpufreq perspective, it's better to have higher utilisation
5889 	 * on one CPU.
5890 	 */
5891 	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
5892 		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
5893 
5894 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
5895 		return this_cpu;
5896 
5897 	return nr_cpumask_bits;
5898 }
5899 
5900 static int
wake_affine_weight(struct sched_domain * sd,struct task_struct * p,int this_cpu,int prev_cpu,int sync)5901 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
5902 		   int this_cpu, int prev_cpu, int sync)
5903 {
5904 	s64 this_eff_load, prev_eff_load;
5905 	unsigned long task_load;
5906 
5907 	this_eff_load = cpu_load(cpu_rq(this_cpu));
5908 
5909 	if (sync) {
5910 		unsigned long current_load = task_h_load(current);
5911 
5912 		if (current_load > this_eff_load)
5913 			return this_cpu;
5914 
5915 		this_eff_load -= current_load;
5916 	}
5917 
5918 	task_load = task_h_load(p);
5919 
5920 	this_eff_load += task_load;
5921 	if (sched_feat(WA_BIAS))
5922 		this_eff_load *= 100;
5923 	this_eff_load *= capacity_of(prev_cpu);
5924 
5925 	prev_eff_load = cpu_load(cpu_rq(prev_cpu));
5926 	prev_eff_load -= task_load;
5927 	if (sched_feat(WA_BIAS))
5928 		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
5929 	prev_eff_load *= capacity_of(this_cpu);
5930 
5931 	/*
5932 	 * If sync, adjust the weight of prev_eff_load such that if
5933 	 * prev_eff == this_eff that select_idle_sibling() will consider
5934 	 * stacking the wakee on top of the waker if no other CPU is
5935 	 * idle.
5936 	 */
5937 	if (sync)
5938 		prev_eff_load += 1;
5939 
5940 	return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
5941 }
5942 
wake_affine(struct sched_domain * sd,struct task_struct * p,int this_cpu,int prev_cpu,int sync)5943 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5944 		       int this_cpu, int prev_cpu, int sync)
5945 {
5946 	int target = nr_cpumask_bits;
5947 
5948 	if (sched_feat(WA_IDLE))
5949 		target = wake_affine_idle(this_cpu, prev_cpu, sync);
5950 
5951 	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
5952 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
5953 
5954 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5955 	if (target == nr_cpumask_bits)
5956 		return prev_cpu;
5957 
5958 	schedstat_inc(sd->ttwu_move_affine);
5959 	schedstat_inc(p->se.statistics.nr_wakeups_affine);
5960 	return target;
5961 }
5962 
5963 static struct sched_group *
5964 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
5965 
5966 /*
5967  * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
5968  */
5969 static int
find_idlest_group_cpu(struct sched_group * group,struct task_struct * p,int this_cpu)5970 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5971 {
5972 	unsigned long load, min_load = ULONG_MAX;
5973 	unsigned int min_exit_latency = UINT_MAX;
5974 	u64 latest_idle_timestamp = 0;
5975 	int least_loaded_cpu = this_cpu;
5976 	int shallowest_idle_cpu = -1;
5977 	int i;
5978 
5979 	/* Check if we have any choice: */
5980 	if (group->group_weight == 1)
5981 		return cpumask_first(sched_group_span(group));
5982 
5983 	/* Traverse only the allowed CPUs */
5984 	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
5985 		if (sched_idle_cpu(i))
5986 			return i;
5987 
5988 		if (available_idle_cpu(i)) {
5989 			struct rq *rq = cpu_rq(i);
5990 			struct cpuidle_state *idle = idle_get_state(rq);
5991 			if (idle && idle->exit_latency < min_exit_latency) {
5992 				/*
5993 				 * We give priority to a CPU whose idle state
5994 				 * has the smallest exit latency irrespective
5995 				 * of any idle timestamp.
5996 				 */
5997 				min_exit_latency = idle->exit_latency;
5998 				latest_idle_timestamp = rq->idle_stamp;
5999 				shallowest_idle_cpu = i;
6000 			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
6001 				   rq->idle_stamp > latest_idle_timestamp) {
6002 				/*
6003 				 * If equal or no active idle state, then
6004 				 * the most recently idled CPU might have
6005 				 * a warmer cache.
6006 				 */
6007 				latest_idle_timestamp = rq->idle_stamp;
6008 				shallowest_idle_cpu = i;
6009 			}
6010 		} else if (shallowest_idle_cpu == -1) {
6011 			load = cpu_load(cpu_rq(i));
6012 			if (load < min_load) {
6013 				min_load = load;
6014 				least_loaded_cpu = i;
6015 			}
6016 		}
6017 	}
6018 
6019 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
6020 }
6021 
find_idlest_cpu(struct sched_domain * sd,struct task_struct * p,int cpu,int prev_cpu,int sd_flag)6022 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
6023 				  int cpu, int prev_cpu, int sd_flag)
6024 {
6025 	int new_cpu = cpu;
6026 
6027 	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
6028 		return prev_cpu;
6029 
6030 	/*
6031 	 * We need task's util for cpu_util_without, sync it up to
6032 	 * prev_cpu's last_update_time.
6033 	 */
6034 	if (!(sd_flag & SD_BALANCE_FORK))
6035 		sync_entity_load_avg(&p->se);
6036 
6037 	while (sd) {
6038 		struct sched_group *group;
6039 		struct sched_domain *tmp;
6040 		int weight;
6041 
6042 		if (!(sd->flags & sd_flag)) {
6043 			sd = sd->child;
6044 			continue;
6045 		}
6046 
6047 		group = find_idlest_group(sd, p, cpu);
6048 		if (!group) {
6049 			sd = sd->child;
6050 			continue;
6051 		}
6052 
6053 		new_cpu = find_idlest_group_cpu(group, p, cpu);
6054 		if (new_cpu == cpu) {
6055 			/* Now try balancing at a lower domain level of 'cpu': */
6056 			sd = sd->child;
6057 			continue;
6058 		}
6059 
6060 		/* Now try balancing at a lower domain level of 'new_cpu': */
6061 		cpu = new_cpu;
6062 		weight = sd->span_weight;
6063 		sd = NULL;
6064 		for_each_domain(cpu, tmp) {
6065 			if (weight <= tmp->span_weight)
6066 				break;
6067 			if (tmp->flags & sd_flag)
6068 				sd = tmp;
6069 		}
6070 	}
6071 
6072 	return new_cpu;
6073 }
6074 
6075 #ifdef CONFIG_SCHED_SMT
6076 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
6077 EXPORT_SYMBOL_GPL(sched_smt_present);
6078 
set_idle_cores(int cpu,int val)6079 static inline void set_idle_cores(int cpu, int val)
6080 {
6081 	struct sched_domain_shared *sds;
6082 
6083 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6084 	if (sds)
6085 		WRITE_ONCE(sds->has_idle_cores, val);
6086 }
6087 
test_idle_cores(int cpu,bool def)6088 static inline bool test_idle_cores(int cpu, bool def)
6089 {
6090 	struct sched_domain_shared *sds;
6091 
6092 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6093 	if (sds)
6094 		return READ_ONCE(sds->has_idle_cores);
6095 
6096 	return def;
6097 }
6098 
6099 /*
6100  * Scans the local SMT mask to see if the entire core is idle, and records this
6101  * information in sd_llc_shared->has_idle_cores.
6102  *
6103  * Since SMT siblings share all cache levels, inspecting this limited remote
6104  * state should be fairly cheap.
6105  */
__update_idle_core(struct rq * rq)6106 void __update_idle_core(struct rq *rq)
6107 {
6108 	int core = cpu_of(rq);
6109 	int cpu;
6110 
6111 	rcu_read_lock();
6112 	if (test_idle_cores(core, true))
6113 		goto unlock;
6114 
6115 	for_each_cpu(cpu, cpu_smt_mask(core)) {
6116 		if (cpu == core)
6117 			continue;
6118 
6119 		if (!available_idle_cpu(cpu))
6120 			goto unlock;
6121 	}
6122 
6123 	set_idle_cores(core, 1);
6124 unlock:
6125 	rcu_read_unlock();
6126 }
6127 
6128 /*
6129  * Scan the entire LLC domain for idle cores; this dynamically switches off if
6130  * there are no idle cores left in the system; tracked through
6131  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
6132  */
select_idle_core(struct task_struct * p,struct sched_domain * sd,int target)6133 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
6134 {
6135 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6136 	int core, cpu;
6137 
6138 	if (!static_branch_likely(&sched_smt_present))
6139 		return -1;
6140 
6141 	if (!test_idle_cores(target, false))
6142 		return -1;
6143 
6144 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6145 
6146 	for_each_cpu_wrap(core, cpus, target) {
6147 		bool idle = true;
6148 
6149 		for_each_cpu(cpu, cpu_smt_mask(core)) {
6150 			if (!available_idle_cpu(cpu)) {
6151 				idle = false;
6152 				break;
6153 			}
6154 		}
6155 		cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
6156 
6157 		if (idle)
6158 			return core;
6159 	}
6160 
6161 	/*
6162 	 * Failed to find an idle core; stop looking for one.
6163 	 */
6164 	set_idle_cores(target, 0);
6165 
6166 	return -1;
6167 }
6168 
6169 /*
6170  * Scan the local SMT mask for idle CPUs.
6171  */
select_idle_smt(struct task_struct * p,struct sched_domain * sd,int target)6172 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6173 {
6174 	int cpu;
6175 
6176 	if (!static_branch_likely(&sched_smt_present))
6177 		return -1;
6178 
6179 	for_each_cpu(cpu, cpu_smt_mask(target)) {
6180 		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
6181 		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
6182 			continue;
6183 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
6184 			return cpu;
6185 	}
6186 
6187 	return -1;
6188 }
6189 
6190 #else /* CONFIG_SCHED_SMT */
6191 
select_idle_core(struct task_struct * p,struct sched_domain * sd,int target)6192 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
6193 {
6194 	return -1;
6195 }
6196 
select_idle_smt(struct task_struct * p,struct sched_domain * sd,int target)6197 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6198 {
6199 	return -1;
6200 }
6201 
6202 #endif /* CONFIG_SCHED_SMT */
6203 
6204 /*
6205  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6206  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6207  * average idle time for this rq (as found in rq->avg_idle).
6208  */
select_idle_cpu(struct task_struct * p,struct sched_domain * sd,int target)6209 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
6210 {
6211 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6212 	struct sched_domain *this_sd;
6213 	u64 avg_cost, avg_idle;
6214 	u64 time;
6215 	int this = smp_processor_id();
6216 	int cpu, nr = INT_MAX;
6217 
6218 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6219 	if (!this_sd)
6220 		return -1;
6221 
6222 	/*
6223 	 * Due to large variance we need a large fuzz factor; hackbench in
6224 	 * particularly is sensitive here.
6225 	 */
6226 	avg_idle = this_rq()->avg_idle / 512;
6227 	avg_cost = this_sd->avg_scan_cost + 1;
6228 
6229 	if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
6230 		return -1;
6231 
6232 	if (sched_feat(SIS_PROP)) {
6233 		u64 span_avg = sd->span_weight * avg_idle;
6234 		if (span_avg > 4*avg_cost)
6235 			nr = div_u64(span_avg, avg_cost);
6236 		else
6237 			nr = 4;
6238 	}
6239 
6240 	time = cpu_clock(this);
6241 
6242 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6243 
6244 	for_each_cpu_wrap(cpu, cpus, target) {
6245 		if (!--nr)
6246 			return -1;
6247 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
6248 			break;
6249 	}
6250 
6251 	time = cpu_clock(this) - time;
6252 	update_avg(&this_sd->avg_scan_cost, time);
6253 
6254 	return cpu;
6255 }
6256 
6257 /*
6258  * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
6259  * the task fits. If no CPU is big enough, but there are idle ones, try to
6260  * maximize capacity.
6261  */
6262 static int
select_idle_capacity(struct task_struct * p,struct sched_domain * sd,int target)6263 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
6264 {
6265 	unsigned long task_util, best_cap = 0;
6266 	int cpu, best_cpu = -1;
6267 	struct cpumask *cpus;
6268 
6269 	cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6270 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6271 
6272 	task_util = uclamp_task_util(p);
6273 
6274 	for_each_cpu_wrap(cpu, cpus, target) {
6275 		unsigned long cpu_cap = capacity_of(cpu);
6276 
6277 		if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
6278 			continue;
6279 		if (fits_capacity(task_util, cpu_cap))
6280 			return cpu;
6281 
6282 		if (cpu_cap > best_cap) {
6283 			best_cap = cpu_cap;
6284 			best_cpu = cpu;
6285 		}
6286 	}
6287 
6288 	return best_cpu;
6289 }
6290 
asym_fits_capacity(int task_util,int cpu)6291 static inline bool asym_fits_capacity(int task_util, int cpu)
6292 {
6293 	if (static_branch_unlikely(&sched_asym_cpucapacity))
6294 		return fits_capacity(task_util, capacity_of(cpu));
6295 
6296 	return true;
6297 }
6298 
6299 /*
6300  * Try and locate an idle core/thread in the LLC cache domain.
6301  */
select_idle_sibling(struct task_struct * p,int prev,int target)6302 static int select_idle_sibling(struct task_struct *p, int prev, int target)
6303 {
6304 	struct sched_domain *sd;
6305 	unsigned long task_util;
6306 	int i, recent_used_cpu;
6307 
6308 	/*
6309 	 * On asymmetric system, update task utilization because we will check
6310 	 * that the task fits with cpu's capacity.
6311 	 */
6312 	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6313 		sync_entity_load_avg(&p->se);
6314 		task_util = uclamp_task_util(p);
6315 	}
6316 
6317 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
6318 	    asym_fits_capacity(task_util, target))
6319 		return target;
6320 
6321 	/*
6322 	 * If the previous CPU is cache affine and idle, don't be stupid:
6323 	 */
6324 	if (prev != target && cpus_share_cache(prev, target) &&
6325 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
6326 	    asym_fits_capacity(task_util, prev))
6327 		return prev;
6328 
6329 	/*
6330 	 * Allow a per-cpu kthread to stack with the wakee if the
6331 	 * kworker thread and the tasks previous CPUs are the same.
6332 	 * The assumption is that the wakee queued work for the
6333 	 * per-cpu kthread that is now complete and the wakeup is
6334 	 * essentially a sync wakeup. An obvious example of this
6335 	 * pattern is IO completions.
6336 	 */
6337 	if (is_per_cpu_kthread(current) &&
6338 	    in_task() &&
6339 	    prev == smp_processor_id() &&
6340 	    this_rq()->nr_running <= 1 &&
6341 	    asym_fits_capacity(task_util, prev)) {
6342 		return prev;
6343 	}
6344 
6345 	/* Check a recently used CPU as a potential idle candidate: */
6346 	recent_used_cpu = p->recent_used_cpu;
6347 	if (recent_used_cpu != prev &&
6348 	    recent_used_cpu != target &&
6349 	    cpus_share_cache(recent_used_cpu, target) &&
6350 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
6351 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
6352 	    asym_fits_capacity(task_util, recent_used_cpu)) {
6353 		/*
6354 		 * Replace recent_used_cpu with prev as it is a potential
6355 		 * candidate for the next wake:
6356 		 */
6357 		p->recent_used_cpu = prev;
6358 		return recent_used_cpu;
6359 	}
6360 
6361 	if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6362 		if (rockchip_perf_get_level() == ROCKCHIP_PERFORMANCE_HIGH)
6363 			goto sd_llc;
6364 	}
6365 
6366 	/*
6367 	 * For asymmetric CPU capacity systems, our domain of interest is
6368 	 * sd_asym_cpucapacity rather than sd_llc.
6369 	 */
6370 	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6371 		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
6372 		/*
6373 		 * On an asymmetric CPU capacity system where an exclusive
6374 		 * cpuset defines a symmetric island (i.e. one unique
6375 		 * capacity_orig value through the cpuset), the key will be set
6376 		 * but the CPUs within that cpuset will not have a domain with
6377 		 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
6378 		 * capacity path.
6379 		 */
6380 		if (sd) {
6381 			i = select_idle_capacity(p, sd, target);
6382 			return ((unsigned)i < nr_cpumask_bits) ? i : target;
6383 		}
6384 	}
6385 
6386 sd_llc:
6387 	sd = rcu_dereference(per_cpu(sd_llc, target));
6388 	if (!sd)
6389 		return target;
6390 
6391 	i = select_idle_core(p, sd, target);
6392 	if ((unsigned)i < nr_cpumask_bits)
6393 		return i;
6394 
6395 	i = select_idle_cpu(p, sd, target);
6396 	if ((unsigned)i < nr_cpumask_bits)
6397 		return i;
6398 
6399 	i = select_idle_smt(p, sd, target);
6400 	if ((unsigned)i < nr_cpumask_bits)
6401 		return i;
6402 
6403 	return target;
6404 }
6405 
6406 /**
6407  * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
6408  * @cpu: the CPU to get the utilization of
6409  *
6410  * The unit of the return value must be the one of capacity so we can compare
6411  * the utilization with the capacity of the CPU that is available for CFS task
6412  * (ie cpu_capacity).
6413  *
6414  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
6415  * recent utilization of currently non-runnable tasks on a CPU. It represents
6416  * the amount of utilization of a CPU in the range [0..capacity_orig] where
6417  * capacity_orig is the cpu_capacity available at the highest frequency
6418  * (arch_scale_freq_capacity()).
6419  * The utilization of a CPU converges towards a sum equal to or less than the
6420  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
6421  * the running time on this CPU scaled by capacity_curr.
6422  *
6423  * The estimated utilization of a CPU is defined to be the maximum between its
6424  * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
6425  * currently RUNNABLE on that CPU.
6426  * This allows to properly represent the expected utilization of a CPU which
6427  * has just got a big task running since a long sleep period. At the same time
6428  * however it preserves the benefits of the "blocked utilization" in
6429  * describing the potential for other tasks waking up on the same CPU.
6430  *
6431  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
6432  * higher than capacity_orig because of unfortunate rounding in
6433  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
6434  * the average stabilizes with the new running time. We need to check that the
6435  * utilization stays within the range of [0..capacity_orig] and cap it if
6436  * necessary. Without utilization capping, a group could be seen as overloaded
6437  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
6438  * available capacity. We allow utilization to overshoot capacity_curr (but not
6439  * capacity_orig) as it useful for predicting the capacity required after task
6440  * migrations (scheduler-driven DVFS).
6441  *
6442  * Return: the (estimated) utilization for the specified CPU
6443  */
cpu_util(int cpu)6444 static inline unsigned long cpu_util(int cpu)
6445 {
6446 	struct cfs_rq *cfs_rq;
6447 	unsigned int util;
6448 
6449 	cfs_rq = &cpu_rq(cpu)->cfs;
6450 	util = READ_ONCE(cfs_rq->avg.util_avg);
6451 
6452 	if (sched_feat(UTIL_EST))
6453 		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
6454 
6455 	return min_t(unsigned long, util, capacity_orig_of(cpu));
6456 }
6457 
6458 /*
6459  * cpu_util_without: compute cpu utilization without any contributions from *p
6460  * @cpu: the CPU which utilization is requested
6461  * @p: the task which utilization should be discounted
6462  *
6463  * The utilization of a CPU is defined by the utilization of tasks currently
6464  * enqueued on that CPU as well as tasks which are currently sleeping after an
6465  * execution on that CPU.
6466  *
6467  * This method returns the utilization of the specified CPU by discounting the
6468  * utilization of the specified task, whenever the task is currently
6469  * contributing to the CPU utilization.
6470  */
cpu_util_without(int cpu,struct task_struct * p)6471 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
6472 {
6473 	struct cfs_rq *cfs_rq;
6474 	unsigned int util;
6475 
6476 	/* Task has no contribution or is new */
6477 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6478 		return cpu_util(cpu);
6479 
6480 	cfs_rq = &cpu_rq(cpu)->cfs;
6481 	util = READ_ONCE(cfs_rq->avg.util_avg);
6482 
6483 	/* Discount task's util from CPU's util */
6484 	lsub_positive(&util, task_util(p));
6485 
6486 	/*
6487 	 * Covered cases:
6488 	 *
6489 	 * a) if *p is the only task sleeping on this CPU, then:
6490 	 *      cpu_util (== task_util) > util_est (== 0)
6491 	 *    and thus we return:
6492 	 *      cpu_util_without = (cpu_util - task_util) = 0
6493 	 *
6494 	 * b) if other tasks are SLEEPING on this CPU, which is now exiting
6495 	 *    IDLE, then:
6496 	 *      cpu_util >= task_util
6497 	 *      cpu_util > util_est (== 0)
6498 	 *    and thus we discount *p's blocked utilization to return:
6499 	 *      cpu_util_without = (cpu_util - task_util) >= 0
6500 	 *
6501 	 * c) if other tasks are RUNNABLE on that CPU and
6502 	 *      util_est > cpu_util
6503 	 *    then we use util_est since it returns a more restrictive
6504 	 *    estimation of the spare capacity on that CPU, by just
6505 	 *    considering the expected utilization of tasks already
6506 	 *    runnable on that CPU.
6507 	 *
6508 	 * Cases a) and b) are covered by the above code, while case c) is
6509 	 * covered by the following code when estimated utilization is
6510 	 * enabled.
6511 	 */
6512 	if (sched_feat(UTIL_EST)) {
6513 		unsigned int estimated =
6514 			READ_ONCE(cfs_rq->avg.util_est.enqueued);
6515 
6516 		/*
6517 		 * Despite the following checks we still have a small window
6518 		 * for a possible race, when an execl's select_task_rq_fair()
6519 		 * races with LB's detach_task():
6520 		 *
6521 		 *   detach_task()
6522 		 *     p->on_rq = TASK_ON_RQ_MIGRATING;
6523 		 *     ---------------------------------- A
6524 		 *     deactivate_task()                   \
6525 		 *       dequeue_task()                     + RaceTime
6526 		 *         util_est_dequeue()              /
6527 		 *     ---------------------------------- B
6528 		 *
6529 		 * The additional check on "current == p" it's required to
6530 		 * properly fix the execl regression and it helps in further
6531 		 * reducing the chances for the above race.
6532 		 */
6533 		if (unlikely(task_on_rq_queued(p) || current == p))
6534 			lsub_positive(&estimated, _task_util_est(p));
6535 
6536 		util = max(util, estimated);
6537 	}
6538 
6539 	/*
6540 	 * Utilization (estimated) can exceed the CPU capacity, thus let's
6541 	 * clamp to the maximum CPU capacity to ensure consistency with
6542 	 * the cpu_util call.
6543 	 */
6544 	return min_t(unsigned long, util, capacity_orig_of(cpu));
6545 }
6546 
6547 /*
6548  * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
6549  * to @dst_cpu.
6550  */
cpu_util_next(int cpu,struct task_struct * p,int dst_cpu)6551 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
6552 {
6553 	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
6554 	unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
6555 
6556 	/*
6557 	 * If @p migrates from @cpu to another, remove its contribution. Or,
6558 	 * if @p migrates from another CPU to @cpu, add its contribution. In
6559 	 * the other cases, @cpu is not impacted by the migration, so the
6560 	 * util_avg should already be correct.
6561 	 */
6562 	if (task_cpu(p) == cpu && dst_cpu != cpu)
6563 		sub_positive(&util, task_util(p));
6564 	else if (task_cpu(p) != cpu && dst_cpu == cpu)
6565 		util += task_util(p);
6566 
6567 	if (sched_feat(UTIL_EST)) {
6568 		util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
6569 
6570 		/*
6571 		 * During wake-up, the task isn't enqueued yet and doesn't
6572 		 * appear in the cfs_rq->avg.util_est.enqueued of any rq,
6573 		 * so just add it (if needed) to "simulate" what will be
6574 		 * cpu_util() after the task has been enqueued.
6575 		 */
6576 		if (dst_cpu == cpu)
6577 			util_est += _task_util_est(p);
6578 
6579 		util = max(util, util_est);
6580 	}
6581 
6582 	return min(util, capacity_orig_of(cpu));
6583 }
6584 
6585 /*
6586  * compute_energy(): Estimates the energy that @pd would consume if @p was
6587  * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
6588  * landscape of @pd's CPUs after the task migration, and uses the Energy Model
6589  * to compute what would be the energy if we decided to actually migrate that
6590  * task.
6591  */
6592 static long
compute_energy(struct task_struct * p,int dst_cpu,struct perf_domain * pd)6593 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
6594 {
6595 	struct cpumask *pd_mask = perf_domain_span(pd);
6596 	unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
6597 	unsigned long max_util = 0, sum_util = 0;
6598 	unsigned long energy = 0;
6599 	int cpu;
6600 
6601 	/*
6602 	 * The capacity state of CPUs of the current rd can be driven by CPUs
6603 	 * of another rd if they belong to the same pd. So, account for the
6604 	 * utilization of these CPUs too by masking pd with cpu_online_mask
6605 	 * instead of the rd span.
6606 	 *
6607 	 * If an entire pd is outside of the current rd, it will not appear in
6608 	 * its pd list and will not be accounted by compute_energy().
6609 	 */
6610 	for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
6611 		unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
6612 		struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
6613 
6614 		/*
6615 		 * Busy time computation: utilization clamping is not
6616 		 * required since the ratio (sum_util / cpu_capacity)
6617 		 * is already enough to scale the EM reported power
6618 		 * consumption at the (eventually clamped) cpu_capacity.
6619 		 */
6620 		sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap,
6621 					       ENERGY_UTIL, NULL);
6622 
6623 		/*
6624 		 * Performance domain frequency: utilization clamping
6625 		 * must be considered since it affects the selection
6626 		 * of the performance domain frequency.
6627 		 * NOTE: in case RT tasks are running, by default the
6628 		 * FREQUENCY_UTIL's utilization can be max OPP.
6629 		 */
6630 		cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap,
6631 					      FREQUENCY_UTIL, tsk);
6632 		max_util = max(max_util, cpu_util);
6633 	}
6634 
6635 	trace_android_vh_em_cpu_energy(pd->em_pd, max_util, sum_util, &energy);
6636 	if (!energy)
6637 		energy = em_cpu_energy(pd->em_pd, max_util, sum_util);
6638 
6639 	return energy;
6640 }
6641 
6642 /*
6643  * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
6644  * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
6645  * spare capacity in each performance domain and uses it as a potential
6646  * candidate to execute the task. Then, it uses the Energy Model to figure
6647  * out which of the CPU candidates is the most energy-efficient.
6648  *
6649  * The rationale for this heuristic is as follows. In a performance domain,
6650  * all the most energy efficient CPU candidates (according to the Energy
6651  * Model) are those for which we'll request a low frequency. When there are
6652  * several CPUs for which the frequency request will be the same, we don't
6653  * have enough data to break the tie between them, because the Energy Model
6654  * only includes active power costs. With this model, if we assume that
6655  * frequency requests follow utilization (e.g. using schedutil), the CPU with
6656  * the maximum spare capacity in a performance domain is guaranteed to be among
6657  * the best candidates of the performance domain.
6658  *
6659  * In practice, it could be preferable from an energy standpoint to pack
6660  * small tasks on a CPU in order to let other CPUs go in deeper idle states,
6661  * but that could also hurt our chances to go cluster idle, and we have no
6662  * ways to tell with the current Energy Model if this is actually a good
6663  * idea or not. So, find_energy_efficient_cpu() basically favors
6664  * cluster-packing, and spreading inside a cluster. That should at least be
6665  * a good thing for latency, and this is consistent with the idea that most
6666  * of the energy savings of EAS come from the asymmetry of the system, and
6667  * not so much from breaking the tie between identical CPUs. That's also the
6668  * reason why EAS is enabled in the topology code only for systems where
6669  * SD_ASYM_CPUCAPACITY is set.
6670  *
6671  * NOTE: Forkees are not accepted in the energy-aware wake-up path because
6672  * they don't have any useful utilization data yet and it's not possible to
6673  * forecast their impact on energy consumption. Consequently, they will be
6674  * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
6675  * to be energy-inefficient in some use-cases. The alternative would be to
6676  * bias new tasks towards specific types of CPUs first, or to try to infer
6677  * their util_avg from the parent task, but those heuristics could hurt
6678  * other use-cases too. So, until someone finds a better way to solve this,
6679  * let's keep things simple by re-using the existing slow path.
6680  */
find_energy_efficient_cpu(struct task_struct * p,int prev_cpu,int sync)6681 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
6682 {
6683 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
6684 	unsigned long best_delta2 = ULONG_MAX;
6685 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
6686 	int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
6687 	unsigned long max_spare_cap_ls = 0, target_cap;
6688 	unsigned long cpu_cap, util, base_energy = 0;
6689 	bool boosted, latency_sensitive = false;
6690 	unsigned int min_exit_lat = UINT_MAX;
6691 	int cpu, best_energy_cpu = prev_cpu;
6692 	struct cpuidle_state *idle;
6693 	struct sched_domain *sd;
6694 	struct perf_domain *pd;
6695 	int new_cpu = INT_MAX;
6696 
6697 	sync_entity_load_avg(&p->se);
6698 	trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
6699 	if (new_cpu != INT_MAX)
6700 		return new_cpu;
6701 
6702 	rcu_read_lock();
6703 	pd = rcu_dereference(rd->pd);
6704 	if (!pd || READ_ONCE(rd->overutilized))
6705 		goto fail;
6706 
6707 	cpu = smp_processor_id();
6708 	if (sync && cpu_rq(cpu)->nr_running == 1 &&
6709 	    cpumask_test_cpu(cpu, p->cpus_ptr) &&
6710 	    task_fits_capacity(p, capacity_of(cpu))) {
6711 		rcu_read_unlock();
6712 		return cpu;
6713 	}
6714 
6715 	/*
6716 	 * Energy-aware wake-up happens on the lowest sched_domain starting
6717 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
6718 	 */
6719 	sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
6720 	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
6721 		sd = sd->parent;
6722 	if (!sd)
6723 		goto fail;
6724 
6725 	if (!task_util_est(p))
6726 		goto unlock;
6727 
6728 	latency_sensitive = uclamp_latency_sensitive(p);
6729 	boosted = uclamp_boosted(p);
6730 	target_cap = boosted ? 0 : ULONG_MAX;
6731 
6732 	for (; pd; pd = pd->next) {
6733 		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
6734 		unsigned long base_energy_pd;
6735 		int max_spare_cap_cpu = -1;
6736 
6737 		/* Compute the 'base' energy of the pd, without @p */
6738 		base_energy_pd = compute_energy(p, -1, pd);
6739 		base_energy += base_energy_pd;
6740 
6741 		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
6742 			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
6743 				continue;
6744 
6745 			util = cpu_util_next(cpu, p, cpu);
6746 			cpu_cap = capacity_of(cpu);
6747 			spare_cap = cpu_cap;
6748 			lsub_positive(&spare_cap, util);
6749 
6750 			/*
6751 			 * Skip CPUs that cannot satisfy the capacity request.
6752 			 * IOW, placing the task there would make the CPU
6753 			 * overutilized. Take uclamp into account to see how
6754 			 * much capacity we can get out of the CPU; this is
6755 			 * aligned with schedutil_cpu_util().
6756 			 */
6757 			util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
6758 			if (!fits_capacity(util, cpu_cap))
6759 				continue;
6760 
6761 			/* Always use prev_cpu as a candidate. */
6762 			if (!latency_sensitive && cpu == prev_cpu) {
6763 				prev_delta = compute_energy(p, prev_cpu, pd);
6764 				prev_delta -= base_energy_pd;
6765 				best_delta = min(best_delta, prev_delta);
6766 				if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6767 					if (prev_delta == best_delta)
6768 						best_energy_cpu = prev_cpu;
6769 				}
6770 			}
6771 
6772 			/*
6773 			 * Find the CPU with the maximum spare capacity in
6774 			 * the performance domain
6775 			 */
6776 			if (spare_cap > max_spare_cap) {
6777 				max_spare_cap = spare_cap;
6778 				max_spare_cap_cpu = cpu;
6779 			}
6780 
6781 			if (!IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6782 				if (!latency_sensitive)
6783 					continue;
6784 			}
6785 
6786 			if (idle_cpu(cpu)) {
6787 				cpu_cap = capacity_orig_of(cpu);
6788 				if (boosted && cpu_cap < target_cap)
6789 					continue;
6790 				if (!boosted && cpu_cap > target_cap)
6791 					continue;
6792 				idle = idle_get_state(cpu_rq(cpu));
6793 				if (idle && idle->exit_latency > min_exit_lat &&
6794 						cpu_cap == target_cap)
6795 					continue;
6796 
6797 				if (idle)
6798 					min_exit_lat = idle->exit_latency;
6799 				target_cap = cpu_cap;
6800 				best_idle_cpu = cpu;
6801 				if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6802 					best_delta2 = compute_energy(p, cpu, pd);
6803 					best_delta2 -= base_energy_pd;
6804 				}
6805 			} else if (spare_cap > max_spare_cap_ls) {
6806 				max_spare_cap_ls = spare_cap;
6807 				max_spare_cap_cpu_ls = cpu;
6808 				if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6809 					if (best_idle_cpu == -1) {
6810 						best_delta2 = compute_energy(p, cpu, pd);
6811 						best_delta2 -= base_energy_pd;
6812 					}
6813 				}
6814 			}
6815 		}
6816 
6817 		/* Evaluate the energy impact of using this CPU. */
6818 		if (!latency_sensitive && max_spare_cap_cpu >= 0 &&
6819 						max_spare_cap_cpu != prev_cpu) {
6820 			cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
6821 			cur_delta -= base_energy_pd;
6822 			if (cur_delta < best_delta) {
6823 				best_delta = cur_delta;
6824 				best_energy_cpu = max_spare_cap_cpu;
6825 			}
6826 		}
6827 	}
6828 unlock:
6829 	rcu_read_unlock();
6830 
6831 	if (latency_sensitive)
6832 		return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;
6833 
6834 	/*
6835 	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
6836 	 * least 6% of the energy used by prev_cpu.
6837 	 */
6838 	if (prev_delta == ULONG_MAX)
6839 		return best_energy_cpu;
6840 
6841 	if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
6842 		return best_energy_cpu;
6843 
6844 	if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6845 		struct cpumask *cpul_mask = rockchip_perf_get_cpul_mask();
6846 		struct cpumask *cpub_mask = rockchip_perf_get_cpub_mask();
6847 		int level = rockchip_perf_get_level();
6848 
6849 		/*
6850 		 * when select ROCKCHIP_PERFORMANCE_LOW:
6851 		 * Pick best_energy_cpu if prev_cpu is big cpu and best_energy_cpu
6852 		 * is little cpu, so that tasks can migrate from big cpu to little
6853 		 * cpu easier to save power.
6854 		 */
6855 		if ((level == ROCKCHIP_PERFORMANCE_LOW) && cpul_mask &&
6856 		    cpub_mask && cpumask_test_cpu(prev_cpu, cpub_mask) &&
6857 		    cpumask_test_cpu(best_energy_cpu, cpul_mask)) {
6858 			return best_energy_cpu;
6859 		}
6860 
6861 		/*
6862 		 * Pick the idlest cpu if it is a little power increased(<3.1%).
6863 		 */
6864 		if ((best_delta2 <= prev_delta) ||
6865 			((best_delta2 - prev_delta) < ((prev_delta + base_energy) >> 5)))
6866 			return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;
6867 	}
6868 
6869 	return prev_cpu;
6870 
6871 fail:
6872 	rcu_read_unlock();
6873 
6874 	return -1;
6875 }
6876 
6877 /*
6878  * select_task_rq_fair: Select target runqueue for the waking task in domains
6879  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
6880  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
6881  *
6882  * Balances load by selecting the idlest CPU in the idlest group, or under
6883  * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
6884  *
6885  * Returns the target CPU number.
6886  *
6887  * preempt must be disabled.
6888  */
6889 static int
select_task_rq_fair(struct task_struct * p,int prev_cpu,int sd_flag,int wake_flags)6890 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
6891 {
6892 	struct sched_domain *tmp, *sd = NULL;
6893 	int cpu = smp_processor_id();
6894 	int new_cpu = prev_cpu;
6895 	int want_affine = 0;
6896 	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
6897 	int target_cpu = -1;
6898 
6899 	if (trace_android_rvh_select_task_rq_fair_enabled() &&
6900 	    !(sd_flag & SD_BALANCE_FORK))
6901 		sync_entity_load_avg(&p->se);
6902 	trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
6903 			wake_flags, &target_cpu);
6904 	if (target_cpu >= 0)
6905 		return target_cpu;
6906 
6907 	if (sd_flag & SD_BALANCE_WAKE) {
6908 		record_wakee(p);
6909 
6910 		if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6911 			if (rockchip_perf_get_level() == ROCKCHIP_PERFORMANCE_HIGH)
6912 				goto no_eas;
6913 		}
6914 
6915 		if (sched_energy_enabled()) {
6916 			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
6917 			if (new_cpu >= 0)
6918 				return new_cpu;
6919 			new_cpu = prev_cpu;
6920 		}
6921 
6922 no_eas:
6923 		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
6924 	}
6925 
6926 	rcu_read_lock();
6927 	for_each_domain(cpu, tmp) {
6928 		/*
6929 		 * If both 'cpu' and 'prev_cpu' are part of this domain,
6930 		 * cpu is a valid SD_WAKE_AFFINE target.
6931 		 */
6932 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
6933 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
6934 			if (cpu != prev_cpu)
6935 				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
6936 
6937 			sd = NULL; /* Prefer wake_affine over balance flags */
6938 			break;
6939 		}
6940 
6941 		if (tmp->flags & sd_flag)
6942 			sd = tmp;
6943 		else if (!want_affine)
6944 			break;
6945 	}
6946 
6947 	if (unlikely(sd)) {
6948 		/* Slow path */
6949 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
6950 	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
6951 		/* Fast path */
6952 
6953 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
6954 
6955 		if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
6956 			struct root_domain *rd = cpu_rq(cpu)->rd;
6957 			struct cpumask *cpul_mask = rockchip_perf_get_cpul_mask();
6958 			struct cpumask *cpub_mask = rockchip_perf_get_cpub_mask();
6959 			int level = rockchip_perf_get_level();
6960 
6961 			if ((level == ROCKCHIP_PERFORMANCE_HIGH) && !READ_ONCE(rd->overutilized) &&
6962 			    cpul_mask && cpub_mask && cpumask_intersects(p->cpus_ptr, cpub_mask) &&
6963 			    cpumask_test_cpu(new_cpu, cpul_mask)) {
6964 				for_each_domain(cpu, tmp) {
6965 					sd = tmp;
6966 				}
6967 				if (sd)
6968 					new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
6969 			}
6970 		}
6971 
6972 		if (want_affine)
6973 			current->recent_used_cpu = cpu;
6974 	}
6975 	rcu_read_unlock();
6976 
6977 	return new_cpu;
6978 }
6979 
6980 static void detach_entity_cfs_rq(struct sched_entity *se);
6981 
6982 /*
6983  * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
6984  * cfs_rq_of(p) references at time of call are still valid and identify the
6985  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
6986  */
migrate_task_rq_fair(struct task_struct * p,int new_cpu)6987 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
6988 {
6989 	/*
6990 	 * As blocked tasks retain absolute vruntime the migration needs to
6991 	 * deal with this by subtracting the old and adding the new
6992 	 * min_vruntime -- the latter is done by enqueue_entity() when placing
6993 	 * the task on the new runqueue.
6994 	 */
6995 	if (p->state == TASK_WAKING) {
6996 		struct sched_entity *se = &p->se;
6997 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
6998 		u64 min_vruntime;
6999 
7000 #ifndef CONFIG_64BIT
7001 		u64 min_vruntime_copy;
7002 
7003 		do {
7004 			min_vruntime_copy = cfs_rq->min_vruntime_copy;
7005 			smp_rmb();
7006 			min_vruntime = cfs_rq->min_vruntime;
7007 		} while (min_vruntime != min_vruntime_copy);
7008 #else
7009 		min_vruntime = cfs_rq->min_vruntime;
7010 #endif
7011 
7012 		se->vruntime -= min_vruntime;
7013 	}
7014 
7015 	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
7016 		/*
7017 		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
7018 		 * rq->lock and can modify state directly.
7019 		 */
7020 		lockdep_assert_held(&task_rq(p)->lock);
7021 		detach_entity_cfs_rq(&p->se);
7022 
7023 	} else {
7024 		/*
7025 		 * We are supposed to update the task to "current" time, then
7026 		 * its up to date and ready to go to new CPU/cfs_rq. But we
7027 		 * have difficulty in getting what current time is, so simply
7028 		 * throw away the out-of-date time. This will result in the
7029 		 * wakee task is less decayed, but giving the wakee more load
7030 		 * sounds not bad.
7031 		 */
7032 		remove_entity_load_avg(&p->se);
7033 	}
7034 
7035 	/* Tell new CPU we are migrated */
7036 	p->se.avg.last_update_time = 0;
7037 
7038 	/* We have migrated, no longer consider this task hot */
7039 	p->se.exec_start = 0;
7040 
7041 	update_scan_period(p, new_cpu);
7042 }
7043 
task_dead_fair(struct task_struct * p)7044 static void task_dead_fair(struct task_struct *p)
7045 {
7046 	remove_entity_load_avg(&p->se);
7047 }
7048 
7049 static int
balance_fair(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)7050 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7051 {
7052 	if (rq->nr_running)
7053 		return 1;
7054 
7055 	return newidle_balance(rq, rf) != 0;
7056 }
7057 #endif /* CONFIG_SMP */
7058 
wakeup_gran(struct sched_entity * se)7059 static unsigned long wakeup_gran(struct sched_entity *se)
7060 {
7061 	unsigned long gran = sysctl_sched_wakeup_granularity;
7062 
7063 	/*
7064 	 * Since its curr running now, convert the gran from real-time
7065 	 * to virtual-time in his units.
7066 	 *
7067 	 * By using 'se' instead of 'curr' we penalize light tasks, so
7068 	 * they get preempted easier. That is, if 'se' < 'curr' then
7069 	 * the resulting gran will be larger, therefore penalizing the
7070 	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
7071 	 * be smaller, again penalizing the lighter task.
7072 	 *
7073 	 * This is especially important for buddies when the leftmost
7074 	 * task is higher priority than the buddy.
7075 	 */
7076 	return calc_delta_fair(gran, se);
7077 }
7078 
7079 /*
7080  * Should 'se' preempt 'curr'.
7081  *
7082  *             |s1
7083  *        |s2
7084  *   |s3
7085  *         g
7086  *      |<--->|c
7087  *
7088  *  w(c, s1) = -1
7089  *  w(c, s2) =  0
7090  *  w(c, s3) =  1
7091  *
7092  */
7093 static int
wakeup_preempt_entity(struct sched_entity * curr,struct sched_entity * se)7094 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
7095 {
7096 	s64 gran, vdiff = curr->vruntime - se->vruntime;
7097 
7098 	if (vdiff <= 0)
7099 		return -1;
7100 
7101 	gran = wakeup_gran(se);
7102 	if (vdiff > gran)
7103 		return 1;
7104 
7105 	return 0;
7106 }
7107 
set_last_buddy(struct sched_entity * se)7108 static void set_last_buddy(struct sched_entity *se)
7109 {
7110 	if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
7111 		return;
7112 
7113 	for_each_sched_entity(se) {
7114 		if (SCHED_WARN_ON(!se->on_rq))
7115 			return;
7116 		cfs_rq_of(se)->last = se;
7117 	}
7118 }
7119 
set_next_buddy(struct sched_entity * se)7120 static void set_next_buddy(struct sched_entity *se)
7121 {
7122 	if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
7123 		return;
7124 
7125 	for_each_sched_entity(se) {
7126 		if (SCHED_WARN_ON(!se->on_rq))
7127 			return;
7128 		cfs_rq_of(se)->next = se;
7129 	}
7130 }
7131 
set_skip_buddy(struct sched_entity * se)7132 static void set_skip_buddy(struct sched_entity *se)
7133 {
7134 	for_each_sched_entity(se)
7135 		cfs_rq_of(se)->skip = se;
7136 }
7137 
7138 /*
7139  * Preempt the current task with a newly woken task if needed:
7140  */
check_preempt_wakeup(struct rq * rq,struct task_struct * p,int wake_flags)7141 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
7142 {
7143 	struct task_struct *curr = rq->curr;
7144 	struct sched_entity *se = &curr->se, *pse = &p->se;
7145 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7146 	int scale = cfs_rq->nr_running >= sched_nr_latency;
7147 	int next_buddy_marked = 0;
7148 	bool preempt = false, nopreempt = false;
7149 
7150 	if (unlikely(se == pse))
7151 		return;
7152 
7153 	/*
7154 	 * This is possible from callers such as attach_tasks(), in which we
7155 	 * unconditionally check_prempt_curr() after an enqueue (which may have
7156 	 * lead to a throttle).  This both saves work and prevents false
7157 	 * next-buddy nomination below.
7158 	 */
7159 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
7160 		return;
7161 
7162 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
7163 		set_next_buddy(pse);
7164 		next_buddy_marked = 1;
7165 	}
7166 
7167 	/*
7168 	 * We can come here with TIF_NEED_RESCHED already set from new task
7169 	 * wake up path.
7170 	 *
7171 	 * Note: this also catches the edge-case of curr being in a throttled
7172 	 * group (e.g. via set_curr_task), since update_curr() (in the
7173 	 * enqueue of curr) will have resulted in resched being set.  This
7174 	 * prevents us from potentially nominating it as a false LAST_BUDDY
7175 	 * below.
7176 	 */
7177 	if (test_tsk_need_resched(curr))
7178 		return;
7179 
7180 	/* Idle tasks are by definition preempted by non-idle tasks. */
7181 	if (unlikely(task_has_idle_policy(curr)) &&
7182 	    likely(!task_has_idle_policy(p)))
7183 		goto preempt;
7184 
7185 	/*
7186 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
7187 	 * is driven by the tick):
7188 	 */
7189 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
7190 		return;
7191 
7192 	find_matching_se(&se, &pse);
7193 	update_curr(cfs_rq_of(se));
7194 	trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &nopreempt,
7195 			wake_flags, se, pse, next_buddy_marked, sysctl_sched_wakeup_granularity);
7196 	if (preempt)
7197 		goto preempt;
7198 	if (nopreempt)
7199 		return;
7200 	BUG_ON(!pse);
7201 	if (wakeup_preempt_entity(se, pse) == 1) {
7202 		/*
7203 		 * Bias pick_next to pick the sched entity that is
7204 		 * triggering this preemption.
7205 		 */
7206 		if (!next_buddy_marked)
7207 			set_next_buddy(pse);
7208 		goto preempt;
7209 	}
7210 
7211 	return;
7212 
7213 preempt:
7214 	resched_curr(rq);
7215 	/*
7216 	 * Only set the backward buddy when the current task is still
7217 	 * on the rq. This can happen when a wakeup gets interleaved
7218 	 * with schedule on the ->pre_schedule() or idle_balance()
7219 	 * point, either of which can * drop the rq lock.
7220 	 *
7221 	 * Also, during early boot the idle thread is in the fair class,
7222 	 * for obvious reasons its a bad idea to schedule back to it.
7223 	 */
7224 	if (unlikely(!se->on_rq || curr == rq->idle))
7225 		return;
7226 
7227 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
7228 		set_last_buddy(se);
7229 }
7230 
7231 struct task_struct *
pick_next_task_fair(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)7232 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7233 {
7234 	struct cfs_rq *cfs_rq = &rq->cfs;
7235 	struct sched_entity *se = NULL;
7236 	struct task_struct *p = NULL;
7237 	int new_tasks;
7238 	bool repick = false;
7239 
7240 again:
7241 	if (!sched_fair_runnable(rq))
7242 		goto idle;
7243 
7244 #ifdef CONFIG_FAIR_GROUP_SCHED
7245 	if (!prev || prev->sched_class != &fair_sched_class)
7246 		goto simple;
7247 
7248 	/*
7249 	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
7250 	 * likely that a next task is from the same cgroup as the current.
7251 	 *
7252 	 * Therefore attempt to avoid putting and setting the entire cgroup
7253 	 * hierarchy, only change the part that actually changes.
7254 	 */
7255 
7256 	do {
7257 		struct sched_entity *curr = cfs_rq->curr;
7258 
7259 		/*
7260 		 * Since we got here without doing put_prev_entity() we also
7261 		 * have to consider cfs_rq->curr. If it is still a runnable
7262 		 * entity, update_curr() will update its vruntime, otherwise
7263 		 * forget we've ever seen it.
7264 		 */
7265 		if (curr) {
7266 			if (curr->on_rq)
7267 				update_curr(cfs_rq);
7268 			else
7269 				curr = NULL;
7270 
7271 			/*
7272 			 * This call to check_cfs_rq_runtime() will do the
7273 			 * throttle and dequeue its entity in the parent(s).
7274 			 * Therefore the nr_running test will indeed
7275 			 * be correct.
7276 			 */
7277 			if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
7278 				cfs_rq = &rq->cfs;
7279 
7280 				if (!cfs_rq->nr_running)
7281 					goto idle;
7282 
7283 				goto simple;
7284 			}
7285 		}
7286 
7287 		se = pick_next_entity(cfs_rq, curr);
7288 		cfs_rq = group_cfs_rq(se);
7289 	} while (cfs_rq);
7290 
7291 	p = task_of(se);
7292 	trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev);
7293 	/*
7294 	 * Since we haven't yet done put_prev_entity and if the selected task
7295 	 * is a different task than we started out with, try and touch the
7296 	 * least amount of cfs_rqs.
7297 	 */
7298 	if (prev != p) {
7299 		struct sched_entity *pse = &prev->se;
7300 
7301 		while (!(cfs_rq = is_same_group(se, pse))) {
7302 			int se_depth = se->depth;
7303 			int pse_depth = pse->depth;
7304 
7305 			if (se_depth <= pse_depth) {
7306 				put_prev_entity(cfs_rq_of(pse), pse);
7307 				pse = parent_entity(pse);
7308 			}
7309 			if (se_depth >= pse_depth) {
7310 				set_next_entity(cfs_rq_of(se), se);
7311 				se = parent_entity(se);
7312 			}
7313 		}
7314 
7315 		put_prev_entity(cfs_rq, pse);
7316 		set_next_entity(cfs_rq, se);
7317 	}
7318 
7319 	goto done;
7320 simple:
7321 #endif
7322 	if (prev)
7323 		put_prev_task(rq, prev);
7324 
7325 	trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev);
7326 	if (repick) {
7327 		for_each_sched_entity(se)
7328 			set_next_entity(cfs_rq_of(se), se);
7329 		goto done;
7330 	}
7331 
7332 	do {
7333 		se = pick_next_entity(cfs_rq, NULL);
7334 		set_next_entity(cfs_rq, se);
7335 		cfs_rq = group_cfs_rq(se);
7336 	} while (cfs_rq);
7337 
7338 	p = task_of(se);
7339 
7340 done: __maybe_unused;
7341 #ifdef CONFIG_SMP
7342 	/*
7343 	 * Move the next running task to the front of
7344 	 * the list, so our cfs_tasks list becomes MRU
7345 	 * one.
7346 	 */
7347 	list_move(&p->se.group_node, &rq->cfs_tasks);
7348 #endif
7349 
7350 	if (hrtick_enabled(rq))
7351 		hrtick_start_fair(rq, p);
7352 
7353 	update_misfit_status(p, rq);
7354 
7355 	return p;
7356 
7357 idle:
7358 	if (!rf)
7359 		return NULL;
7360 
7361 	new_tasks = newidle_balance(rq, rf);
7362 
7363 	/*
7364 	 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
7365 	 * possible for any higher priority task to appear. In that case we
7366 	 * must re-start the pick_next_entity() loop.
7367 	 */
7368 	if (new_tasks < 0)
7369 		return RETRY_TASK;
7370 
7371 	if (new_tasks > 0)
7372 		goto again;
7373 
7374 	/*
7375 	 * rq is about to be idle, check if we need to update the
7376 	 * lost_idle_time of clock_pelt
7377 	 */
7378 	update_idle_rq_clock_pelt(rq);
7379 
7380 	return NULL;
7381 }
7382 
__pick_next_task_fair(struct rq * rq)7383 static struct task_struct *__pick_next_task_fair(struct rq *rq)
7384 {
7385 	return pick_next_task_fair(rq, NULL, NULL);
7386 }
7387 
7388 /*
7389  * Account for a descheduled task:
7390  */
put_prev_task_fair(struct rq * rq,struct task_struct * prev)7391 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
7392 {
7393 	struct sched_entity *se = &prev->se;
7394 	struct cfs_rq *cfs_rq;
7395 
7396 	for_each_sched_entity(se) {
7397 		cfs_rq = cfs_rq_of(se);
7398 		put_prev_entity(cfs_rq, se);
7399 	}
7400 }
7401 
7402 /*
7403  * sched_yield() is very simple
7404  *
7405  * The magic of dealing with the ->skip buddy is in pick_next_entity.
7406  */
yield_task_fair(struct rq * rq)7407 static void yield_task_fair(struct rq *rq)
7408 {
7409 	struct task_struct *curr = rq->curr;
7410 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7411 	struct sched_entity *se = &curr->se;
7412 
7413 	/*
7414 	 * Are we the only task in the tree?
7415 	 */
7416 	if (unlikely(rq->nr_running == 1))
7417 		return;
7418 
7419 	clear_buddies(cfs_rq, se);
7420 
7421 	if (curr->policy != SCHED_BATCH) {
7422 		update_rq_clock(rq);
7423 		/*
7424 		 * Update run-time statistics of the 'current'.
7425 		 */
7426 		update_curr(cfs_rq);
7427 		/*
7428 		 * Tell update_rq_clock() that we've just updated,
7429 		 * so we don't do microscopic update in schedule()
7430 		 * and double the fastpath cost.
7431 		 */
7432 		rq_clock_skip_update(rq);
7433 	}
7434 
7435 	set_skip_buddy(se);
7436 }
7437 
yield_to_task_fair(struct rq * rq,struct task_struct * p)7438 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
7439 {
7440 	struct sched_entity *se = &p->se;
7441 
7442 	/* throttled hierarchies are not runnable */
7443 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
7444 		return false;
7445 
7446 	/* Tell the scheduler that we'd really like pse to run next. */
7447 	set_next_buddy(se);
7448 
7449 	yield_task_fair(rq);
7450 
7451 	return true;
7452 }
7453 
7454 #ifdef CONFIG_SMP
7455 /**************************************************
7456  * Fair scheduling class load-balancing methods.
7457  *
7458  * BASICS
7459  *
7460  * The purpose of load-balancing is to achieve the same basic fairness the
7461  * per-CPU scheduler provides, namely provide a proportional amount of compute
7462  * time to each task. This is expressed in the following equation:
7463  *
7464  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
7465  *
7466  * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
7467  * W_i,0 is defined as:
7468  *
7469  *   W_i,0 = \Sum_j w_i,j                                             (2)
7470  *
7471  * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
7472  * is derived from the nice value as per sched_prio_to_weight[].
7473  *
7474  * The weight average is an exponential decay average of the instantaneous
7475  * weight:
7476  *
7477  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
7478  *
7479  * C_i is the compute capacity of CPU i, typically it is the
7480  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
7481  * can also include other factors [XXX].
7482  *
7483  * To achieve this balance we define a measure of imbalance which follows
7484  * directly from (1):
7485  *
7486  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
7487  *
7488  * We them move tasks around to minimize the imbalance. In the continuous
7489  * function space it is obvious this converges, in the discrete case we get
7490  * a few fun cases generally called infeasible weight scenarios.
7491  *
7492  * [XXX expand on:
7493  *     - infeasible weights;
7494  *     - local vs global optima in the discrete case. ]
7495  *
7496  *
7497  * SCHED DOMAINS
7498  *
7499  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
7500  * for all i,j solution, we create a tree of CPUs that follows the hardware
7501  * topology where each level pairs two lower groups (or better). This results
7502  * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
7503  * tree to only the first of the previous level and we decrease the frequency
7504  * of load-balance at each level inv. proportional to the number of CPUs in
7505  * the groups.
7506  *
7507  * This yields:
7508  *
7509  *     log_2 n     1     n
7510  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
7511  *     i = 0      2^i   2^i
7512  *                               `- size of each group
7513  *         |         |     `- number of CPUs doing load-balance
7514  *         |         `- freq
7515  *         `- sum over all levels
7516  *
7517  * Coupled with a limit on how many tasks we can migrate every balance pass,
7518  * this makes (5) the runtime complexity of the balancer.
7519  *
7520  * An important property here is that each CPU is still (indirectly) connected
7521  * to every other CPU in at most O(log n) steps:
7522  *
7523  * The adjacency matrix of the resulting graph is given by:
7524  *
7525  *             log_2 n
7526  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
7527  *             k = 0
7528  *
7529  * And you'll find that:
7530  *
7531  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
7532  *
7533  * Showing there's indeed a path between every CPU in at most O(log n) steps.
7534  * The task movement gives a factor of O(m), giving a convergence complexity
7535  * of:
7536  *
7537  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
7538  *
7539  *
7540  * WORK CONSERVING
7541  *
7542  * In order to avoid CPUs going idle while there's still work to do, new idle
7543  * balancing is more aggressive and has the newly idle CPU iterate up the domain
7544  * tree itself instead of relying on other CPUs to bring it work.
7545  *
7546  * This adds some complexity to both (5) and (8) but it reduces the total idle
7547  * time.
7548  *
7549  * [XXX more?]
7550  *
7551  *
7552  * CGROUPS
7553  *
7554  * Cgroups make a horror show out of (2), instead of a simple sum we get:
7555  *
7556  *                                s_k,i
7557  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
7558  *                                 S_k
7559  *
7560  * Where
7561  *
7562  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
7563  *
7564  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
7565  *
7566  * The big problem is S_k, its a global sum needed to compute a local (W_i)
7567  * property.
7568  *
7569  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
7570  *      rewrite all of this once again.]
7571  */
7572 
7573 unsigned long __read_mostly max_load_balance_interval = HZ/10;
7574 EXPORT_SYMBOL_GPL(max_load_balance_interval);
7575 
7576 enum fbq_type { regular, remote, all };
7577 
7578 /*
7579  * 'group_type' describes the group of CPUs at the moment of load balancing.
7580  *
7581  * The enum is ordered by pulling priority, with the group with lowest priority
7582  * first so the group_type can simply be compared when selecting the busiest
7583  * group. See update_sd_pick_busiest().
7584  */
7585 enum group_type {
7586 	/* The group has spare capacity that can be used to run more tasks.  */
7587 	group_has_spare = 0,
7588 	/*
7589 	 * The group is fully used and the tasks don't compete for more CPU
7590 	 * cycles. Nevertheless, some tasks might wait before running.
7591 	 */
7592 	group_fully_busy,
7593 	/*
7594 	 * SD_ASYM_CPUCAPACITY only: One task doesn't fit with CPU's capacity
7595 	 * and must be migrated to a more powerful CPU.
7596 	 */
7597 	group_misfit_task,
7598 	/*
7599 	 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
7600 	 * and the task should be migrated to it instead of running on the
7601 	 * current CPU.
7602 	 */
7603 	group_asym_packing,
7604 	/*
7605 	 * The tasks' affinity constraints previously prevented the scheduler
7606 	 * from balancing the load across the system.
7607 	 */
7608 	group_imbalanced,
7609 	/*
7610 	 * The CPU is overloaded and can't provide expected CPU cycles to all
7611 	 * tasks.
7612 	 */
7613 	group_overloaded
7614 };
7615 
7616 enum migration_type {
7617 	migrate_load = 0,
7618 	migrate_util,
7619 	migrate_task,
7620 	migrate_misfit
7621 };
7622 
7623 #define LBF_ALL_PINNED	0x01
7624 #define LBF_NEED_BREAK	0x02
7625 #define LBF_DST_PINNED  0x04
7626 #define LBF_SOME_PINNED	0x08
7627 #define LBF_NOHZ_STATS	0x10
7628 #define LBF_NOHZ_AGAIN	0x20
7629 
7630 struct lb_env {
7631 	struct sched_domain	*sd;
7632 
7633 	struct rq		*src_rq;
7634 	int			src_cpu;
7635 
7636 	int			dst_cpu;
7637 	struct rq		*dst_rq;
7638 
7639 	struct cpumask		*dst_grpmask;
7640 	int			new_dst_cpu;
7641 	enum cpu_idle_type	idle;
7642 	long			imbalance;
7643 	/* The set of CPUs under consideration for load-balancing */
7644 	struct cpumask		*cpus;
7645 
7646 	unsigned int		flags;
7647 
7648 	unsigned int		loop;
7649 	unsigned int		loop_break;
7650 	unsigned int		loop_max;
7651 
7652 	enum fbq_type		fbq_type;
7653 	enum migration_type	migration_type;
7654 	struct list_head	tasks;
7655 	struct rq_flags		*src_rq_rf;
7656 };
7657 
7658 /*
7659  * Is this task likely cache-hot:
7660  */
task_hot(struct task_struct * p,struct lb_env * env)7661 static int task_hot(struct task_struct *p, struct lb_env *env)
7662 {
7663 	s64 delta;
7664 
7665 	lockdep_assert_held(&env->src_rq->lock);
7666 
7667 	if (p->sched_class != &fair_sched_class)
7668 		return 0;
7669 
7670 	if (unlikely(task_has_idle_policy(p)))
7671 		return 0;
7672 
7673 	/* SMT siblings share cache */
7674 	if (env->sd->flags & SD_SHARE_CPUCAPACITY)
7675 		return 0;
7676 
7677 	/*
7678 	 * Buddy candidates are cache hot:
7679 	 */
7680 	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
7681 			(&p->se == cfs_rq_of(&p->se)->next ||
7682 			 &p->se == cfs_rq_of(&p->se)->last))
7683 		return 1;
7684 
7685 	if (sysctl_sched_migration_cost == -1)
7686 		return 1;
7687 	if (sysctl_sched_migration_cost == 0)
7688 		return 0;
7689 
7690 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
7691 
7692 	return delta < (s64)sysctl_sched_migration_cost;
7693 }
7694 
7695 #ifdef CONFIG_NUMA_BALANCING
7696 /*
7697  * Returns 1, if task migration degrades locality
7698  * Returns 0, if task migration improves locality i.e migration preferred.
7699  * Returns -1, if task migration is not affected by locality.
7700  */
migrate_degrades_locality(struct task_struct * p,struct lb_env * env)7701 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
7702 {
7703 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
7704 	unsigned long src_weight, dst_weight;
7705 	int src_nid, dst_nid, dist;
7706 
7707 	if (!static_branch_likely(&sched_numa_balancing))
7708 		return -1;
7709 
7710 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
7711 		return -1;
7712 
7713 	src_nid = cpu_to_node(env->src_cpu);
7714 	dst_nid = cpu_to_node(env->dst_cpu);
7715 
7716 	if (src_nid == dst_nid)
7717 		return -1;
7718 
7719 	/* Migrating away from the preferred node is always bad. */
7720 	if (src_nid == p->numa_preferred_nid) {
7721 		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
7722 			return 1;
7723 		else
7724 			return -1;
7725 	}
7726 
7727 	/* Encourage migration to the preferred node. */
7728 	if (dst_nid == p->numa_preferred_nid)
7729 		return 0;
7730 
7731 	/* Leaving a core idle is often worse than degrading locality. */
7732 	if (env->idle == CPU_IDLE)
7733 		return -1;
7734 
7735 	dist = node_distance(src_nid, dst_nid);
7736 	if (numa_group) {
7737 		src_weight = group_weight(p, src_nid, dist);
7738 		dst_weight = group_weight(p, dst_nid, dist);
7739 	} else {
7740 		src_weight = task_weight(p, src_nid, dist);
7741 		dst_weight = task_weight(p, dst_nid, dist);
7742 	}
7743 
7744 	return dst_weight < src_weight;
7745 }
7746 
7747 #else
migrate_degrades_locality(struct task_struct * p,struct lb_env * env)7748 static inline int migrate_degrades_locality(struct task_struct *p,
7749 					     struct lb_env *env)
7750 {
7751 	return -1;
7752 }
7753 #endif
7754 
7755 /*
7756  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
7757  */
7758 static
can_migrate_task(struct task_struct * p,struct lb_env * env)7759 int can_migrate_task(struct task_struct *p, struct lb_env *env)
7760 {
7761 	int tsk_cache_hot;
7762 	int can_migrate = 1;
7763 
7764 	lockdep_assert_held(&env->src_rq->lock);
7765 
7766 	trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
7767 	if (!can_migrate)
7768 		return 0;
7769 
7770 	/*
7771 	 * We do not migrate tasks that are:
7772 	 * 1) throttled_lb_pair, or
7773 	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
7774 	 * 3) running (obviously), or
7775 	 * 4) are cache-hot on their current CPU.
7776 	 */
7777 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
7778 		return 0;
7779 
7780 	/* Disregard pcpu kthreads; they are where they need to be. */
7781 	if (kthread_is_per_cpu(p))
7782 		return 0;
7783 
7784 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
7785 		int cpu;
7786 
7787 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
7788 
7789 		env->flags |= LBF_SOME_PINNED;
7790 
7791 		/*
7792 		 * Remember if this task can be migrated to any other CPU in
7793 		 * our sched_group. We may want to revisit it if we couldn't
7794 		 * meet load balance goals by pulling other tasks on src_cpu.
7795 		 *
7796 		 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have
7797 		 * already computed one in current iteration.
7798 		 */
7799 		if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED))
7800 			return 0;
7801 
7802 		/* Prevent to re-select dst_cpu via env's CPUs: */
7803 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
7804 			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
7805 				env->flags |= LBF_DST_PINNED;
7806 				env->new_dst_cpu = cpu;
7807 				break;
7808 			}
7809 		}
7810 
7811 		return 0;
7812 	}
7813 
7814 	/* Record that we found atleast one task that could run on dst_cpu */
7815 	env->flags &= ~LBF_ALL_PINNED;
7816 
7817 	if (task_running(env->src_rq, p)) {
7818 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
7819 		return 0;
7820 	}
7821 
7822 	/*
7823 	 * Aggressive migration if:
7824 	 * 1) destination numa is preferred
7825 	 * 2) task is cache cold, or
7826 	 * 3) too many balance attempts have failed.
7827 	 */
7828 	tsk_cache_hot = migrate_degrades_locality(p, env);
7829 	if (tsk_cache_hot == -1)
7830 		tsk_cache_hot = task_hot(p, env);
7831 
7832 	if (tsk_cache_hot <= 0 ||
7833 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
7834 		if (tsk_cache_hot == 1) {
7835 			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
7836 			schedstat_inc(p->se.statistics.nr_forced_migrations);
7837 		}
7838 		return 1;
7839 	}
7840 
7841 	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
7842 	return 0;
7843 }
7844 
7845 /*
7846  * detach_task() -- detach the task for the migration specified in env
7847  */
detach_task(struct task_struct * p,struct lb_env * env)7848 static void detach_task(struct task_struct *p, struct lb_env *env)
7849 {
7850 	int detached = 0;
7851 
7852 	lockdep_assert_held(&env->src_rq->lock);
7853 
7854 	/*
7855 	 * The vendor hook may drop the lock temporarily, so
7856 	 * pass the rq flags to unpin lock. We expect the
7857 	 * rq lock to be held after return.
7858 	 */
7859 	trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
7860 					      env->dst_cpu, &detached);
7861 	if (detached)
7862 		return;
7863 
7864 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
7865 	set_task_cpu(p, env->dst_cpu);
7866 }
7867 
7868 /*
7869  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
7870  * part of active balancing operations within "domain".
7871  *
7872  * Returns a task if successful and NULL otherwise.
7873  */
detach_one_task(struct lb_env * env)7874 static struct task_struct *detach_one_task(struct lb_env *env)
7875 {
7876 	struct task_struct *p;
7877 
7878 	lockdep_assert_held(&env->src_rq->lock);
7879 
7880 	list_for_each_entry_reverse(p,
7881 			&env->src_rq->cfs_tasks, se.group_node) {
7882 		if (!can_migrate_task(p, env))
7883 			continue;
7884 
7885 		detach_task(p, env);
7886 
7887 		/*
7888 		 * Right now, this is only the second place where
7889 		 * lb_gained[env->idle] is updated (other is detach_tasks)
7890 		 * so we can safely collect stats here rather than
7891 		 * inside detach_tasks().
7892 		 */
7893 		schedstat_inc(env->sd->lb_gained[env->idle]);
7894 		return p;
7895 	}
7896 	return NULL;
7897 }
7898 
7899 static const unsigned int sched_nr_migrate_break = 32;
7900 
7901 /*
7902  * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
7903  * busiest_rq, as part of a balancing operation within domain "sd".
7904  *
7905  * Returns number of detached tasks if successful and 0 otherwise.
7906  */
detach_tasks(struct lb_env * env)7907 static int detach_tasks(struct lb_env *env)
7908 {
7909 	struct list_head *tasks = &env->src_rq->cfs_tasks;
7910 	unsigned long util, load;
7911 	struct task_struct *p;
7912 	int detached = 0;
7913 
7914 	lockdep_assert_held(&env->src_rq->lock);
7915 
7916 	if (env->imbalance <= 0)
7917 		return 0;
7918 
7919 	while (!list_empty(tasks)) {
7920 		/*
7921 		 * We don't want to steal all, otherwise we may be treated likewise,
7922 		 * which could at worst lead to a livelock crash.
7923 		 */
7924 		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
7925 			break;
7926 
7927 		p = list_last_entry(tasks, struct task_struct, se.group_node);
7928 
7929 		env->loop++;
7930 		/* We've more or less seen every task there is, call it quits */
7931 		if (env->loop > env->loop_max)
7932 			break;
7933 
7934 		/* take a breather every nr_migrate tasks */
7935 		if (env->loop > env->loop_break) {
7936 			env->loop_break += sched_nr_migrate_break;
7937 			env->flags |= LBF_NEED_BREAK;
7938 			break;
7939 		}
7940 
7941 		if (!can_migrate_task(p, env))
7942 			goto next;
7943 
7944 		switch (env->migration_type) {
7945 		case migrate_load:
7946 			/*
7947 			 * Depending of the number of CPUs and tasks and the
7948 			 * cgroup hierarchy, task_h_load() can return a null
7949 			 * value. Make sure that env->imbalance decreases
7950 			 * otherwise detach_tasks() will stop only after
7951 			 * detaching up to loop_max tasks.
7952 			 */
7953 			load = max_t(unsigned long, task_h_load(p), 1);
7954 
7955 			if (sched_feat(LB_MIN) &&
7956 			    load < 16 && !env->sd->nr_balance_failed)
7957 				goto next;
7958 
7959 			/*
7960 			 * Make sure that we don't migrate too much load.
7961 			 * Nevertheless, let relax the constraint if
7962 			 * scheduler fails to find a good waiting task to
7963 			 * migrate.
7964 			 */
7965 			if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
7966 				goto next;
7967 
7968 			env->imbalance -= load;
7969 			break;
7970 
7971 		case migrate_util:
7972 			util = task_util_est(p);
7973 
7974 			if (util > env->imbalance)
7975 				goto next;
7976 
7977 			env->imbalance -= util;
7978 			break;
7979 
7980 		case migrate_task:
7981 			env->imbalance--;
7982 			break;
7983 
7984 		case migrate_misfit:
7985 			/* This is not a misfit task */
7986 			if (task_fits_capacity(p, capacity_of(env->src_cpu)))
7987 				goto next;
7988 
7989 			env->imbalance = 0;
7990 			break;
7991 		}
7992 
7993 		detach_task(p, env);
7994 		list_add(&p->se.group_node, &env->tasks);
7995 
7996 		detached++;
7997 
7998 #ifdef CONFIG_PREEMPTION
7999 		/*
8000 		 * NEWIDLE balancing is a source of latency, so preemptible
8001 		 * kernels will stop after the first task is detached to minimize
8002 		 * the critical section.
8003 		 */
8004 		if (env->idle == CPU_NEWLY_IDLE)
8005 			break;
8006 #endif
8007 
8008 		/*
8009 		 * We only want to steal up to the prescribed amount of
8010 		 * load/util/tasks.
8011 		 */
8012 		if (env->imbalance <= 0)
8013 			break;
8014 
8015 		continue;
8016 next:
8017 		list_move(&p->se.group_node, tasks);
8018 	}
8019 
8020 	/*
8021 	 * Right now, this is one of only two places we collect this stat
8022 	 * so we can safely collect detach_one_task() stats here rather
8023 	 * than inside detach_one_task().
8024 	 */
8025 	schedstat_add(env->sd->lb_gained[env->idle], detached);
8026 
8027 	return detached;
8028 }
8029 
8030 /*
8031  * attach_task() -- attach the task detached by detach_task() to its new rq.
8032  */
attach_task(struct rq * rq,struct task_struct * p)8033 static void attach_task(struct rq *rq, struct task_struct *p)
8034 {
8035 	lockdep_assert_held(&rq->lock);
8036 
8037 	BUG_ON(task_rq(p) != rq);
8038 	activate_task(rq, p, ENQUEUE_NOCLOCK);
8039 	check_preempt_curr(rq, p, 0);
8040 }
8041 
8042 /*
8043  * attach_one_task() -- attaches the task returned from detach_one_task() to
8044  * its new rq.
8045  */
attach_one_task(struct rq * rq,struct task_struct * p)8046 static void attach_one_task(struct rq *rq, struct task_struct *p)
8047 {
8048 	struct rq_flags rf;
8049 
8050 	rq_lock(rq, &rf);
8051 	update_rq_clock(rq);
8052 	attach_task(rq, p);
8053 	rq_unlock(rq, &rf);
8054 }
8055 
8056 /*
8057  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
8058  * new rq.
8059  */
attach_tasks(struct lb_env * env)8060 static void attach_tasks(struct lb_env *env)
8061 {
8062 	struct list_head *tasks = &env->tasks;
8063 	struct task_struct *p;
8064 	struct rq_flags rf;
8065 
8066 	rq_lock(env->dst_rq, &rf);
8067 	update_rq_clock(env->dst_rq);
8068 
8069 	while (!list_empty(tasks)) {
8070 		p = list_first_entry(tasks, struct task_struct, se.group_node);
8071 		list_del_init(&p->se.group_node);
8072 
8073 		attach_task(env->dst_rq, p);
8074 	}
8075 
8076 	rq_unlock(env->dst_rq, &rf);
8077 }
8078 
8079 #ifdef CONFIG_NO_HZ_COMMON
cfs_rq_has_blocked(struct cfs_rq * cfs_rq)8080 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
8081 {
8082 	if (cfs_rq->avg.load_avg)
8083 		return true;
8084 
8085 	if (cfs_rq->avg.util_avg)
8086 		return true;
8087 
8088 	return false;
8089 }
8090 
others_have_blocked(struct rq * rq)8091 static inline bool others_have_blocked(struct rq *rq)
8092 {
8093 	if (READ_ONCE(rq->avg_rt.util_avg))
8094 		return true;
8095 
8096 	if (READ_ONCE(rq->avg_dl.util_avg))
8097 		return true;
8098 
8099 	if (thermal_load_avg(rq))
8100 		return true;
8101 
8102 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
8103 	if (READ_ONCE(rq->avg_irq.util_avg))
8104 		return true;
8105 #endif
8106 
8107 	return false;
8108 }
8109 
update_blocked_load_status(struct rq * rq,bool has_blocked)8110 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8111 {
8112 	rq->last_blocked_load_update_tick = jiffies;
8113 
8114 	if (!has_blocked)
8115 		rq->has_blocked_load = 0;
8116 }
8117 #else
cfs_rq_has_blocked(struct cfs_rq * cfs_rq)8118 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
others_have_blocked(struct rq * rq)8119 static inline bool others_have_blocked(struct rq *rq) { return false; }
update_blocked_load_status(struct rq * rq,bool has_blocked)8120 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
8121 #endif
8122 
__update_blocked_others(struct rq * rq,bool * done)8123 static bool __update_blocked_others(struct rq *rq, bool *done)
8124 {
8125 	const struct sched_class *curr_class;
8126 	u64 now = rq_clock_pelt(rq);
8127 	unsigned long thermal_pressure;
8128 	bool decayed;
8129 
8130 	/*
8131 	 * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
8132 	 * DL and IRQ signals have been updated before updating CFS.
8133 	 */
8134 	curr_class = rq->curr->sched_class;
8135 
8136 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
8137 
8138 	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
8139 		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
8140 		  update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
8141 		  update_irq_load_avg(rq, 0);
8142 
8143 	if (others_have_blocked(rq))
8144 		*done = false;
8145 
8146 	return decayed;
8147 }
8148 
8149 #ifdef CONFIG_FAIR_GROUP_SCHED
8150 
cfs_rq_is_decayed(struct cfs_rq * cfs_rq)8151 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
8152 {
8153 	if (cfs_rq->load.weight)
8154 		return false;
8155 
8156 	if (cfs_rq->avg.load_sum)
8157 		return false;
8158 
8159 	if (cfs_rq->avg.util_sum)
8160 		return false;
8161 
8162 	if (cfs_rq->avg.runnable_sum)
8163 		return false;
8164 
8165 	return true;
8166 }
8167 
__update_blocked_fair(struct rq * rq,bool * done)8168 static bool __update_blocked_fair(struct rq *rq, bool *done)
8169 {
8170 	struct cfs_rq *cfs_rq, *pos;
8171 	bool decayed = false;
8172 	int cpu = cpu_of(rq);
8173 
8174 	/*
8175 	 * Iterates the task_group tree in a bottom up fashion, see
8176 	 * list_add_leaf_cfs_rq() for details.
8177 	 */
8178 	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
8179 		struct sched_entity *se;
8180 
8181 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
8182 			update_tg_load_avg(cfs_rq);
8183 
8184 			if (cfs_rq == &rq->cfs)
8185 				decayed = true;
8186 		}
8187 
8188 		/* Propagate pending load changes to the parent, if any: */
8189 		se = cfs_rq->tg->se[cpu];
8190 		if (se && !skip_blocked_update(se))
8191 			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
8192 
8193 		/*
8194 		 * There can be a lot of idle CPU cgroups.  Don't let fully
8195 		 * decayed cfs_rqs linger on the list.
8196 		 */
8197 		if (cfs_rq_is_decayed(cfs_rq))
8198 			list_del_leaf_cfs_rq(cfs_rq);
8199 
8200 		/* Don't need periodic decay once load/util_avg are null */
8201 		if (cfs_rq_has_blocked(cfs_rq))
8202 			*done = false;
8203 	}
8204 
8205 	return decayed;
8206 }
8207 
8208 /*
8209  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
8210  * This needs to be done in a top-down fashion because the load of a child
8211  * group is a fraction of its parents load.
8212  */
update_cfs_rq_h_load(struct cfs_rq * cfs_rq)8213 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
8214 {
8215 	struct rq *rq = rq_of(cfs_rq);
8216 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8217 	unsigned long now = jiffies;
8218 	unsigned long load;
8219 
8220 	if (cfs_rq->last_h_load_update == now)
8221 		return;
8222 
8223 	WRITE_ONCE(cfs_rq->h_load_next, NULL);
8224 	for_each_sched_entity(se) {
8225 		cfs_rq = cfs_rq_of(se);
8226 		WRITE_ONCE(cfs_rq->h_load_next, se);
8227 		if (cfs_rq->last_h_load_update == now)
8228 			break;
8229 	}
8230 
8231 	if (!se) {
8232 		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
8233 		cfs_rq->last_h_load_update = now;
8234 	}
8235 
8236 	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
8237 		load = cfs_rq->h_load;
8238 		load = div64_ul(load * se->avg.load_avg,
8239 			cfs_rq_load_avg(cfs_rq) + 1);
8240 		cfs_rq = group_cfs_rq(se);
8241 		cfs_rq->h_load = load;
8242 		cfs_rq->last_h_load_update = now;
8243 	}
8244 }
8245 
task_h_load(struct task_struct * p)8246 static unsigned long task_h_load(struct task_struct *p)
8247 {
8248 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
8249 
8250 	update_cfs_rq_h_load(cfs_rq);
8251 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
8252 			cfs_rq_load_avg(cfs_rq) + 1);
8253 }
8254 #else
__update_blocked_fair(struct rq * rq,bool * done)8255 static bool __update_blocked_fair(struct rq *rq, bool *done)
8256 {
8257 	struct cfs_rq *cfs_rq = &rq->cfs;
8258 	bool decayed;
8259 
8260 	decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
8261 	if (cfs_rq_has_blocked(cfs_rq))
8262 		*done = false;
8263 
8264 	return decayed;
8265 }
8266 
task_h_load(struct task_struct * p)8267 static unsigned long task_h_load(struct task_struct *p)
8268 {
8269 	return p->se.avg.load_avg;
8270 }
8271 #endif
8272 
update_blocked_averages(int cpu)8273 static void update_blocked_averages(int cpu)
8274 {
8275 	bool decayed = false, done = true;
8276 	struct rq *rq = cpu_rq(cpu);
8277 	struct rq_flags rf;
8278 
8279 	rq_lock_irqsave(rq, &rf);
8280 	update_rq_clock(rq);
8281 
8282 	decayed |= __update_blocked_others(rq, &done);
8283 	decayed |= __update_blocked_fair(rq, &done);
8284 
8285 	update_blocked_load_status(rq, !done);
8286 	if (decayed)
8287 		cpufreq_update_util(rq, 0);
8288 	rq_unlock_irqrestore(rq, &rf);
8289 }
8290 
8291 /********** Helpers for find_busiest_group ************************/
8292 
8293 /*
8294  * sg_lb_stats - stats of a sched_group required for load_balancing
8295  */
8296 struct sg_lb_stats {
8297 	unsigned long avg_load; /*Avg load across the CPUs of the group */
8298 	unsigned long group_load; /* Total load over the CPUs of the group */
8299 	unsigned long group_capacity;
8300 	unsigned long group_util; /* Total utilization over the CPUs of the group */
8301 	unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
8302 	unsigned int sum_nr_running; /* Nr of tasks running in the group */
8303 	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
8304 	unsigned int idle_cpus;
8305 	unsigned int group_weight;
8306 	enum group_type group_type;
8307 	unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
8308 	unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
8309 #ifdef CONFIG_NUMA_BALANCING
8310 	unsigned int nr_numa_running;
8311 	unsigned int nr_preferred_running;
8312 #endif
8313 };
8314 
8315 /*
8316  * sd_lb_stats - Structure to store the statistics of a sched_domain
8317  *		 during load balancing.
8318  */
8319 struct sd_lb_stats {
8320 	struct sched_group *busiest;	/* Busiest group in this sd */
8321 	struct sched_group *local;	/* Local group in this sd */
8322 	unsigned long total_load;	/* Total load of all groups in sd */
8323 	unsigned long total_capacity;	/* Total capacity of all groups in sd */
8324 	unsigned long avg_load;	/* Average load across all groups in sd */
8325 	unsigned int prefer_sibling; /* tasks should go to sibling first */
8326 
8327 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
8328 	struct sg_lb_stats local_stat;	/* Statistics of the local group */
8329 };
8330 
init_sd_lb_stats(struct sd_lb_stats * sds)8331 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
8332 {
8333 	/*
8334 	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
8335 	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
8336 	 * We must however set busiest_stat::group_type and
8337 	 * busiest_stat::idle_cpus to the worst busiest group because
8338 	 * update_sd_pick_busiest() reads these before assignment.
8339 	 */
8340 	*sds = (struct sd_lb_stats){
8341 		.busiest = NULL,
8342 		.local = NULL,
8343 		.total_load = 0UL,
8344 		.total_capacity = 0UL,
8345 		.busiest_stat = {
8346 			.idle_cpus = UINT_MAX,
8347 			.group_type = group_has_spare,
8348 		},
8349 	};
8350 }
8351 
scale_rt_capacity(int cpu)8352 static unsigned long scale_rt_capacity(int cpu)
8353 {
8354 	struct rq *rq = cpu_rq(cpu);
8355 	unsigned long max = arch_scale_cpu_capacity(cpu);
8356 	unsigned long used, free;
8357 	unsigned long irq;
8358 
8359 	irq = cpu_util_irq(rq);
8360 
8361 	if (unlikely(irq >= max))
8362 		return 1;
8363 
8364 	/*
8365 	 * avg_rt.util_avg and avg_dl.util_avg track binary signals
8366 	 * (running and not running) with weights 0 and 1024 respectively.
8367 	 * avg_thermal.load_avg tracks thermal pressure and the weighted
8368 	 * average uses the actual delta max capacity(load).
8369 	 */
8370 	used = READ_ONCE(rq->avg_rt.util_avg);
8371 	used += READ_ONCE(rq->avg_dl.util_avg);
8372 	used += thermal_load_avg(rq);
8373 
8374 	if (unlikely(used >= max))
8375 		return 1;
8376 
8377 	free = max - used;
8378 
8379 	return scale_irq_capacity(free, irq, max);
8380 }
8381 
update_cpu_capacity(struct sched_domain * sd,int cpu)8382 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
8383 {
8384 	unsigned long capacity = scale_rt_capacity(cpu);
8385 	struct sched_group *sdg = sd->groups;
8386 
8387 	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
8388 
8389 	if (!capacity)
8390 		capacity = 1;
8391 
8392 	trace_android_rvh_update_cpu_capacity(cpu, &capacity);
8393 	cpu_rq(cpu)->cpu_capacity = capacity;
8394 	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
8395 
8396 	sdg->sgc->capacity = capacity;
8397 	sdg->sgc->min_capacity = capacity;
8398 	sdg->sgc->max_capacity = capacity;
8399 }
8400 
update_group_capacity(struct sched_domain * sd,int cpu)8401 void update_group_capacity(struct sched_domain *sd, int cpu)
8402 {
8403 	struct sched_domain *child = sd->child;
8404 	struct sched_group *group, *sdg = sd->groups;
8405 	unsigned long capacity, min_capacity, max_capacity;
8406 	unsigned long interval;
8407 
8408 	interval = msecs_to_jiffies(sd->balance_interval);
8409 	interval = clamp(interval, 1UL, max_load_balance_interval);
8410 	sdg->sgc->next_update = jiffies + interval;
8411 
8412 	if (!child) {
8413 		update_cpu_capacity(sd, cpu);
8414 		return;
8415 	}
8416 
8417 	capacity = 0;
8418 	min_capacity = ULONG_MAX;
8419 	max_capacity = 0;
8420 
8421 	if (child->flags & SD_OVERLAP) {
8422 		/*
8423 		 * SD_OVERLAP domains cannot assume that child groups
8424 		 * span the current group.
8425 		 */
8426 
8427 		for_each_cpu(cpu, sched_group_span(sdg)) {
8428 			unsigned long cpu_cap = capacity_of(cpu);
8429 
8430 			capacity += cpu_cap;
8431 			min_capacity = min(cpu_cap, min_capacity);
8432 			max_capacity = max(cpu_cap, max_capacity);
8433 		}
8434 	} else  {
8435 		/*
8436 		 * !SD_OVERLAP domains can assume that child groups
8437 		 * span the current group.
8438 		 */
8439 
8440 		group = child->groups;
8441 		do {
8442 			struct sched_group_capacity *sgc = group->sgc;
8443 
8444 			capacity += sgc->capacity;
8445 			min_capacity = min(sgc->min_capacity, min_capacity);
8446 			max_capacity = max(sgc->max_capacity, max_capacity);
8447 			group = group->next;
8448 		} while (group != child->groups);
8449 	}
8450 
8451 	sdg->sgc->capacity = capacity;
8452 	sdg->sgc->min_capacity = min_capacity;
8453 	sdg->sgc->max_capacity = max_capacity;
8454 }
8455 
8456 /*
8457  * Check whether the capacity of the rq has been noticeably reduced by side
8458  * activity. The imbalance_pct is used for the threshold.
8459  * Return true is the capacity is reduced
8460  */
8461 static inline int
check_cpu_capacity(struct rq * rq,struct sched_domain * sd)8462 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
8463 {
8464 	return ((rq->cpu_capacity * sd->imbalance_pct) <
8465 				(rq->cpu_capacity_orig * 100));
8466 }
8467 
8468 /*
8469  * Check whether a rq has a misfit task and if it looks like we can actually
8470  * help that task: we can migrate the task to a CPU of higher capacity, or
8471  * the task's current CPU is heavily pressured.
8472  */
check_misfit_status(struct rq * rq,struct sched_domain * sd)8473 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
8474 {
8475 	return rq->misfit_task_load &&
8476 		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
8477 		 check_cpu_capacity(rq, sd));
8478 }
8479 
8480 /*
8481  * Group imbalance indicates (and tries to solve) the problem where balancing
8482  * groups is inadequate due to ->cpus_ptr constraints.
8483  *
8484  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
8485  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
8486  * Something like:
8487  *
8488  *	{ 0 1 2 3 } { 4 5 6 7 }
8489  *	        *     * * *
8490  *
8491  * If we were to balance group-wise we'd place two tasks in the first group and
8492  * two tasks in the second group. Clearly this is undesired as it will overload
8493  * cpu 3 and leave one of the CPUs in the second group unused.
8494  *
8495  * The current solution to this issue is detecting the skew in the first group
8496  * by noticing the lower domain failed to reach balance and had difficulty
8497  * moving tasks due to affinity constraints.
8498  *
8499  * When this is so detected; this group becomes a candidate for busiest; see
8500  * update_sd_pick_busiest(). And calculate_imbalance() and
8501  * find_busiest_group() avoid some of the usual balance conditions to allow it
8502  * to create an effective group imbalance.
8503  *
8504  * This is a somewhat tricky proposition since the next run might not find the
8505  * group imbalance and decide the groups need to be balanced again. A most
8506  * subtle and fragile situation.
8507  */
8508 
sg_imbalanced(struct sched_group * group)8509 static inline int sg_imbalanced(struct sched_group *group)
8510 {
8511 	return group->sgc->imbalance;
8512 }
8513 
8514 /*
8515  * group_has_capacity returns true if the group has spare capacity that could
8516  * be used by some tasks.
8517  * We consider that a group has spare capacity if the  * number of task is
8518  * smaller than the number of CPUs or if the utilization is lower than the
8519  * available capacity for CFS tasks.
8520  * For the latter, we use a threshold to stabilize the state, to take into
8521  * account the variance of the tasks' load and to return true if the available
8522  * capacity in meaningful for the load balancer.
8523  * As an example, an available capacity of 1% can appear but it doesn't make
8524  * any benefit for the load balance.
8525  */
8526 static inline bool
group_has_capacity(unsigned int imbalance_pct,struct sg_lb_stats * sgs)8527 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8528 {
8529 	if (sgs->sum_nr_running < sgs->group_weight)
8530 		return true;
8531 
8532 	if ((sgs->group_capacity * imbalance_pct) <
8533 			(sgs->group_runnable * 100))
8534 		return false;
8535 
8536 	if ((sgs->group_capacity * 100) >
8537 			(sgs->group_util * imbalance_pct))
8538 		return true;
8539 
8540 	return false;
8541 }
8542 
8543 /*
8544  *  group_is_overloaded returns true if the group has more tasks than it can
8545  *  handle.
8546  *  group_is_overloaded is not equals to !group_has_capacity because a group
8547  *  with the exact right number of tasks, has no more spare capacity but is not
8548  *  overloaded so both group_has_capacity and group_is_overloaded return
8549  *  false.
8550  */
8551 static inline bool
group_is_overloaded(unsigned int imbalance_pct,struct sg_lb_stats * sgs)8552 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8553 {
8554 	if (sgs->sum_nr_running <= sgs->group_weight)
8555 		return false;
8556 
8557 	if ((sgs->group_capacity * 100) <
8558 			(sgs->group_util * imbalance_pct))
8559 		return true;
8560 
8561 	if ((sgs->group_capacity * imbalance_pct) <
8562 			(sgs->group_runnable * 100))
8563 		return true;
8564 
8565 	return false;
8566 }
8567 
8568 /*
8569  * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
8570  * per-CPU capacity than sched_group ref.
8571  */
8572 static inline bool
group_smaller_min_cpu_capacity(struct sched_group * sg,struct sched_group * ref)8573 group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
8574 {
8575 	return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity);
8576 }
8577 
8578 /*
8579  * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
8580  * per-CPU capacity_orig than sched_group ref.
8581  */
8582 static inline bool
group_smaller_max_cpu_capacity(struct sched_group * sg,struct sched_group * ref)8583 group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
8584 {
8585 	return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity);
8586 }
8587 
8588 static inline enum
group_classify(unsigned int imbalance_pct,struct sched_group * group,struct sg_lb_stats * sgs)8589 group_type group_classify(unsigned int imbalance_pct,
8590 			  struct sched_group *group,
8591 			  struct sg_lb_stats *sgs)
8592 {
8593 	if (group_is_overloaded(imbalance_pct, sgs))
8594 		return group_overloaded;
8595 
8596 	if (sg_imbalanced(group))
8597 		return group_imbalanced;
8598 
8599 	if (sgs->group_asym_packing)
8600 		return group_asym_packing;
8601 
8602 	if (sgs->group_misfit_task_load)
8603 		return group_misfit_task;
8604 
8605 	if (!group_has_capacity(imbalance_pct, sgs))
8606 		return group_fully_busy;
8607 
8608 	return group_has_spare;
8609 }
8610 
update_nohz_stats(struct rq * rq,bool force)8611 static bool update_nohz_stats(struct rq *rq, bool force)
8612 {
8613 #ifdef CONFIG_NO_HZ_COMMON
8614 	unsigned int cpu = rq->cpu;
8615 
8616 	if (!rq->has_blocked_load)
8617 		return false;
8618 
8619 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
8620 		return false;
8621 
8622 	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
8623 		return true;
8624 
8625 	update_blocked_averages(cpu);
8626 
8627 	return rq->has_blocked_load;
8628 #else
8629 	return false;
8630 #endif
8631 }
8632 
8633 /**
8634  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
8635  * @env: The load balancing environment.
8636  * @group: sched_group whose statistics are to be updated.
8637  * @sgs: variable to hold the statistics for this group.
8638  * @sg_status: Holds flag indicating the status of the sched_group
8639  */
update_sg_lb_stats(struct lb_env * env,struct sched_group * group,struct sg_lb_stats * sgs,int * sg_status)8640 static inline void update_sg_lb_stats(struct lb_env *env,
8641 				      struct sched_group *group,
8642 				      struct sg_lb_stats *sgs,
8643 				      int *sg_status)
8644 {
8645 	int i, nr_running, local_group;
8646 
8647 	memset(sgs, 0, sizeof(*sgs));
8648 
8649 	local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
8650 
8651 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
8652 		struct rq *rq = cpu_rq(i);
8653 
8654 		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
8655 			env->flags |= LBF_NOHZ_AGAIN;
8656 
8657 		sgs->group_load += cpu_load(rq);
8658 		sgs->group_util += cpu_util(i);
8659 		sgs->group_runnable += cpu_runnable(rq);
8660 		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
8661 
8662 		nr_running = rq->nr_running;
8663 		sgs->sum_nr_running += nr_running;
8664 
8665 		if (nr_running > 1)
8666 			*sg_status |= SG_OVERLOAD;
8667 
8668 		if (cpu_overutilized(i))
8669 			*sg_status |= SG_OVERUTILIZED;
8670 
8671 #ifdef CONFIG_NUMA_BALANCING
8672 		sgs->nr_numa_running += rq->nr_numa_running;
8673 		sgs->nr_preferred_running += rq->nr_preferred_running;
8674 #endif
8675 		/*
8676 		 * No need to call idle_cpu() if nr_running is not 0
8677 		 */
8678 		if (!nr_running && idle_cpu(i)) {
8679 			sgs->idle_cpus++;
8680 			/* Idle cpu can't have misfit task */
8681 			continue;
8682 		}
8683 
8684 		if (local_group)
8685 			continue;
8686 
8687 		/* Check for a misfit task on the cpu */
8688 		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
8689 		    sgs->group_misfit_task_load < rq->misfit_task_load) {
8690 			sgs->group_misfit_task_load = rq->misfit_task_load;
8691 			*sg_status |= SG_OVERLOAD;
8692 		}
8693 	}
8694 
8695 	/* Check if dst CPU is idle and preferred to this group */
8696 	if (env->sd->flags & SD_ASYM_PACKING &&
8697 	    env->idle != CPU_NOT_IDLE &&
8698 	    sgs->sum_h_nr_running &&
8699 	    sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
8700 		sgs->group_asym_packing = 1;
8701 	}
8702 
8703 	sgs->group_capacity = group->sgc->capacity;
8704 
8705 	sgs->group_weight = group->group_weight;
8706 
8707 	sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
8708 
8709 	/* Computing avg_load makes sense only when group is overloaded */
8710 	if (sgs->group_type == group_overloaded)
8711 		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
8712 				sgs->group_capacity;
8713 }
8714 
8715 /**
8716  * update_sd_pick_busiest - return 1 on busiest group
8717  * @env: The load balancing environment.
8718  * @sds: sched_domain statistics
8719  * @sg: sched_group candidate to be checked for being the busiest
8720  * @sgs: sched_group statistics
8721  *
8722  * Determine if @sg is a busier group than the previously selected
8723  * busiest group.
8724  *
8725  * Return: %true if @sg is a busier group than the previously selected
8726  * busiest group. %false otherwise.
8727  */
update_sd_pick_busiest(struct lb_env * env,struct sd_lb_stats * sds,struct sched_group * sg,struct sg_lb_stats * sgs)8728 static bool update_sd_pick_busiest(struct lb_env *env,
8729 				   struct sd_lb_stats *sds,
8730 				   struct sched_group *sg,
8731 				   struct sg_lb_stats *sgs)
8732 {
8733 	struct sg_lb_stats *busiest = &sds->busiest_stat;
8734 
8735 	/* Make sure that there is at least one task to pull */
8736 	if (!sgs->sum_h_nr_running)
8737 		return false;
8738 
8739 	/*
8740 	 * Don't try to pull misfit tasks we can't help.
8741 	 * We can use max_capacity here as reduction in capacity on some
8742 	 * CPUs in the group should either be possible to resolve
8743 	 * internally or be covered by avg_load imbalance (eventually).
8744 	 */
8745 	if (sgs->group_type == group_misfit_task &&
8746 	    (!group_smaller_max_cpu_capacity(sg, sds->local) ||
8747 	     sds->local_stat.group_type != group_has_spare))
8748 		return false;
8749 
8750 	if (sgs->group_type > busiest->group_type)
8751 		return true;
8752 
8753 	if (sgs->group_type < busiest->group_type)
8754 		return false;
8755 
8756 	/*
8757 	 * The candidate and the current busiest group are the same type of
8758 	 * group. Let check which one is the busiest according to the type.
8759 	 */
8760 
8761 	switch (sgs->group_type) {
8762 	case group_overloaded:
8763 		/* Select the overloaded group with highest avg_load. */
8764 		if (sgs->avg_load <= busiest->avg_load)
8765 			return false;
8766 		break;
8767 
8768 	case group_imbalanced:
8769 		/*
8770 		 * Select the 1st imbalanced group as we don't have any way to
8771 		 * choose one more than another.
8772 		 */
8773 		return false;
8774 
8775 	case group_asym_packing:
8776 		/* Prefer to move from lowest priority CPU's work */
8777 		if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
8778 			return false;
8779 		break;
8780 
8781 	case group_misfit_task:
8782 		/*
8783 		 * If we have more than one misfit sg go with the biggest
8784 		 * misfit.
8785 		 */
8786 		if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
8787 			return false;
8788 		break;
8789 
8790 	case group_fully_busy:
8791 		/*
8792 		 * Select the fully busy group with highest avg_load. In
8793 		 * theory, there is no need to pull task from such kind of
8794 		 * group because tasks have all compute capacity that they need
8795 		 * but we can still improve the overall throughput by reducing
8796 		 * contention when accessing shared HW resources.
8797 		 *
8798 		 * XXX for now avg_load is not computed and always 0 so we
8799 		 * select the 1st one.
8800 		 */
8801 		if (sgs->avg_load <= busiest->avg_load)
8802 			return false;
8803 		break;
8804 
8805 	case group_has_spare:
8806 		/*
8807 		 * Select not overloaded group with lowest number of idle cpus
8808 		 * and highest number of running tasks. We could also compare
8809 		 * the spare capacity which is more stable but it can end up
8810 		 * that the group has less spare capacity but finally more idle
8811 		 * CPUs which means less opportunity to pull tasks.
8812 		 */
8813 		if (sgs->idle_cpus > busiest->idle_cpus)
8814 			return false;
8815 		else if ((sgs->idle_cpus == busiest->idle_cpus) &&
8816 			 (sgs->sum_nr_running <= busiest->sum_nr_running))
8817 			return false;
8818 
8819 		break;
8820 	}
8821 
8822 	/*
8823 	 * Candidate sg has no more than one task per CPU and has higher
8824 	 * per-CPU capacity. Migrating tasks to less capable CPUs may harm
8825 	 * throughput. Maximize throughput, power/energy consequences are not
8826 	 * considered.
8827 	 */
8828 	if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
8829 	    (sgs->group_type <= group_fully_busy) &&
8830 	    (group_smaller_min_cpu_capacity(sds->local, sg)))
8831 		return false;
8832 
8833 	return true;
8834 }
8835 
8836 #ifdef CONFIG_NUMA_BALANCING
fbq_classify_group(struct sg_lb_stats * sgs)8837 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
8838 {
8839 	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
8840 		return regular;
8841 	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
8842 		return remote;
8843 	return all;
8844 }
8845 
fbq_classify_rq(struct rq * rq)8846 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
8847 {
8848 	if (rq->nr_running > rq->nr_numa_running)
8849 		return regular;
8850 	if (rq->nr_running > rq->nr_preferred_running)
8851 		return remote;
8852 	return all;
8853 }
8854 #else
fbq_classify_group(struct sg_lb_stats * sgs)8855 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
8856 {
8857 	return all;
8858 }
8859 
fbq_classify_rq(struct rq * rq)8860 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
8861 {
8862 	return regular;
8863 }
8864 #endif /* CONFIG_NUMA_BALANCING */
8865 
8866 
8867 struct sg_lb_stats;
8868 
8869 /*
8870  * task_running_on_cpu - return 1 if @p is running on @cpu.
8871  */
8872 
task_running_on_cpu(int cpu,struct task_struct * p)8873 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
8874 {
8875 	/* Task has no contribution or is new */
8876 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
8877 		return 0;
8878 
8879 	if (task_on_rq_queued(p))
8880 		return 1;
8881 
8882 	return 0;
8883 }
8884 
8885 /**
8886  * idle_cpu_without - would a given CPU be idle without p ?
8887  * @cpu: the processor on which idleness is tested.
8888  * @p: task which should be ignored.
8889  *
8890  * Return: 1 if the CPU would be idle. 0 otherwise.
8891  */
idle_cpu_without(int cpu,struct task_struct * p)8892 static int idle_cpu_without(int cpu, struct task_struct *p)
8893 {
8894 	struct rq *rq = cpu_rq(cpu);
8895 
8896 	if (rq->curr != rq->idle && rq->curr != p)
8897 		return 0;
8898 
8899 	/*
8900 	 * rq->nr_running can't be used but an updated version without the
8901 	 * impact of p on cpu must be used instead. The updated nr_running
8902 	 * be computed and tested before calling idle_cpu_without().
8903 	 */
8904 
8905 #ifdef CONFIG_SMP
8906 	if (rq->ttwu_pending)
8907 		return 0;
8908 #endif
8909 
8910 	return 1;
8911 }
8912 
8913 /*
8914  * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
8915  * @sd: The sched_domain level to look for idlest group.
8916  * @group: sched_group whose statistics are to be updated.
8917  * @sgs: variable to hold the statistics for this group.
8918  * @p: The task for which we look for the idlest group/CPU.
8919  */
update_sg_wakeup_stats(struct sched_domain * sd,struct sched_group * group,struct sg_lb_stats * sgs,struct task_struct * p)8920 static inline void update_sg_wakeup_stats(struct sched_domain *sd,
8921 					  struct sched_group *group,
8922 					  struct sg_lb_stats *sgs,
8923 					  struct task_struct *p)
8924 {
8925 	int i, nr_running;
8926 
8927 	memset(sgs, 0, sizeof(*sgs));
8928 
8929 	for_each_cpu(i, sched_group_span(group)) {
8930 		struct rq *rq = cpu_rq(i);
8931 		unsigned int local;
8932 
8933 		sgs->group_load += cpu_load_without(rq, p);
8934 		sgs->group_util += cpu_util_without(i, p);
8935 		sgs->group_runnable += cpu_runnable_without(rq, p);
8936 		local = task_running_on_cpu(i, p);
8937 		sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
8938 
8939 		nr_running = rq->nr_running - local;
8940 		sgs->sum_nr_running += nr_running;
8941 
8942 		/*
8943 		 * No need to call idle_cpu_without() if nr_running is not 0
8944 		 */
8945 		if (!nr_running && idle_cpu_without(i, p))
8946 			sgs->idle_cpus++;
8947 
8948 	}
8949 
8950 	/* Check if task fits in the group */
8951 	if (sd->flags & SD_ASYM_CPUCAPACITY &&
8952 	    !task_fits_capacity(p, group->sgc->max_capacity)) {
8953 		sgs->group_misfit_task_load = 1;
8954 	}
8955 
8956 	sgs->group_capacity = group->sgc->capacity;
8957 
8958 	sgs->group_weight = group->group_weight;
8959 
8960 	sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
8961 
8962 	/*
8963 	 * Computing avg_load makes sense only when group is fully busy or
8964 	 * overloaded
8965 	 */
8966 	if (sgs->group_type == group_fully_busy ||
8967 		sgs->group_type == group_overloaded)
8968 		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
8969 				sgs->group_capacity;
8970 }
8971 
update_pick_idlest(struct sched_group * idlest,struct sg_lb_stats * idlest_sgs,struct sched_group * group,struct sg_lb_stats * sgs)8972 static bool update_pick_idlest(struct sched_group *idlest,
8973 			       struct sg_lb_stats *idlest_sgs,
8974 			       struct sched_group *group,
8975 			       struct sg_lb_stats *sgs)
8976 {
8977 	if (sgs->group_type < idlest_sgs->group_type)
8978 		return true;
8979 
8980 	if (sgs->group_type > idlest_sgs->group_type)
8981 		return false;
8982 
8983 	/*
8984 	 * The candidate and the current idlest group are the same type of
8985 	 * group. Let check which one is the idlest according to the type.
8986 	 */
8987 
8988 	switch (sgs->group_type) {
8989 	case group_overloaded:
8990 	case group_fully_busy:
8991 		/* Select the group with lowest avg_load. */
8992 		if (idlest_sgs->avg_load <= sgs->avg_load)
8993 			return false;
8994 		break;
8995 
8996 	case group_imbalanced:
8997 	case group_asym_packing:
8998 		/* Those types are not used in the slow wakeup path */
8999 		return false;
9000 
9001 	case group_misfit_task:
9002 		/* Select group with the highest max capacity */
9003 		if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
9004 			return false;
9005 		break;
9006 
9007 	case group_has_spare:
9008 		/* Select group with most idle CPUs */
9009 		if (idlest_sgs->idle_cpus > sgs->idle_cpus)
9010 			return false;
9011 
9012 		/* Select group with lowest group_util */
9013 		if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
9014 			idlest_sgs->group_util <= sgs->group_util)
9015 			return false;
9016 
9017 		break;
9018 	}
9019 
9020 	return true;
9021 }
9022 
9023 /*
9024  * find_idlest_group() finds and returns the least busy CPU group within the
9025  * domain.
9026  *
9027  * Assumes p is allowed on at least one CPU in sd.
9028  */
9029 static struct sched_group *
find_idlest_group(struct sched_domain * sd,struct task_struct * p,int this_cpu)9030 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
9031 {
9032 	struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
9033 	struct sg_lb_stats local_sgs, tmp_sgs;
9034 	struct sg_lb_stats *sgs;
9035 	unsigned long imbalance;
9036 	struct sg_lb_stats idlest_sgs = {
9037 			.avg_load = UINT_MAX,
9038 			.group_type = group_overloaded,
9039 	};
9040 
9041 	imbalance = scale_load_down(NICE_0_LOAD) *
9042 				(sd->imbalance_pct-100) / 100;
9043 
9044 	do {
9045 		int local_group;
9046 
9047 		if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
9048 			struct root_domain *rd = cpu_rq(this_cpu)->rd;
9049 			struct cpumask *cpub_mask = rockchip_perf_get_cpub_mask();
9050 			int level = rockchip_perf_get_level();
9051 
9052 			if ((level == ROCKCHIP_PERFORMANCE_HIGH) && !READ_ONCE(rd->overutilized) &&
9053 			    cpub_mask && cpumask_intersects(p->cpus_ptr, cpub_mask) &&
9054 			    !cpumask_intersects(sched_group_span(group), cpub_mask))
9055 				continue;
9056 		}
9057 
9058 		/* Skip over this group if it has no CPUs allowed */
9059 		if (!cpumask_intersects(sched_group_span(group),
9060 					p->cpus_ptr))
9061 			continue;
9062 
9063 		local_group = cpumask_test_cpu(this_cpu,
9064 					       sched_group_span(group));
9065 
9066 		if (local_group) {
9067 			sgs = &local_sgs;
9068 			local = group;
9069 		} else {
9070 			sgs = &tmp_sgs;
9071 		}
9072 
9073 		update_sg_wakeup_stats(sd, group, sgs, p);
9074 
9075 		if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
9076 			idlest = group;
9077 			idlest_sgs = *sgs;
9078 		}
9079 
9080 	} while (group = group->next, group != sd->groups);
9081 
9082 
9083 	/* There is no idlest group to push tasks to */
9084 	if (!idlest)
9085 		return NULL;
9086 
9087 	/* The local group has been skipped because of CPU affinity */
9088 	if (!local)
9089 		return idlest;
9090 
9091 	/*
9092 	 * If the local group is idler than the selected idlest group
9093 	 * don't try and push the task.
9094 	 */
9095 	if (local_sgs.group_type < idlest_sgs.group_type)
9096 		return NULL;
9097 
9098 	/*
9099 	 * If the local group is busier than the selected idlest group
9100 	 * try and push the task.
9101 	 */
9102 	if (local_sgs.group_type > idlest_sgs.group_type)
9103 		return idlest;
9104 
9105 	switch (local_sgs.group_type) {
9106 	case group_overloaded:
9107 	case group_fully_busy:
9108 		/*
9109 		 * When comparing groups across NUMA domains, it's possible for
9110 		 * the local domain to be very lightly loaded relative to the
9111 		 * remote domains but "imbalance" skews the comparison making
9112 		 * remote CPUs look much more favourable. When considering
9113 		 * cross-domain, add imbalance to the load on the remote node
9114 		 * and consider staying local.
9115 		 */
9116 
9117 		if ((sd->flags & SD_NUMA) &&
9118 		    ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
9119 			return NULL;
9120 
9121 		/*
9122 		 * If the local group is less loaded than the selected
9123 		 * idlest group don't try and push any tasks.
9124 		 */
9125 		if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
9126 			return NULL;
9127 
9128 		if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
9129 			return NULL;
9130 		break;
9131 
9132 	case group_imbalanced:
9133 	case group_asym_packing:
9134 		/* Those type are not used in the slow wakeup path */
9135 		return NULL;
9136 
9137 	case group_misfit_task:
9138 		/* Select group with the highest max capacity */
9139 		if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
9140 			return NULL;
9141 		break;
9142 
9143 	case group_has_spare:
9144 		if (sd->flags & SD_NUMA) {
9145 #ifdef CONFIG_NUMA_BALANCING
9146 			int idlest_cpu;
9147 			/*
9148 			 * If there is spare capacity at NUMA, try to select
9149 			 * the preferred node
9150 			 */
9151 			if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
9152 				return NULL;
9153 
9154 			idlest_cpu = cpumask_first(sched_group_span(idlest));
9155 			if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
9156 				return idlest;
9157 #endif
9158 			/*
9159 			 * Otherwise, keep the task on this node to stay close
9160 			 * its wakeup source and improve locality. If there is
9161 			 * a real need of migration, periodic load balance will
9162 			 * take care of it.
9163 			 */
9164 			if (local_sgs.idle_cpus)
9165 				return NULL;
9166 		}
9167 
9168 		/*
9169 		 * Select group with highest number of idle CPUs. We could also
9170 		 * compare the utilization which is more stable but it can end
9171 		 * up that the group has less spare capacity but finally more
9172 		 * idle CPUs which means more opportunity to run task.
9173 		 */
9174 		if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
9175 			return NULL;
9176 		break;
9177 	}
9178 
9179 	return idlest;
9180 }
9181 
9182 /**
9183  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
9184  * @env: The load balancing environment.
9185  * @sds: variable to hold the statistics for this sched_domain.
9186  */
9187 
update_sd_lb_stats(struct lb_env * env,struct sd_lb_stats * sds)9188 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
9189 {
9190 	struct sched_domain *child = env->sd->child;
9191 	struct sched_group *sg = env->sd->groups;
9192 	struct sg_lb_stats *local = &sds->local_stat;
9193 	struct sg_lb_stats tmp_sgs;
9194 	int sg_status = 0;
9195 
9196 #ifdef CONFIG_NO_HZ_COMMON
9197 	if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
9198 		env->flags |= LBF_NOHZ_STATS;
9199 #endif
9200 
9201 	do {
9202 		struct sg_lb_stats *sgs = &tmp_sgs;
9203 		int local_group;
9204 
9205 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
9206 		if (local_group) {
9207 			sds->local = sg;
9208 			sgs = local;
9209 
9210 			if (env->idle != CPU_NEWLY_IDLE ||
9211 			    time_after_eq(jiffies, sg->sgc->next_update))
9212 				update_group_capacity(env->sd, env->dst_cpu);
9213 		}
9214 
9215 		update_sg_lb_stats(env, sg, sgs, &sg_status);
9216 
9217 		if (local_group)
9218 			goto next_group;
9219 
9220 
9221 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
9222 			sds->busiest = sg;
9223 			sds->busiest_stat = *sgs;
9224 		}
9225 
9226 next_group:
9227 		/* Now, start updating sd_lb_stats */
9228 		sds->total_load += sgs->group_load;
9229 		sds->total_capacity += sgs->group_capacity;
9230 
9231 		sg = sg->next;
9232 	} while (sg != env->sd->groups);
9233 
9234 	/* Tag domain that child domain prefers tasks go to siblings first */
9235 	sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
9236 
9237 #ifdef CONFIG_NO_HZ_COMMON
9238 	if ((env->flags & LBF_NOHZ_AGAIN) &&
9239 	    cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
9240 
9241 		WRITE_ONCE(nohz.next_blocked,
9242 			   jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
9243 	}
9244 #endif
9245 
9246 	if (env->sd->flags & SD_NUMA)
9247 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
9248 
9249 	if (!env->sd->parent) {
9250 		struct root_domain *rd = env->dst_rq->rd;
9251 
9252 		/* update overload indicator if we are at root domain */
9253 		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
9254 
9255 		/* Update over-utilization (tipping point, U >= 0) indicator */
9256 		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
9257 		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
9258 	} else if (sg_status & SG_OVERUTILIZED) {
9259 		struct root_domain *rd = env->dst_rq->rd;
9260 
9261 		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
9262 		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
9263 	}
9264 }
9265 
adjust_numa_imbalance(int imbalance,int nr_running)9266 static inline long adjust_numa_imbalance(int imbalance, int nr_running)
9267 {
9268 	unsigned int imbalance_min;
9269 
9270 	/*
9271 	 * Allow a small imbalance based on a simple pair of communicating
9272 	 * tasks that remain local when the source domain is almost idle.
9273 	 */
9274 	imbalance_min = 2;
9275 	if (nr_running <= imbalance_min)
9276 		return 0;
9277 
9278 	return imbalance;
9279 }
9280 
9281 /**
9282  * calculate_imbalance - Calculate the amount of imbalance present within the
9283  *			 groups of a given sched_domain during load balance.
9284  * @env: load balance environment
9285  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
9286  */
calculate_imbalance(struct lb_env * env,struct sd_lb_stats * sds)9287 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
9288 {
9289 	struct sg_lb_stats *local, *busiest;
9290 
9291 	local = &sds->local_stat;
9292 	busiest = &sds->busiest_stat;
9293 
9294 	if (busiest->group_type == group_misfit_task) {
9295 		/* Set imbalance to allow misfit tasks to be balanced. */
9296 		env->migration_type = migrate_misfit;
9297 		env->imbalance = 1;
9298 		return;
9299 	}
9300 
9301 	if (busiest->group_type == group_asym_packing) {
9302 		/*
9303 		 * In case of asym capacity, we will try to migrate all load to
9304 		 * the preferred CPU.
9305 		 */
9306 		env->migration_type = migrate_task;
9307 		env->imbalance = busiest->sum_h_nr_running;
9308 		return;
9309 	}
9310 
9311 	if (busiest->group_type == group_imbalanced) {
9312 		/*
9313 		 * In the group_imb case we cannot rely on group-wide averages
9314 		 * to ensure CPU-load equilibrium, try to move any task to fix
9315 		 * the imbalance. The next load balance will take care of
9316 		 * balancing back the system.
9317 		 */
9318 		env->migration_type = migrate_task;
9319 		env->imbalance = 1;
9320 		return;
9321 	}
9322 
9323 	/*
9324 	 * Try to use spare capacity of local group without overloading it or
9325 	 * emptying busiest.
9326 	 */
9327 	if (local->group_type == group_has_spare) {
9328 		if ((busiest->group_type > group_fully_busy) &&
9329 		    !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
9330 			/*
9331 			 * If busiest is overloaded, try to fill spare
9332 			 * capacity. This might end up creating spare capacity
9333 			 * in busiest or busiest still being overloaded but
9334 			 * there is no simple way to directly compute the
9335 			 * amount of load to migrate in order to balance the
9336 			 * system.
9337 			 */
9338 			env->migration_type = migrate_util;
9339 			env->imbalance = max(local->group_capacity, local->group_util) -
9340 					 local->group_util;
9341 
9342 			/*
9343 			 * In some cases, the group's utilization is max or even
9344 			 * higher than capacity because of migrations but the
9345 			 * local CPU is (newly) idle. There is at least one
9346 			 * waiting task in this overloaded busiest group. Let's
9347 			 * try to pull it.
9348 			 */
9349 			if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
9350 				env->migration_type = migrate_task;
9351 				env->imbalance = 1;
9352 			}
9353 
9354 			return;
9355 		}
9356 
9357 		if (busiest->group_weight == 1 || sds->prefer_sibling) {
9358 			unsigned int nr_diff = busiest->sum_nr_running;
9359 			/*
9360 			 * When prefer sibling, evenly spread running tasks on
9361 			 * groups.
9362 			 */
9363 			env->migration_type = migrate_task;
9364 			lsub_positive(&nr_diff, local->sum_nr_running);
9365 			env->imbalance = nr_diff >> 1;
9366 		} else {
9367 
9368 			/*
9369 			 * If there is no overload, we just want to even the number of
9370 			 * idle cpus.
9371 			 */
9372 			env->migration_type = migrate_task;
9373 			env->imbalance = max_t(long, 0, (local->idle_cpus -
9374 						 busiest->idle_cpus) >> 1);
9375 		}
9376 
9377 		/* Consider allowing a small imbalance between NUMA groups */
9378 		if (env->sd->flags & SD_NUMA)
9379 			env->imbalance = adjust_numa_imbalance(env->imbalance,
9380 						busiest->sum_nr_running);
9381 
9382 		return;
9383 	}
9384 
9385 	/*
9386 	 * Local is fully busy but has to take more load to relieve the
9387 	 * busiest group
9388 	 */
9389 	if (local->group_type < group_overloaded) {
9390 		/*
9391 		 * Local will become overloaded so the avg_load metrics are
9392 		 * finally needed.
9393 		 */
9394 
9395 		local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
9396 				  local->group_capacity;
9397 
9398 		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
9399 				sds->total_capacity;
9400 		/*
9401 		 * If the local group is more loaded than the selected
9402 		 * busiest group don't try to pull any tasks.
9403 		 */
9404 		if (local->avg_load >= busiest->avg_load) {
9405 			env->imbalance = 0;
9406 			return;
9407 		}
9408 	}
9409 
9410 	/*
9411 	 * Both group are or will become overloaded and we're trying to get all
9412 	 * the CPUs to the average_load, so we don't want to push ourselves
9413 	 * above the average load, nor do we wish to reduce the max loaded CPU
9414 	 * below the average load. At the same time, we also don't want to
9415 	 * reduce the group load below the group capacity. Thus we look for
9416 	 * the minimum possible imbalance.
9417 	 */
9418 	env->migration_type = migrate_load;
9419 	env->imbalance = min(
9420 		(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
9421 		(sds->avg_load - local->avg_load) * local->group_capacity
9422 	) / SCHED_CAPACITY_SCALE;
9423 }
9424 
9425 /******* find_busiest_group() helpers end here *********************/
9426 
9427 /*
9428  * Decision matrix according to the local and busiest group type:
9429  *
9430  * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
9431  * has_spare        nr_idle   balanced   N/A    N/A  balanced   balanced
9432  * fully_busy       nr_idle   nr_idle    N/A    N/A  balanced   balanced
9433  * misfit_task      force     N/A        N/A    N/A  force      force
9434  * asym_packing     force     force      N/A    N/A  force      force
9435  * imbalanced       force     force      N/A    N/A  force      force
9436  * overloaded       force     force      N/A    N/A  force      avg_load
9437  *
9438  * N/A :      Not Applicable because already filtered while updating
9439  *            statistics.
9440  * balanced : The system is balanced for these 2 groups.
9441  * force :    Calculate the imbalance as load migration is probably needed.
9442  * avg_load : Only if imbalance is significant enough.
9443  * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
9444  *            different in groups.
9445  */
9446 
9447 /**
9448  * find_busiest_group - Returns the busiest group within the sched_domain
9449  * if there is an imbalance.
9450  *
9451  * Also calculates the amount of runnable load which should be moved
9452  * to restore balance.
9453  *
9454  * @env: The load balancing environment.
9455  *
9456  * Return:	- The busiest group if imbalance exists.
9457  */
find_busiest_group(struct lb_env * env)9458 static struct sched_group *find_busiest_group(struct lb_env *env)
9459 {
9460 	struct sg_lb_stats *local, *busiest;
9461 	struct sd_lb_stats sds;
9462 
9463 	init_sd_lb_stats(&sds);
9464 
9465 	/*
9466 	 * Compute the various statistics relevant for load balancing at
9467 	 * this level.
9468 	 */
9469 	update_sd_lb_stats(env, &sds);
9470 
9471 	if (sched_energy_enabled()) {
9472 		struct root_domain *rd = env->dst_rq->rd;
9473 		int out_balance = 1;
9474 
9475 		trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
9476 					&out_balance);
9477 		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
9478 					&& out_balance)
9479 			goto out_balanced;
9480 	}
9481 
9482 	local = &sds.local_stat;
9483 	busiest = &sds.busiest_stat;
9484 
9485 	/* There is no busy sibling group to pull tasks from */
9486 	if (!sds.busiest)
9487 		goto out_balanced;
9488 
9489 	/* Misfit tasks should be dealt with regardless of the avg load */
9490 	if (busiest->group_type == group_misfit_task)
9491 		goto force_balance;
9492 
9493 	/* ASYM feature bypasses nice load balance check */
9494 	if (busiest->group_type == group_asym_packing)
9495 		goto force_balance;
9496 
9497 	/*
9498 	 * If the busiest group is imbalanced the below checks don't
9499 	 * work because they assume all things are equal, which typically
9500 	 * isn't true due to cpus_ptr constraints and the like.
9501 	 */
9502 	if (busiest->group_type == group_imbalanced)
9503 		goto force_balance;
9504 
9505 	/*
9506 	 * If the local group is busier than the selected busiest group
9507 	 * don't try and pull any tasks.
9508 	 */
9509 	if (local->group_type > busiest->group_type)
9510 		goto out_balanced;
9511 
9512 	/*
9513 	 * When groups are overloaded, use the avg_load to ensure fairness
9514 	 * between tasks.
9515 	 */
9516 	if (local->group_type == group_overloaded) {
9517 		/*
9518 		 * If the local group is more loaded than the selected
9519 		 * busiest group don't try to pull any tasks.
9520 		 */
9521 		if (local->avg_load >= busiest->avg_load)
9522 			goto out_balanced;
9523 
9524 		/* XXX broken for overlapping NUMA groups */
9525 		sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
9526 				sds.total_capacity;
9527 
9528 		/*
9529 		 * Don't pull any tasks if this group is already above the
9530 		 * domain average load.
9531 		 */
9532 		if (local->avg_load >= sds.avg_load)
9533 			goto out_balanced;
9534 
9535 		/*
9536 		 * If the busiest group is more loaded, use imbalance_pct to be
9537 		 * conservative.
9538 		 */
9539 		if (100 * busiest->avg_load <=
9540 				env->sd->imbalance_pct * local->avg_load)
9541 			goto out_balanced;
9542 	}
9543 
9544 	/* Try to move all excess tasks to child's sibling domain */
9545 	if (sds.prefer_sibling && local->group_type == group_has_spare &&
9546 	    busiest->sum_nr_running > local->sum_nr_running + 1)
9547 		goto force_balance;
9548 
9549 	if (busiest->group_type != group_overloaded) {
9550 		if (env->idle == CPU_NOT_IDLE)
9551 			/*
9552 			 * If the busiest group is not overloaded (and as a
9553 			 * result the local one too) but this CPU is already
9554 			 * busy, let another idle CPU try to pull task.
9555 			 */
9556 			goto out_balanced;
9557 
9558 		if (busiest->group_weight > 1 &&
9559 		    local->idle_cpus <= (busiest->idle_cpus + 1))
9560 			/*
9561 			 * If the busiest group is not overloaded
9562 			 * and there is no imbalance between this and busiest
9563 			 * group wrt idle CPUs, it is balanced. The imbalance
9564 			 * becomes significant if the diff is greater than 1
9565 			 * otherwise we might end up to just move the imbalance
9566 			 * on another group. Of course this applies only if
9567 			 * there is more than 1 CPU per group.
9568 			 */
9569 			goto out_balanced;
9570 
9571 		if (busiest->sum_h_nr_running == 1)
9572 			/*
9573 			 * busiest doesn't have any tasks waiting to run
9574 			 */
9575 			goto out_balanced;
9576 	}
9577 
9578 force_balance:
9579 	/* Looks like there is an imbalance. Compute it */
9580 	calculate_imbalance(env, &sds);
9581 	return env->imbalance ? sds.busiest : NULL;
9582 
9583 out_balanced:
9584 	env->imbalance = 0;
9585 	return NULL;
9586 }
9587 
9588 /*
9589  * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
9590  */
find_busiest_queue(struct lb_env * env,struct sched_group * group)9591 static struct rq *find_busiest_queue(struct lb_env *env,
9592 				     struct sched_group *group)
9593 {
9594 	struct rq *busiest = NULL, *rq;
9595 	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
9596 	unsigned int busiest_nr = 0;
9597 	int i, done = 0;
9598 
9599 	trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
9600 					     &busiest, &done);
9601 	if (done)
9602 		return busiest;
9603 
9604 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
9605 		unsigned long capacity, load, util;
9606 		unsigned int nr_running;
9607 		enum fbq_type rt;
9608 
9609 		rq = cpu_rq(i);
9610 		rt = fbq_classify_rq(rq);
9611 
9612 		/*
9613 		 * We classify groups/runqueues into three groups:
9614 		 *  - regular: there are !numa tasks
9615 		 *  - remote:  there are numa tasks that run on the 'wrong' node
9616 		 *  - all:     there is no distinction
9617 		 *
9618 		 * In order to avoid migrating ideally placed numa tasks,
9619 		 * ignore those when there's better options.
9620 		 *
9621 		 * If we ignore the actual busiest queue to migrate another
9622 		 * task, the next balance pass can still reduce the busiest
9623 		 * queue by moving tasks around inside the node.
9624 		 *
9625 		 * If we cannot move enough load due to this classification
9626 		 * the next pass will adjust the group classification and
9627 		 * allow migration of more tasks.
9628 		 *
9629 		 * Both cases only affect the total convergence complexity.
9630 		 */
9631 		if (rt > env->fbq_type)
9632 			continue;
9633 
9634 		capacity = capacity_of(i);
9635 		nr_running = rq->cfs.h_nr_running;
9636 
9637 		/*
9638 		 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
9639 		 * eventually lead to active_balancing high->low capacity.
9640 		 * Higher per-CPU capacity is considered better than balancing
9641 		 * average load.
9642 		 */
9643 		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
9644 		    capacity_of(env->dst_cpu) < capacity &&
9645 		    nr_running == 1)
9646 			continue;
9647 
9648 		switch (env->migration_type) {
9649 		case migrate_load:
9650 			/*
9651 			 * When comparing with load imbalance, use cpu_load()
9652 			 * which is not scaled with the CPU capacity.
9653 			 */
9654 			load = cpu_load(rq);
9655 
9656 			if (nr_running == 1 && load > env->imbalance &&
9657 			    !check_cpu_capacity(rq, env->sd))
9658 				break;
9659 
9660 			/*
9661 			 * For the load comparisons with the other CPUs,
9662 			 * consider the cpu_load() scaled with the CPU
9663 			 * capacity, so that the load can be moved away
9664 			 * from the CPU that is potentially running at a
9665 			 * lower capacity.
9666 			 *
9667 			 * Thus we're looking for max(load_i / capacity_i),
9668 			 * crosswise multiplication to rid ourselves of the
9669 			 * division works out to:
9670 			 * load_i * capacity_j > load_j * capacity_i;
9671 			 * where j is our previous maximum.
9672 			 */
9673 			if (load * busiest_capacity > busiest_load * capacity) {
9674 				busiest_load = load;
9675 				busiest_capacity = capacity;
9676 				busiest = rq;
9677 			}
9678 			break;
9679 
9680 		case migrate_util:
9681 			util = cpu_util(cpu_of(rq));
9682 
9683 			/*
9684 			 * Don't try to pull utilization from a CPU with one
9685 			 * running task. Whatever its utilization, we will fail
9686 			 * detach the task.
9687 			 */
9688 			if (nr_running <= 1)
9689 				continue;
9690 
9691 			if (busiest_util < util) {
9692 				busiest_util = util;
9693 				busiest = rq;
9694 			}
9695 			break;
9696 
9697 		case migrate_task:
9698 			if (busiest_nr < nr_running) {
9699 				busiest_nr = nr_running;
9700 				busiest = rq;
9701 			}
9702 			break;
9703 
9704 		case migrate_misfit:
9705 			/*
9706 			 * For ASYM_CPUCAPACITY domains with misfit tasks we
9707 			 * simply seek the "biggest" misfit task.
9708 			 */
9709 			if (rq->misfit_task_load > busiest_load) {
9710 				busiest_load = rq->misfit_task_load;
9711 				busiest = rq;
9712 			}
9713 
9714 			break;
9715 
9716 		}
9717 	}
9718 
9719 	return busiest;
9720 }
9721 
9722 /*
9723  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
9724  * so long as it is large enough.
9725  */
9726 #define MAX_PINNED_INTERVAL	512
9727 
9728 static inline bool
asym_active_balance(struct lb_env * env)9729 asym_active_balance(struct lb_env *env)
9730 {
9731 	/*
9732 	 * ASYM_PACKING needs to force migrate tasks from busy but
9733 	 * lower priority CPUs in order to pack all tasks in the
9734 	 * highest priority CPUs.
9735 	 */
9736 	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
9737 	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
9738 }
9739 
9740 static inline bool
voluntary_active_balance(struct lb_env * env)9741 voluntary_active_balance(struct lb_env *env)
9742 {
9743 	struct sched_domain *sd = env->sd;
9744 
9745 	if (asym_active_balance(env))
9746 		return 1;
9747 
9748 	/*
9749 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
9750 	 * It's worth migrating the task if the src_cpu's capacity is reduced
9751 	 * because of other sched_class or IRQs if more capacity stays
9752 	 * available on dst_cpu.
9753 	 */
9754 	if ((env->idle != CPU_NOT_IDLE) &&
9755 	    (env->src_rq->cfs.h_nr_running == 1)) {
9756 		if ((check_cpu_capacity(env->src_rq, sd)) &&
9757 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
9758 			return 1;
9759 	}
9760 
9761 	if (env->migration_type == migrate_misfit)
9762 		return 1;
9763 
9764 	return 0;
9765 }
9766 
need_active_balance(struct lb_env * env)9767 static int need_active_balance(struct lb_env *env)
9768 {
9769 	struct sched_domain *sd = env->sd;
9770 
9771 	if (voluntary_active_balance(env))
9772 		return 1;
9773 
9774 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
9775 }
9776 
9777 static int active_load_balance_cpu_stop(void *data);
9778 
should_we_balance(struct lb_env * env)9779 static int should_we_balance(struct lb_env *env)
9780 {
9781 	struct sched_group *sg = env->sd->groups;
9782 	int cpu;
9783 
9784 	if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE)) {
9785 		struct root_domain *rd = env->dst_rq->rd;
9786 		struct cpumask *cpul_mask = rockchip_perf_get_cpul_mask();
9787 		int level = rockchip_perf_get_level();
9788 
9789 		if ((level == ROCKCHIP_PERFORMANCE_HIGH) && !READ_ONCE(rd->overutilized) &&
9790 		    cpul_mask && cpumask_test_cpu(env->dst_cpu, cpul_mask))
9791 			return 0;
9792 	}
9793 
9794 	/*
9795 	 * Ensure the balancing environment is consistent; can happen
9796 	 * when the softirq triggers 'during' hotplug.
9797 	 */
9798 	if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
9799 		return 0;
9800 
9801 	/*
9802 	 * In the newly idle case, we will allow all the CPUs
9803 	 * to do the newly idle load balance.
9804 	 */
9805 	if (env->idle == CPU_NEWLY_IDLE)
9806 		return 1;
9807 
9808 	/* Try to find first idle CPU */
9809 	for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
9810 		if (!idle_cpu(cpu))
9811 			continue;
9812 
9813 		/* Are we the first idle CPU? */
9814 		return cpu == env->dst_cpu;
9815 	}
9816 
9817 	/* Are we the first CPU of this group ? */
9818 	return group_balance_cpu(sg) == env->dst_cpu;
9819 }
9820 
9821 /*
9822  * Check this_cpu to ensure it is balanced within domain. Attempt to move
9823  * tasks if there is an imbalance.
9824  */
load_balance(int this_cpu,struct rq * this_rq,struct sched_domain * sd,enum cpu_idle_type idle,int * continue_balancing)9825 static int load_balance(int this_cpu, struct rq *this_rq,
9826 			struct sched_domain *sd, enum cpu_idle_type idle,
9827 			int *continue_balancing)
9828 {
9829 	int ld_moved, cur_ld_moved, active_balance = 0;
9830 	struct sched_domain *sd_parent = sd->parent;
9831 	struct sched_group *group;
9832 	struct rq *busiest;
9833 	struct rq_flags rf;
9834 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
9835 
9836 	struct lb_env env = {
9837 		.sd		= sd,
9838 		.dst_cpu	= this_cpu,
9839 		.dst_rq		= this_rq,
9840 		.dst_grpmask    = sched_group_span(sd->groups),
9841 		.idle		= idle,
9842 		.loop_break	= sched_nr_migrate_break,
9843 		.cpus		= cpus,
9844 		.fbq_type	= all,
9845 		.tasks		= LIST_HEAD_INIT(env.tasks),
9846 	};
9847 
9848 	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
9849 
9850 	schedstat_inc(sd->lb_count[idle]);
9851 
9852 redo:
9853 	if (!should_we_balance(&env)) {
9854 		*continue_balancing = 0;
9855 		goto out_balanced;
9856 	}
9857 
9858 	group = find_busiest_group(&env);
9859 	if (!group) {
9860 		schedstat_inc(sd->lb_nobusyg[idle]);
9861 		goto out_balanced;
9862 	}
9863 
9864 	busiest = find_busiest_queue(&env, group);
9865 	if (!busiest) {
9866 		schedstat_inc(sd->lb_nobusyq[idle]);
9867 		goto out_balanced;
9868 	}
9869 
9870 	BUG_ON(busiest == env.dst_rq);
9871 
9872 	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
9873 
9874 	env.src_cpu = busiest->cpu;
9875 	env.src_rq = busiest;
9876 
9877 	ld_moved = 0;
9878 	if (busiest->nr_running > 1) {
9879 		/*
9880 		 * Attempt to move tasks. If find_busiest_group has found
9881 		 * an imbalance but busiest->nr_running <= 1, the group is
9882 		 * still unbalanced. ld_moved simply stays zero, so it is
9883 		 * correctly treated as an imbalance.
9884 		 */
9885 		env.flags |= LBF_ALL_PINNED;
9886 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
9887 
9888 more_balance:
9889 		rq_lock_irqsave(busiest, &rf);
9890 		env.src_rq_rf = &rf;
9891 		update_rq_clock(busiest);
9892 
9893 		/*
9894 		 * cur_ld_moved - load moved in current iteration
9895 		 * ld_moved     - cumulative load moved across iterations
9896 		 */
9897 		cur_ld_moved = detach_tasks(&env);
9898 
9899 		/*
9900 		 * We've detached some tasks from busiest_rq. Every
9901 		 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
9902 		 * unlock busiest->lock, and we are able to be sure
9903 		 * that nobody can manipulate the tasks in parallel.
9904 		 * See task_rq_lock() family for the details.
9905 		 */
9906 
9907 		rq_unlock(busiest, &rf);
9908 
9909 		if (cur_ld_moved) {
9910 			attach_tasks(&env);
9911 			ld_moved += cur_ld_moved;
9912 		}
9913 
9914 		local_irq_restore(rf.flags);
9915 
9916 		if (env.flags & LBF_NEED_BREAK) {
9917 			env.flags &= ~LBF_NEED_BREAK;
9918 			goto more_balance;
9919 		}
9920 
9921 		/*
9922 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
9923 		 * us and move them to an alternate dst_cpu in our sched_group
9924 		 * where they can run. The upper limit on how many times we
9925 		 * iterate on same src_cpu is dependent on number of CPUs in our
9926 		 * sched_group.
9927 		 *
9928 		 * This changes load balance semantics a bit on who can move
9929 		 * load to a given_cpu. In addition to the given_cpu itself
9930 		 * (or a ilb_cpu acting on its behalf where given_cpu is
9931 		 * nohz-idle), we now have balance_cpu in a position to move
9932 		 * load to given_cpu. In rare situations, this may cause
9933 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
9934 		 * _independently_ and at _same_ time to move some load to
9935 		 * given_cpu) causing exceess load to be moved to given_cpu.
9936 		 * This however should not happen so much in practice and
9937 		 * moreover subsequent load balance cycles should correct the
9938 		 * excess load moved.
9939 		 */
9940 		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
9941 
9942 			/* Prevent to re-select dst_cpu via env's CPUs */
9943 			__cpumask_clear_cpu(env.dst_cpu, env.cpus);
9944 
9945 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
9946 			env.dst_cpu	 = env.new_dst_cpu;
9947 			env.flags	&= ~LBF_DST_PINNED;
9948 			env.loop	 = 0;
9949 			env.loop_break	 = sched_nr_migrate_break;
9950 
9951 			/*
9952 			 * Go back to "more_balance" rather than "redo" since we
9953 			 * need to continue with same src_cpu.
9954 			 */
9955 			goto more_balance;
9956 		}
9957 
9958 		/*
9959 		 * We failed to reach balance because of affinity.
9960 		 */
9961 		if (sd_parent) {
9962 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
9963 
9964 			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
9965 				*group_imbalance = 1;
9966 		}
9967 
9968 		/* All tasks on this runqueue were pinned by CPU affinity */
9969 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
9970 			__cpumask_clear_cpu(cpu_of(busiest), cpus);
9971 			/*
9972 			 * Attempting to continue load balancing at the current
9973 			 * sched_domain level only makes sense if there are
9974 			 * active CPUs remaining as possible busiest CPUs to
9975 			 * pull load from which are not contained within the
9976 			 * destination group that is receiving any migrated
9977 			 * load.
9978 			 */
9979 			if (!cpumask_subset(cpus, env.dst_grpmask)) {
9980 				env.loop = 0;
9981 				env.loop_break = sched_nr_migrate_break;
9982 				goto redo;
9983 			}
9984 			goto out_all_pinned;
9985 		}
9986 	}
9987 
9988 	if (!ld_moved) {
9989 		schedstat_inc(sd->lb_failed[idle]);
9990 		/*
9991 		 * Increment the failure counter only on periodic balance.
9992 		 * We do not want newidle balance, which can be very
9993 		 * frequent, pollute the failure counter causing
9994 		 * excessive cache_hot migrations and active balances.
9995 		 */
9996 		if (idle != CPU_NEWLY_IDLE)
9997 			sd->nr_balance_failed++;
9998 
9999 		if (need_active_balance(&env)) {
10000 			unsigned long flags;
10001 
10002 			raw_spin_lock_irqsave(&busiest->lock, flags);
10003 
10004 			/*
10005 			 * Don't kick the active_load_balance_cpu_stop,
10006 			 * if the curr task on busiest CPU can't be
10007 			 * moved to this_cpu:
10008 			 */
10009 			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
10010 				raw_spin_unlock_irqrestore(&busiest->lock,
10011 							    flags);
10012 				env.flags |= LBF_ALL_PINNED;
10013 				goto out_one_pinned;
10014 			}
10015 
10016 			/*
10017 			 * ->active_balance synchronizes accesses to
10018 			 * ->active_balance_work.  Once set, it's cleared
10019 			 * only after active load balance is finished.
10020 			 */
10021 			if (!busiest->active_balance) {
10022 				busiest->active_balance = 1;
10023 				busiest->push_cpu = this_cpu;
10024 				active_balance = 1;
10025 			}
10026 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
10027 
10028 			if (active_balance) {
10029 				stop_one_cpu_nowait(cpu_of(busiest),
10030 					active_load_balance_cpu_stop, busiest,
10031 					&busiest->active_balance_work);
10032 			}
10033 
10034 			/* We've kicked active balancing, force task migration. */
10035 			sd->nr_balance_failed = sd->cache_nice_tries+1;
10036 		}
10037 	} else
10038 		sd->nr_balance_failed = 0;
10039 
10040 	if (likely(!active_balance) || voluntary_active_balance(&env)) {
10041 		/* We were unbalanced, so reset the balancing interval */
10042 		sd->balance_interval = sd->min_interval;
10043 	} else {
10044 		/*
10045 		 * If we've begun active balancing, start to back off. This
10046 		 * case may not be covered by the all_pinned logic if there
10047 		 * is only 1 task on the busy runqueue (because we don't call
10048 		 * detach_tasks).
10049 		 */
10050 		if (sd->balance_interval < sd->max_interval)
10051 			sd->balance_interval *= 2;
10052 	}
10053 
10054 	goto out;
10055 
10056 out_balanced:
10057 	/*
10058 	 * We reach balance although we may have faced some affinity
10059 	 * constraints. Clear the imbalance flag only if other tasks got
10060 	 * a chance to move and fix the imbalance.
10061 	 */
10062 	if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10063 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10064 
10065 		if (*group_imbalance)
10066 			*group_imbalance = 0;
10067 	}
10068 
10069 out_all_pinned:
10070 	/*
10071 	 * We reach balance because all tasks are pinned at this level so
10072 	 * we can't migrate them. Let the imbalance flag set so parent level
10073 	 * can try to migrate them.
10074 	 */
10075 	schedstat_inc(sd->lb_balanced[idle]);
10076 
10077 	sd->nr_balance_failed = 0;
10078 
10079 out_one_pinned:
10080 	ld_moved = 0;
10081 
10082 	/*
10083 	 * newidle_balance() disregards balance intervals, so we could
10084 	 * repeatedly reach this code, which would lead to balance_interval
10085 	 * skyrocketting in a short amount of time. Skip the balance_interval
10086 	 * increase logic to avoid that.
10087 	 */
10088 	if (env.idle == CPU_NEWLY_IDLE)
10089 		goto out;
10090 
10091 	/* tune up the balancing interval */
10092 	if ((env.flags & LBF_ALL_PINNED &&
10093 	     sd->balance_interval < MAX_PINNED_INTERVAL) ||
10094 	    sd->balance_interval < sd->max_interval)
10095 		sd->balance_interval *= 2;
10096 out:
10097 	return ld_moved;
10098 }
10099 
10100 static inline unsigned long
get_sd_balance_interval(struct sched_domain * sd,int cpu_busy)10101 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
10102 {
10103 	unsigned long interval = sd->balance_interval;
10104 
10105 	if (cpu_busy)
10106 		interval *= sd->busy_factor;
10107 
10108 	/* scale ms to jiffies */
10109 	interval = msecs_to_jiffies(interval);
10110 
10111 	/*
10112 	 * Reduce likelihood of busy balancing at higher domains racing with
10113 	 * balancing at lower domains by preventing their balancing periods
10114 	 * from being multiples of each other.
10115 	 */
10116 	if (cpu_busy)
10117 		interval -= 1;
10118 
10119 	interval = clamp(interval, 1UL, max_load_balance_interval);
10120 
10121 	return interval;
10122 }
10123 
10124 static inline void
update_next_balance(struct sched_domain * sd,unsigned long * next_balance)10125 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
10126 {
10127 	unsigned long interval, next;
10128 
10129 	/* used by idle balance, so cpu_busy = 0 */
10130 	interval = get_sd_balance_interval(sd, 0);
10131 	next = sd->last_balance + interval;
10132 
10133 	if (time_after(*next_balance, next))
10134 		*next_balance = next;
10135 }
10136 
10137 /*
10138  * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
10139  * running tasks off the busiest CPU onto idle CPUs. It requires at
10140  * least 1 task to be running on each physical CPU where possible, and
10141  * avoids physical / logical imbalances.
10142  */
active_load_balance_cpu_stop(void * data)10143 static int active_load_balance_cpu_stop(void *data)
10144 {
10145 	struct rq *busiest_rq = data;
10146 	int busiest_cpu = cpu_of(busiest_rq);
10147 	int target_cpu = busiest_rq->push_cpu;
10148 	struct rq *target_rq = cpu_rq(target_cpu);
10149 	struct sched_domain *sd;
10150 	struct task_struct *p = NULL;
10151 	struct rq_flags rf;
10152 
10153 	rq_lock_irq(busiest_rq, &rf);
10154 	/*
10155 	 * Between queueing the stop-work and running it is a hole in which
10156 	 * CPUs can become inactive. We should not move tasks from or to
10157 	 * inactive CPUs.
10158 	 */
10159 	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
10160 		goto out_unlock;
10161 
10162 	/* Make sure the requested CPU hasn't gone down in the meantime: */
10163 	if (unlikely(busiest_cpu != smp_processor_id() ||
10164 		     !busiest_rq->active_balance))
10165 		goto out_unlock;
10166 
10167 	/* Is there any task to move? */
10168 	if (busiest_rq->nr_running <= 1)
10169 		goto out_unlock;
10170 
10171 	/*
10172 	 * This condition is "impossible", if it occurs
10173 	 * we need to fix it. Originally reported by
10174 	 * Bjorn Helgaas on a 128-CPU setup.
10175 	 */
10176 	BUG_ON(busiest_rq == target_rq);
10177 
10178 	/* Search for an sd spanning us and the target CPU. */
10179 	rcu_read_lock();
10180 	for_each_domain(target_cpu, sd) {
10181 		if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
10182 			break;
10183 	}
10184 
10185 	if (likely(sd)) {
10186 		struct lb_env env = {
10187 			.sd		= sd,
10188 			.dst_cpu	= target_cpu,
10189 			.dst_rq		= target_rq,
10190 			.src_cpu	= busiest_rq->cpu,
10191 			.src_rq		= busiest_rq,
10192 			.idle		= CPU_IDLE,
10193 			/*
10194 			 * can_migrate_task() doesn't need to compute new_dst_cpu
10195 			 * for active balancing. Since we have CPU_IDLE, but no
10196 			 * @dst_grpmask we need to make that test go away with lying
10197 			 * about DST_PINNED.
10198 			 */
10199 			.flags		= LBF_DST_PINNED,
10200 			.src_rq_rf	= &rf,
10201 		};
10202 
10203 		schedstat_inc(sd->alb_count);
10204 		update_rq_clock(busiest_rq);
10205 
10206 		p = detach_one_task(&env);
10207 		if (p) {
10208 			schedstat_inc(sd->alb_pushed);
10209 			/* Active balancing done, reset the failure counter. */
10210 			sd->nr_balance_failed = 0;
10211 		} else {
10212 			schedstat_inc(sd->alb_failed);
10213 		}
10214 	}
10215 	rcu_read_unlock();
10216 out_unlock:
10217 	busiest_rq->active_balance = 0;
10218 	rq_unlock(busiest_rq, &rf);
10219 
10220 	if (p)
10221 		attach_one_task(target_rq, p);
10222 
10223 	local_irq_enable();
10224 
10225 	return 0;
10226 }
10227 
10228 static DEFINE_SPINLOCK(balancing);
10229 
10230 /*
10231  * Scale the max load_balance interval with the number of CPUs in the system.
10232  * This trades load-balance latency on larger machines for less cross talk.
10233  */
update_max_interval(void)10234 void update_max_interval(void)
10235 {
10236 	max_load_balance_interval = HZ*num_active_cpus()/10;
10237 }
10238 
10239 /*
10240  * It checks each scheduling domain to see if it is due to be balanced,
10241  * and initiates a balancing operation if so.
10242  *
10243  * Balancing parameters are set up in init_sched_domains.
10244  */
rebalance_domains(struct rq * rq,enum cpu_idle_type idle)10245 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
10246 {
10247 	int continue_balancing = 1;
10248 	int cpu = rq->cpu;
10249 	int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10250 	unsigned long interval;
10251 	struct sched_domain *sd;
10252 	/* Earliest time when we have to do rebalance again */
10253 	unsigned long next_balance = jiffies + 60*HZ;
10254 	int update_next_balance = 0;
10255 	int need_serialize, need_decay = 0;
10256 	u64 max_cost = 0;
10257 
10258 	trace_android_rvh_sched_rebalance_domains(rq, &continue_balancing);
10259 	if (!continue_balancing)
10260 		return;
10261 
10262 	rcu_read_lock();
10263 	for_each_domain(cpu, sd) {
10264 		/*
10265 		 * Decay the newidle max times here because this is a regular
10266 		 * visit to all the domains. Decay ~1% per second.
10267 		 */
10268 		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
10269 			sd->max_newidle_lb_cost =
10270 				(sd->max_newidle_lb_cost * 253) / 256;
10271 			sd->next_decay_max_lb_cost = jiffies + HZ;
10272 			need_decay = 1;
10273 		}
10274 		max_cost += sd->max_newidle_lb_cost;
10275 
10276 		/*
10277 		 * Stop the load balance at this level. There is another
10278 		 * CPU in our sched group which is doing load balancing more
10279 		 * actively.
10280 		 */
10281 		if (!continue_balancing) {
10282 			if (need_decay)
10283 				continue;
10284 			break;
10285 		}
10286 
10287 		interval = get_sd_balance_interval(sd, busy);
10288 
10289 		need_serialize = sd->flags & SD_SERIALIZE;
10290 		if (need_serialize) {
10291 			if (!spin_trylock(&balancing))
10292 				goto out;
10293 		}
10294 
10295 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
10296 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10297 				/*
10298 				 * The LBF_DST_PINNED logic could have changed
10299 				 * env->dst_cpu, so we can't know our idle
10300 				 * state even if we migrated tasks. Update it.
10301 				 */
10302 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10303 				busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10304 			}
10305 			sd->last_balance = jiffies;
10306 			interval = get_sd_balance_interval(sd, busy);
10307 		}
10308 		if (need_serialize)
10309 			spin_unlock(&balancing);
10310 out:
10311 		if (time_after(next_balance, sd->last_balance + interval)) {
10312 			next_balance = sd->last_balance + interval;
10313 			update_next_balance = 1;
10314 		}
10315 	}
10316 	if (need_decay) {
10317 		/*
10318 		 * Ensure the rq-wide value also decays but keep it at a
10319 		 * reasonable floor to avoid funnies with rq->avg_idle.
10320 		 */
10321 		rq->max_idle_balance_cost =
10322 			max((u64)sysctl_sched_migration_cost, max_cost);
10323 	}
10324 	rcu_read_unlock();
10325 
10326 	/*
10327 	 * next_balance will be updated only when there is a need.
10328 	 * When the cpu is attached to null domain for ex, it will not be
10329 	 * updated.
10330 	 */
10331 	if (likely(update_next_balance)) {
10332 		rq->next_balance = next_balance;
10333 
10334 #ifdef CONFIG_NO_HZ_COMMON
10335 		/*
10336 		 * If this CPU has been elected to perform the nohz idle
10337 		 * balance. Other idle CPUs have already rebalanced with
10338 		 * nohz_idle_balance() and nohz.next_balance has been
10339 		 * updated accordingly. This CPU is now running the idle load
10340 		 * balance for itself and we need to update the
10341 		 * nohz.next_balance accordingly.
10342 		 */
10343 		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
10344 			nohz.next_balance = rq->next_balance;
10345 #endif
10346 	}
10347 }
10348 
on_null_domain(struct rq * rq)10349 static inline int on_null_domain(struct rq *rq)
10350 {
10351 	return unlikely(!rcu_dereference_sched(rq->sd));
10352 }
10353 
10354 #ifdef CONFIG_NO_HZ_COMMON
10355 /*
10356  * idle load balancing details
10357  * - When one of the busy CPUs notice that there may be an idle rebalancing
10358  *   needed, they will kick the idle load balancer, which then does idle
10359  *   load balancing for all the idle CPUs.
10360  * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set
10361  *   anywhere yet.
10362  */
10363 
find_new_ilb(void)10364 static inline int find_new_ilb(void)
10365 {
10366 	int ilb = -1;
10367 
10368 	trace_android_rvh_find_new_ilb(nohz.idle_cpus_mask, &ilb);
10369 	if (ilb >= 0)
10370 		return ilb;
10371 
10372 	for_each_cpu_and(ilb, nohz.idle_cpus_mask,
10373 			      housekeeping_cpumask(HK_FLAG_MISC)) {
10374 		if (idle_cpu(ilb))
10375 			return ilb;
10376 	}
10377 
10378 	return nr_cpu_ids;
10379 }
10380 
10381 /*
10382  * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
10383  * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
10384  */
kick_ilb(unsigned int flags)10385 static void kick_ilb(unsigned int flags)
10386 {
10387 	int ilb_cpu;
10388 
10389 	/*
10390 	 * Increase nohz.next_balance only when if full ilb is triggered but
10391 	 * not if we only update stats.
10392 	 */
10393 	if (flags & NOHZ_BALANCE_KICK)
10394 		nohz.next_balance = jiffies+1;
10395 
10396 	ilb_cpu = find_new_ilb();
10397 
10398 	if (ilb_cpu >= nr_cpu_ids)
10399 		return;
10400 
10401 	/*
10402 	 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
10403 	 * the first flag owns it; cleared by nohz_csd_func().
10404 	 */
10405 	flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
10406 	if (flags & NOHZ_KICK_MASK)
10407 		return;
10408 
10409 	/*
10410 	 * This way we generate an IPI on the target CPU which
10411 	 * is idle. And the softirq performing nohz idle load balance
10412 	 * will be run before returning from the IPI.
10413 	 */
10414 	smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
10415 }
10416 
10417 /*
10418  * Current decision point for kicking the idle load balancer in the presence
10419  * of idle CPUs in the system.
10420  */
nohz_balancer_kick(struct rq * rq)10421 static void nohz_balancer_kick(struct rq *rq)
10422 {
10423 	unsigned long now = jiffies;
10424 	struct sched_domain_shared *sds;
10425 	struct sched_domain *sd;
10426 	int nr_busy, i, cpu = rq->cpu;
10427 	unsigned int flags = 0;
10428 	int done = 0;
10429 
10430 	if (unlikely(rq->idle_balance))
10431 		return;
10432 
10433 	/*
10434 	 * We may be recently in ticked or tickless idle mode. At the first
10435 	 * busy tick after returning from idle, we will update the busy stats.
10436 	 */
10437 	nohz_balance_exit_idle(rq);
10438 
10439 	/*
10440 	 * None are in tickless mode and hence no need for NOHZ idle load
10441 	 * balancing.
10442 	 */
10443 	if (likely(!atomic_read(&nohz.nr_cpus)))
10444 		return;
10445 
10446 	if (READ_ONCE(nohz.has_blocked) &&
10447 	    time_after(now, READ_ONCE(nohz.next_blocked)))
10448 		flags = NOHZ_STATS_KICK;
10449 
10450 	if (time_before(now, nohz.next_balance))
10451 		goto out;
10452 
10453 	trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
10454 	if (done)
10455 		goto out;
10456 
10457 	if (rq->nr_running >= 2) {
10458 		flags = NOHZ_KICK_MASK;
10459 		goto out;
10460 	}
10461 
10462 	rcu_read_lock();
10463 
10464 	sd = rcu_dereference(rq->sd);
10465 	if (sd) {
10466 		/*
10467 		 * If there's a CFS task and the current CPU has reduced
10468 		 * capacity; kick the ILB to see if there's a better CPU to run
10469 		 * on.
10470 		 */
10471 		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
10472 			flags = NOHZ_KICK_MASK;
10473 			goto unlock;
10474 		}
10475 	}
10476 
10477 	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
10478 	if (sd) {
10479 		/*
10480 		 * When ASYM_PACKING; see if there's a more preferred CPU
10481 		 * currently idle; in which case, kick the ILB to move tasks
10482 		 * around.
10483 		 */
10484 		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
10485 			if (sched_asym_prefer(i, cpu)) {
10486 				flags = NOHZ_KICK_MASK;
10487 				goto unlock;
10488 			}
10489 		}
10490 	}
10491 
10492 	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
10493 	if (sd) {
10494 		/*
10495 		 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
10496 		 * to run the misfit task on.
10497 		 */
10498 		if (check_misfit_status(rq, sd)) {
10499 			flags = NOHZ_KICK_MASK;
10500 			goto unlock;
10501 		}
10502 
10503 		/*
10504 		 * For asymmetric systems, we do not want to nicely balance
10505 		 * cache use, instead we want to embrace asymmetry and only
10506 		 * ensure tasks have enough CPU capacity.
10507 		 *
10508 		 * Skip the LLC logic because it's not relevant in that case.
10509 		 */
10510 		goto unlock;
10511 	}
10512 
10513 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10514 	if (sds) {
10515 		/*
10516 		 * If there is an imbalance between LLC domains (IOW we could
10517 		 * increase the overall cache use), we need some less-loaded LLC
10518 		 * domain to pull some load. Likewise, we may need to spread
10519 		 * load within the current LLC domain (e.g. packed SMT cores but
10520 		 * other CPUs are idle). We can't really know from here how busy
10521 		 * the others are - so just get a nohz balance going if it looks
10522 		 * like this LLC domain has tasks we could move.
10523 		 */
10524 		nr_busy = atomic_read(&sds->nr_busy_cpus);
10525 		if (nr_busy > 1) {
10526 			flags = NOHZ_KICK_MASK;
10527 			goto unlock;
10528 		}
10529 	}
10530 unlock:
10531 	rcu_read_unlock();
10532 out:
10533 	if (flags)
10534 		kick_ilb(flags);
10535 }
10536 
set_cpu_sd_state_busy(int cpu)10537 static void set_cpu_sd_state_busy(int cpu)
10538 {
10539 	struct sched_domain *sd;
10540 
10541 	rcu_read_lock();
10542 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
10543 
10544 	if (!sd || !sd->nohz_idle)
10545 		goto unlock;
10546 	sd->nohz_idle = 0;
10547 
10548 	atomic_inc(&sd->shared->nr_busy_cpus);
10549 unlock:
10550 	rcu_read_unlock();
10551 }
10552 
nohz_balance_exit_idle(struct rq * rq)10553 void nohz_balance_exit_idle(struct rq *rq)
10554 {
10555 	SCHED_WARN_ON(rq != this_rq());
10556 
10557 	if (likely(!rq->nohz_tick_stopped))
10558 		return;
10559 
10560 	rq->nohz_tick_stopped = 0;
10561 	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10562 	atomic_dec(&nohz.nr_cpus);
10563 
10564 	set_cpu_sd_state_busy(rq->cpu);
10565 }
10566 
set_cpu_sd_state_idle(int cpu)10567 static void set_cpu_sd_state_idle(int cpu)
10568 {
10569 	struct sched_domain *sd;
10570 
10571 	rcu_read_lock();
10572 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
10573 
10574 	if (!sd || sd->nohz_idle)
10575 		goto unlock;
10576 	sd->nohz_idle = 1;
10577 
10578 	atomic_dec(&sd->shared->nr_busy_cpus);
10579 unlock:
10580 	rcu_read_unlock();
10581 }
10582 
10583 /*
10584  * This routine will record that the CPU is going idle with tick stopped.
10585  * This info will be used in performing idle load balancing in the future.
10586  */
nohz_balance_enter_idle(int cpu)10587 void nohz_balance_enter_idle(int cpu)
10588 {
10589 	struct rq *rq = cpu_rq(cpu);
10590 
10591 	SCHED_WARN_ON(cpu != smp_processor_id());
10592 
10593 	if (!cpu_active(cpu)) {
10594 		/*
10595 		 * A CPU can be paused while it is idle with it's tick
10596 		 * stopped. nohz_balance_exit_idle() should be called
10597 		 * from the local CPU, so it can't be called during
10598 		 * pause. This results in paused CPU participating in
10599 		 * the nohz idle balance, which should be avoided.
10600 		 *
10601 		 * When the paused CPU exits idle and enters again,
10602 		 * exempt the paused CPU from nohz_balance_exit_idle.
10603 		 */
10604 		nohz_balance_exit_idle(rq);
10605 		return;
10606 	}
10607 
10608 	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
10609 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
10610 		return;
10611 
10612 	/*
10613 	 * Can be set safely without rq->lock held
10614 	 * If a clear happens, it will have evaluated last additions because
10615 	 * rq->lock is held during the check and the clear
10616 	 */
10617 	rq->has_blocked_load = 1;
10618 
10619 	/*
10620 	 * The tick is still stopped but load could have been added in the
10621 	 * meantime. We set the nohz.has_blocked flag to trig a check of the
10622 	 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
10623 	 * of nohz.has_blocked can only happen after checking the new load
10624 	 */
10625 	if (rq->nohz_tick_stopped)
10626 		goto out;
10627 
10628 	/* If we're a completely isolated CPU, we don't play: */
10629 	if (on_null_domain(rq))
10630 		return;
10631 
10632 	rq->nohz_tick_stopped = 1;
10633 
10634 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
10635 	atomic_inc(&nohz.nr_cpus);
10636 
10637 	/*
10638 	 * Ensures that if nohz_idle_balance() fails to observe our
10639 	 * @idle_cpus_mask store, it must observe the @has_blocked
10640 	 * store.
10641 	 */
10642 	smp_mb__after_atomic();
10643 
10644 	set_cpu_sd_state_idle(cpu);
10645 
10646 out:
10647 	/*
10648 	 * Each time a cpu enter idle, we assume that it has blocked load and
10649 	 * enable the periodic update of the load of idle cpus
10650 	 */
10651 	WRITE_ONCE(nohz.has_blocked, 1);
10652 }
10653 
10654 /*
10655  * Internal function that runs load balance for all idle cpus. The load balance
10656  * can be a simple update of blocked load or a complete load balance with
10657  * tasks movement depending of flags.
10658  * The function returns false if the loop has stopped before running
10659  * through all idle CPUs.
10660  */
_nohz_idle_balance(struct rq * this_rq,unsigned int flags,enum cpu_idle_type idle)10661 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
10662 			       enum cpu_idle_type idle)
10663 {
10664 	/* Earliest time when we have to do rebalance again */
10665 	unsigned long now = jiffies;
10666 	unsigned long next_balance = now + 60*HZ;
10667 	bool has_blocked_load = false;
10668 	int update_next_balance = 0;
10669 	int this_cpu = this_rq->cpu;
10670 	int balance_cpu;
10671 	int ret = false;
10672 	struct rq *rq;
10673 
10674 	SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
10675 
10676 	/*
10677 	 * We assume there will be no idle load after this update and clear
10678 	 * the has_blocked flag. If a cpu enters idle in the mean time, it will
10679 	 * set the has_blocked flag and trig another update of idle load.
10680 	 * Because a cpu that becomes idle, is added to idle_cpus_mask before
10681 	 * setting the flag, we are sure to not clear the state and not
10682 	 * check the load of an idle cpu.
10683 	 */
10684 	WRITE_ONCE(nohz.has_blocked, 0);
10685 
10686 	/*
10687 	 * Ensures that if we miss the CPU, we must see the has_blocked
10688 	 * store from nohz_balance_enter_idle().
10689 	 */
10690 	smp_mb();
10691 
10692 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
10693 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
10694 			continue;
10695 
10696 		/*
10697 		 * If this CPU gets work to do, stop the load balancing
10698 		 * work being done for other CPUs. Next load
10699 		 * balancing owner will pick it up.
10700 		 */
10701 		if (need_resched()) {
10702 			has_blocked_load = true;
10703 			goto abort;
10704 		}
10705 
10706 		rq = cpu_rq(balance_cpu);
10707 
10708 		has_blocked_load |= update_nohz_stats(rq, true);
10709 
10710 		/*
10711 		 * If time for next balance is due,
10712 		 * do the balance.
10713 		 */
10714 		if (time_after_eq(jiffies, rq->next_balance)) {
10715 			struct rq_flags rf;
10716 
10717 			rq_lock_irqsave(rq, &rf);
10718 			update_rq_clock(rq);
10719 			rq_unlock_irqrestore(rq, &rf);
10720 
10721 			if (flags & NOHZ_BALANCE_KICK)
10722 				rebalance_domains(rq, CPU_IDLE);
10723 		}
10724 
10725 		if (time_after(next_balance, rq->next_balance)) {
10726 			next_balance = rq->next_balance;
10727 			update_next_balance = 1;
10728 		}
10729 	}
10730 
10731 	/*
10732 	 * next_balance will be updated only when there is a need.
10733 	 * When the CPU is attached to null domain for ex, it will not be
10734 	 * updated.
10735 	 */
10736 	if (likely(update_next_balance))
10737 		nohz.next_balance = next_balance;
10738 
10739 	/* Newly idle CPU doesn't need an update */
10740 	if (idle != CPU_NEWLY_IDLE) {
10741 		update_blocked_averages(this_cpu);
10742 		has_blocked_load |= this_rq->has_blocked_load;
10743 	}
10744 
10745 	if (flags & NOHZ_BALANCE_KICK)
10746 		rebalance_domains(this_rq, CPU_IDLE);
10747 
10748 	WRITE_ONCE(nohz.next_blocked,
10749 		now + msecs_to_jiffies(LOAD_AVG_PERIOD));
10750 
10751 	/* The full idle balance loop has been done */
10752 	ret = true;
10753 
10754 abort:
10755 	/* There is still blocked load, enable periodic update */
10756 	if (has_blocked_load)
10757 		WRITE_ONCE(nohz.has_blocked, 1);
10758 
10759 	return ret;
10760 }
10761 
10762 /*
10763  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
10764  * rebalancing for all the cpus for whom scheduler ticks are stopped.
10765  */
nohz_idle_balance(struct rq * this_rq,enum cpu_idle_type idle)10766 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
10767 {
10768 	unsigned int flags = this_rq->nohz_idle_balance;
10769 
10770 	if (!flags)
10771 		return false;
10772 
10773 	this_rq->nohz_idle_balance = 0;
10774 
10775 	if (idle != CPU_IDLE)
10776 		return false;
10777 
10778 	_nohz_idle_balance(this_rq, flags, idle);
10779 
10780 	return true;
10781 }
10782 
nohz_newidle_balance(struct rq * this_rq)10783 static void nohz_newidle_balance(struct rq *this_rq)
10784 {
10785 	int this_cpu = this_rq->cpu;
10786 
10787 	/*
10788 	 * This CPU doesn't want to be disturbed by scheduler
10789 	 * housekeeping
10790 	 */
10791 	if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
10792 		return;
10793 
10794 	/* Will wake up very soon. No time for doing anything else*/
10795 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
10796 		return;
10797 
10798 	/* Don't need to update blocked load of idle CPUs*/
10799 	if (!READ_ONCE(nohz.has_blocked) ||
10800 	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
10801 		return;
10802 
10803 	raw_spin_unlock(&this_rq->lock);
10804 	/*
10805 	 * This CPU is going to be idle and blocked load of idle CPUs
10806 	 * need to be updated. Run the ilb locally as it is a good
10807 	 * candidate for ilb instead of waking up another idle CPU.
10808 	 * Kick an normal ilb if we failed to do the update.
10809 	 */
10810 	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
10811 		kick_ilb(NOHZ_STATS_KICK);
10812 	raw_spin_lock(&this_rq->lock);
10813 }
10814 
10815 #else /* !CONFIG_NO_HZ_COMMON */
nohz_balancer_kick(struct rq * rq)10816 static inline void nohz_balancer_kick(struct rq *rq) { }
10817 
nohz_idle_balance(struct rq * this_rq,enum cpu_idle_type idle)10818 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
10819 {
10820 	return false;
10821 }
10822 
nohz_newidle_balance(struct rq * this_rq)10823 static inline void nohz_newidle_balance(struct rq *this_rq) { }
10824 #endif /* CONFIG_NO_HZ_COMMON */
10825 
10826 /*
10827  * idle_balance is called by schedule() if this_cpu is about to become
10828  * idle. Attempts to pull tasks from other CPUs.
10829  *
10830  * Returns:
10831  *   < 0 - we released the lock and there are !fair tasks present
10832  *     0 - failed, no new tasks
10833  *   > 0 - success, new (fair) tasks present
10834  */
newidle_balance(struct rq * this_rq,struct rq_flags * rf)10835 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
10836 {
10837 	unsigned long next_balance = jiffies + HZ;
10838 	int this_cpu = this_rq->cpu;
10839 	struct sched_domain *sd;
10840 	int pulled_task = 0;
10841 	u64 curr_cost = 0;
10842 	int done = 0;
10843 
10844 	trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
10845 	if (done)
10846 		return pulled_task;
10847 
10848 	update_misfit_status(NULL, this_rq);
10849 	/*
10850 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
10851 	 * measure the duration of idle_balance() as idle time.
10852 	 */
10853 	this_rq->idle_stamp = rq_clock(this_rq);
10854 
10855 	/*
10856 	 * Do not pull tasks towards !active CPUs...
10857 	 */
10858 	if (!cpu_active(this_cpu))
10859 		return 0;
10860 
10861 	/*
10862 	 * This is OK, because current is on_cpu, which avoids it being picked
10863 	 * for load-balance and preemption/IRQs are still disabled avoiding
10864 	 * further scheduler activity on it and we're being very careful to
10865 	 * re-start the picking loop.
10866 	 */
10867 	rq_unpin_lock(this_rq, rf);
10868 
10869 	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
10870 	    !READ_ONCE(this_rq->rd->overload)) {
10871 
10872 		rcu_read_lock();
10873 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
10874 		if (sd)
10875 			update_next_balance(sd, &next_balance);
10876 		rcu_read_unlock();
10877 
10878 		nohz_newidle_balance(this_rq);
10879 
10880 		goto out;
10881 	}
10882 
10883 	raw_spin_unlock(&this_rq->lock);
10884 
10885 	update_blocked_averages(this_cpu);
10886 	rcu_read_lock();
10887 	for_each_domain(this_cpu, sd) {
10888 		int continue_balancing = 1;
10889 		u64 t0, domain_cost;
10890 
10891 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
10892 			update_next_balance(sd, &next_balance);
10893 			break;
10894 		}
10895 
10896 		if (sd->flags & SD_BALANCE_NEWIDLE) {
10897 			t0 = sched_clock_cpu(this_cpu);
10898 
10899 			pulled_task = load_balance(this_cpu, this_rq,
10900 						   sd, CPU_NEWLY_IDLE,
10901 						   &continue_balancing);
10902 
10903 			domain_cost = sched_clock_cpu(this_cpu) - t0;
10904 			if (domain_cost > sd->max_newidle_lb_cost)
10905 				sd->max_newidle_lb_cost = domain_cost;
10906 
10907 			curr_cost += domain_cost;
10908 		}
10909 
10910 		update_next_balance(sd, &next_balance);
10911 
10912 		/*
10913 		 * Stop searching for tasks to pull if there are
10914 		 * now runnable tasks on this rq.
10915 		 */
10916 		if (pulled_task || this_rq->nr_running > 0)
10917 			break;
10918 	}
10919 	rcu_read_unlock();
10920 
10921 	raw_spin_lock(&this_rq->lock);
10922 
10923 	if (curr_cost > this_rq->max_idle_balance_cost)
10924 		this_rq->max_idle_balance_cost = curr_cost;
10925 
10926 out:
10927 	/*
10928 	 * While browsing the domains, we released the rq lock, a task could
10929 	 * have been enqueued in the meantime. Since we're not going idle,
10930 	 * pretend we pulled a task.
10931 	 */
10932 	if (this_rq->cfs.h_nr_running && !pulled_task)
10933 		pulled_task = 1;
10934 
10935 	/* Move the next balance forward */
10936 	if (time_after(this_rq->next_balance, next_balance))
10937 		this_rq->next_balance = next_balance;
10938 
10939 	/* Is there a task of a high priority class? */
10940 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
10941 		pulled_task = -1;
10942 
10943 	if (pulled_task)
10944 		this_rq->idle_stamp = 0;
10945 
10946 	rq_repin_lock(this_rq, rf);
10947 
10948 	return pulled_task;
10949 }
10950 
10951 /*
10952  * run_rebalance_domains is triggered when needed from the scheduler tick.
10953  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
10954  */
run_rebalance_domains(struct softirq_action * h)10955 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
10956 {
10957 	struct rq *this_rq = this_rq();
10958 	enum cpu_idle_type idle = this_rq->idle_balance ?
10959 						CPU_IDLE : CPU_NOT_IDLE;
10960 
10961 	/*
10962 	 * If this CPU has a pending nohz_balance_kick, then do the
10963 	 * balancing on behalf of the other idle CPUs whose ticks are
10964 	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
10965 	 * give the idle CPUs a chance to load balance. Else we may
10966 	 * load balance only within the local sched_domain hierarchy
10967 	 * and abort nohz_idle_balance altogether if we pull some load.
10968 	 */
10969 	if (nohz_idle_balance(this_rq, idle))
10970 		return;
10971 
10972 	/* normal load balance */
10973 	update_blocked_averages(this_rq->cpu);
10974 	rebalance_domains(this_rq, idle);
10975 }
10976 
10977 /*
10978  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
10979  */
trigger_load_balance(struct rq * rq)10980 void trigger_load_balance(struct rq *rq)
10981 {
10982 	/* Don't need to rebalance while attached to NULL domain */
10983 	if (unlikely(on_null_domain(rq)))
10984 		return;
10985 
10986 	if (time_after_eq(jiffies, rq->next_balance))
10987 		raise_softirq(SCHED_SOFTIRQ);
10988 
10989 	nohz_balancer_kick(rq);
10990 }
10991 
rq_online_fair(struct rq * rq)10992 static void rq_online_fair(struct rq *rq)
10993 {
10994 	update_sysctl();
10995 
10996 	update_runtime_enabled(rq);
10997 }
10998 
rq_offline_fair(struct rq * rq)10999 static void rq_offline_fair(struct rq *rq)
11000 {
11001 	update_sysctl();
11002 
11003 	/* Ensure any throttled groups are reachable by pick_next_task */
11004 	unthrottle_offline_cfs_rqs(rq);
11005 }
11006 
11007 #endif /* CONFIG_SMP */
11008 
11009 /*
11010  * scheduler tick hitting a task of our scheduling class.
11011  *
11012  * NOTE: This function can be called remotely by the tick offload that
11013  * goes along full dynticks. Therefore no local assumption can be made
11014  * and everything must be accessed through the @rq and @curr passed in
11015  * parameters.
11016  */
task_tick_fair(struct rq * rq,struct task_struct * curr,int queued)11017 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
11018 {
11019 	struct cfs_rq *cfs_rq;
11020 	struct sched_entity *se = &curr->se;
11021 
11022 	for_each_sched_entity(se) {
11023 		cfs_rq = cfs_rq_of(se);
11024 		entity_tick(cfs_rq, se, queued);
11025 	}
11026 
11027 	if (static_branch_unlikely(&sched_numa_balancing))
11028 		task_tick_numa(rq, curr);
11029 
11030 	update_misfit_status(curr, rq);
11031 	update_overutilized_status(task_rq(curr));
11032 }
11033 
11034 /*
11035  * called on fork with the child task as argument from the parent's context
11036  *  - child not yet on the tasklist
11037  *  - preemption disabled
11038  */
task_fork_fair(struct task_struct * p)11039 static void task_fork_fair(struct task_struct *p)
11040 {
11041 	struct cfs_rq *cfs_rq;
11042 	struct sched_entity *se = &p->se, *curr;
11043 	struct rq *rq = this_rq();
11044 	struct rq_flags rf;
11045 
11046 	rq_lock(rq, &rf);
11047 	update_rq_clock(rq);
11048 
11049 	cfs_rq = task_cfs_rq(current);
11050 	curr = cfs_rq->curr;
11051 	if (curr) {
11052 		update_curr(cfs_rq);
11053 		se->vruntime = curr->vruntime;
11054 	}
11055 	place_entity(cfs_rq, se, 1);
11056 
11057 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
11058 		/*
11059 		 * Upon rescheduling, sched_class::put_prev_task() will place
11060 		 * 'current' within the tree based on its new key value.
11061 		 */
11062 		swap(curr->vruntime, se->vruntime);
11063 		resched_curr(rq);
11064 	}
11065 
11066 	se->vruntime -= cfs_rq->min_vruntime;
11067 	rq_unlock(rq, &rf);
11068 }
11069 
11070 /*
11071  * Priority of the task has changed. Check to see if we preempt
11072  * the current task.
11073  */
11074 static void
prio_changed_fair(struct rq * rq,struct task_struct * p,int oldprio)11075 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
11076 {
11077 	if (!task_on_rq_queued(p))
11078 		return;
11079 
11080 	if (rq->cfs.nr_running == 1)
11081 		return;
11082 
11083 	/*
11084 	 * Reschedule if we are currently running on this runqueue and
11085 	 * our priority decreased, or if we are not currently running on
11086 	 * this runqueue and our priority is higher than the current's
11087 	 */
11088 	if (rq->curr == p) {
11089 		if (p->prio > oldprio)
11090 			resched_curr(rq);
11091 	} else
11092 		check_preempt_curr(rq, p, 0);
11093 }
11094 
vruntime_normalized(struct task_struct * p)11095 static inline bool vruntime_normalized(struct task_struct *p)
11096 {
11097 	struct sched_entity *se = &p->se;
11098 
11099 	/*
11100 	 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
11101 	 * the dequeue_entity(.flags=0) will already have normalized the
11102 	 * vruntime.
11103 	 */
11104 	if (p->on_rq)
11105 		return true;
11106 
11107 	/*
11108 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
11109 	 * But there are some cases where it has already been normalized:
11110 	 *
11111 	 * - A forked child which is waiting for being woken up by
11112 	 *   wake_up_new_task().
11113 	 * - A task which has been woken up by try_to_wake_up() and
11114 	 *   waiting for actually being woken up by sched_ttwu_pending().
11115 	 */
11116 	if (!se->sum_exec_runtime ||
11117 	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
11118 		return true;
11119 
11120 	return false;
11121 }
11122 
11123 #ifdef CONFIG_FAIR_GROUP_SCHED
11124 /*
11125  * Propagate the changes of the sched_entity across the tg tree to make it
11126  * visible to the root
11127  */
propagate_entity_cfs_rq(struct sched_entity * se)11128 static void propagate_entity_cfs_rq(struct sched_entity *se)
11129 {
11130 	struct cfs_rq *cfs_rq;
11131 
11132 	list_add_leaf_cfs_rq(cfs_rq_of(se));
11133 
11134 	/* Start to propagate at parent */
11135 	se = se->parent;
11136 
11137 	for_each_sched_entity(se) {
11138 		cfs_rq = cfs_rq_of(se);
11139 
11140 		if (!cfs_rq_throttled(cfs_rq)){
11141 			update_load_avg(cfs_rq, se, UPDATE_TG);
11142 			list_add_leaf_cfs_rq(cfs_rq);
11143 			continue;
11144 		}
11145 
11146 		if (list_add_leaf_cfs_rq(cfs_rq))
11147 			break;
11148 	}
11149 }
11150 #else
propagate_entity_cfs_rq(struct sched_entity * se)11151 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
11152 #endif
11153 
detach_entity_cfs_rq(struct sched_entity * se)11154 static void detach_entity_cfs_rq(struct sched_entity *se)
11155 {
11156 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11157 
11158 	/* Catch up with the cfs_rq and remove our load when we leave */
11159 	update_load_avg(cfs_rq, se, 0);
11160 	detach_entity_load_avg(cfs_rq, se);
11161 	update_tg_load_avg(cfs_rq);
11162 	propagate_entity_cfs_rq(se);
11163 }
11164 
attach_entity_cfs_rq(struct sched_entity * se)11165 static void attach_entity_cfs_rq(struct sched_entity *se)
11166 {
11167 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11168 
11169 #ifdef CONFIG_FAIR_GROUP_SCHED
11170 	/*
11171 	 * Since the real-depth could have been changed (only FAIR
11172 	 * class maintain depth value), reset depth properly.
11173 	 */
11174 	se->depth = se->parent ? se->parent->depth + 1 : 0;
11175 #endif
11176 
11177 	/* Synchronize entity with its cfs_rq */
11178 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
11179 	attach_entity_load_avg(cfs_rq, se);
11180 	update_tg_load_avg(cfs_rq);
11181 	propagate_entity_cfs_rq(se);
11182 }
11183 
detach_task_cfs_rq(struct task_struct * p)11184 static void detach_task_cfs_rq(struct task_struct *p)
11185 {
11186 	struct sched_entity *se = &p->se;
11187 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11188 
11189 	if (!vruntime_normalized(p)) {
11190 		/*
11191 		 * Fix up our vruntime so that the current sleep doesn't
11192 		 * cause 'unlimited' sleep bonus.
11193 		 */
11194 		place_entity(cfs_rq, se, 0);
11195 		se->vruntime -= cfs_rq->min_vruntime;
11196 	}
11197 
11198 	detach_entity_cfs_rq(se);
11199 }
11200 
attach_task_cfs_rq(struct task_struct * p)11201 static void attach_task_cfs_rq(struct task_struct *p)
11202 {
11203 	struct sched_entity *se = &p->se;
11204 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11205 
11206 	attach_entity_cfs_rq(se);
11207 
11208 	if (!vruntime_normalized(p))
11209 		se->vruntime += cfs_rq->min_vruntime;
11210 }
11211 
switched_from_fair(struct rq * rq,struct task_struct * p)11212 static void switched_from_fair(struct rq *rq, struct task_struct *p)
11213 {
11214 	detach_task_cfs_rq(p);
11215 }
11216 
switched_to_fair(struct rq * rq,struct task_struct * p)11217 static void switched_to_fair(struct rq *rq, struct task_struct *p)
11218 {
11219 	attach_task_cfs_rq(p);
11220 
11221 	if (task_on_rq_queued(p)) {
11222 		/*
11223 		 * We were most likely switched from sched_rt, so
11224 		 * kick off the schedule if running, otherwise just see
11225 		 * if we can still preempt the current task.
11226 		 */
11227 		if (rq->curr == p)
11228 			resched_curr(rq);
11229 		else
11230 			check_preempt_curr(rq, p, 0);
11231 	}
11232 }
11233 
11234 /* Account for a task changing its policy or group.
11235  *
11236  * This routine is mostly called to set cfs_rq->curr field when a task
11237  * migrates between groups/classes.
11238  */
set_next_task_fair(struct rq * rq,struct task_struct * p,bool first)11239 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
11240 {
11241 	struct sched_entity *se = &p->se;
11242 
11243 #ifdef CONFIG_SMP
11244 	if (task_on_rq_queued(p)) {
11245 		/*
11246 		 * Move the next running task to the front of the list, so our
11247 		 * cfs_tasks list becomes MRU one.
11248 		 */
11249 		list_move(&se->group_node, &rq->cfs_tasks);
11250 	}
11251 #endif
11252 
11253 	for_each_sched_entity(se) {
11254 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
11255 
11256 		set_next_entity(cfs_rq, se);
11257 		/* ensure bandwidth has been allocated on our new cfs_rq */
11258 		account_cfs_rq_runtime(cfs_rq, 0);
11259 	}
11260 }
11261 
init_cfs_rq(struct cfs_rq * cfs_rq)11262 void init_cfs_rq(struct cfs_rq *cfs_rq)
11263 {
11264 	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
11265 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
11266 #ifndef CONFIG_64BIT
11267 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
11268 #endif
11269 #ifdef CONFIG_SMP
11270 	raw_spin_lock_init(&cfs_rq->removed.lock);
11271 #endif
11272 }
11273 
11274 #ifdef CONFIG_FAIR_GROUP_SCHED
task_set_group_fair(struct task_struct * p)11275 static void task_set_group_fair(struct task_struct *p)
11276 {
11277 	struct sched_entity *se = &p->se;
11278 
11279 	set_task_rq(p, task_cpu(p));
11280 	se->depth = se->parent ? se->parent->depth + 1 : 0;
11281 }
11282 
task_move_group_fair(struct task_struct * p)11283 static void task_move_group_fair(struct task_struct *p)
11284 {
11285 	detach_task_cfs_rq(p);
11286 	set_task_rq(p, task_cpu(p));
11287 
11288 #ifdef CONFIG_SMP
11289 	/* Tell se's cfs_rq has been changed -- migrated */
11290 	p->se.avg.last_update_time = 0;
11291 #endif
11292 	attach_task_cfs_rq(p);
11293 }
11294 
task_change_group_fair(struct task_struct * p,int type)11295 static void task_change_group_fair(struct task_struct *p, int type)
11296 {
11297 	switch (type) {
11298 	case TASK_SET_GROUP:
11299 		task_set_group_fair(p);
11300 		break;
11301 
11302 	case TASK_MOVE_GROUP:
11303 		task_move_group_fair(p);
11304 		break;
11305 	}
11306 }
11307 
free_fair_sched_group(struct task_group * tg)11308 void free_fair_sched_group(struct task_group *tg)
11309 {
11310 	int i;
11311 
11312 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
11313 
11314 	for_each_possible_cpu(i) {
11315 		if (tg->cfs_rq)
11316 			kfree(tg->cfs_rq[i]);
11317 		if (tg->se)
11318 			kfree(tg->se[i]);
11319 	}
11320 
11321 	kfree(tg->cfs_rq);
11322 	kfree(tg->se);
11323 }
11324 
alloc_fair_sched_group(struct task_group * tg,struct task_group * parent)11325 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11326 {
11327 	struct sched_entity *se;
11328 	struct cfs_rq *cfs_rq;
11329 	int i;
11330 
11331 	tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
11332 	if (!tg->cfs_rq)
11333 		goto err;
11334 	tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
11335 	if (!tg->se)
11336 		goto err;
11337 
11338 	tg->shares = NICE_0_LOAD;
11339 
11340 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
11341 
11342 	for_each_possible_cpu(i) {
11343 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
11344 				      GFP_KERNEL, cpu_to_node(i));
11345 		if (!cfs_rq)
11346 			goto err;
11347 
11348 		se = kzalloc_node(sizeof(struct sched_entity),
11349 				  GFP_KERNEL, cpu_to_node(i));
11350 		if (!se)
11351 			goto err_free_rq;
11352 
11353 		init_cfs_rq(cfs_rq);
11354 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
11355 		init_entity_runnable_average(se);
11356 	}
11357 
11358 	return 1;
11359 
11360 err_free_rq:
11361 	kfree(cfs_rq);
11362 err:
11363 	return 0;
11364 }
11365 
online_fair_sched_group(struct task_group * tg)11366 void online_fair_sched_group(struct task_group *tg)
11367 {
11368 	struct sched_entity *se;
11369 	struct rq_flags rf;
11370 	struct rq *rq;
11371 	int i;
11372 
11373 	for_each_possible_cpu(i) {
11374 		rq = cpu_rq(i);
11375 		se = tg->se[i];
11376 		rq_lock_irq(rq, &rf);
11377 		update_rq_clock(rq);
11378 		attach_entity_cfs_rq(se);
11379 		sync_throttle(tg, i);
11380 		rq_unlock_irq(rq, &rf);
11381 	}
11382 }
11383 
unregister_fair_sched_group(struct task_group * tg)11384 void unregister_fair_sched_group(struct task_group *tg)
11385 {
11386 	unsigned long flags;
11387 	struct rq *rq;
11388 	int cpu;
11389 
11390 	for_each_possible_cpu(cpu) {
11391 		if (tg->se[cpu])
11392 			remove_entity_load_avg(tg->se[cpu]);
11393 
11394 		/*
11395 		 * Only empty task groups can be destroyed; so we can speculatively
11396 		 * check on_list without danger of it being re-added.
11397 		 */
11398 		if (!tg->cfs_rq[cpu]->on_list)
11399 			continue;
11400 
11401 		rq = cpu_rq(cpu);
11402 
11403 		raw_spin_lock_irqsave(&rq->lock, flags);
11404 		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
11405 		raw_spin_unlock_irqrestore(&rq->lock, flags);
11406 	}
11407 }
11408 
init_tg_cfs_entry(struct task_group * tg,struct cfs_rq * cfs_rq,struct sched_entity * se,int cpu,struct sched_entity * parent)11409 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
11410 			struct sched_entity *se, int cpu,
11411 			struct sched_entity *parent)
11412 {
11413 	struct rq *rq = cpu_rq(cpu);
11414 
11415 	cfs_rq->tg = tg;
11416 	cfs_rq->rq = rq;
11417 	init_cfs_rq_runtime(cfs_rq);
11418 
11419 	tg->cfs_rq[cpu] = cfs_rq;
11420 	tg->se[cpu] = se;
11421 
11422 	/* se could be NULL for root_task_group */
11423 	if (!se)
11424 		return;
11425 
11426 	if (!parent) {
11427 		se->cfs_rq = &rq->cfs;
11428 		se->depth = 0;
11429 	} else {
11430 		se->cfs_rq = parent->my_q;
11431 		se->depth = parent->depth + 1;
11432 	}
11433 
11434 	se->my_q = cfs_rq;
11435 	/* guarantee group entities always have weight */
11436 	update_load_set(&se->load, NICE_0_LOAD);
11437 	se->parent = parent;
11438 }
11439 
11440 static DEFINE_MUTEX(shares_mutex);
11441 
sched_group_set_shares(struct task_group * tg,unsigned long shares)11442 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
11443 {
11444 	int i;
11445 
11446 	/*
11447 	 * We can't change the weight of the root cgroup.
11448 	 */
11449 	if (!tg->se[0])
11450 		return -EINVAL;
11451 
11452 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
11453 
11454 	mutex_lock(&shares_mutex);
11455 	if (tg->shares == shares)
11456 		goto done;
11457 
11458 	tg->shares = shares;
11459 	for_each_possible_cpu(i) {
11460 		struct rq *rq = cpu_rq(i);
11461 		struct sched_entity *se = tg->se[i];
11462 		struct rq_flags rf;
11463 
11464 		/* Propagate contribution to hierarchy */
11465 		rq_lock_irqsave(rq, &rf);
11466 		update_rq_clock(rq);
11467 		for_each_sched_entity(se) {
11468 			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
11469 			update_cfs_group(se);
11470 		}
11471 		rq_unlock_irqrestore(rq, &rf);
11472 	}
11473 
11474 done:
11475 	mutex_unlock(&shares_mutex);
11476 	return 0;
11477 }
11478 #else /* CONFIG_FAIR_GROUP_SCHED */
11479 
free_fair_sched_group(struct task_group * tg)11480 void free_fair_sched_group(struct task_group *tg) { }
11481 
alloc_fair_sched_group(struct task_group * tg,struct task_group * parent)11482 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11483 {
11484 	return 1;
11485 }
11486 
online_fair_sched_group(struct task_group * tg)11487 void online_fair_sched_group(struct task_group *tg) { }
11488 
unregister_fair_sched_group(struct task_group * tg)11489 void unregister_fair_sched_group(struct task_group *tg) { }
11490 
11491 #endif /* CONFIG_FAIR_GROUP_SCHED */
11492 
11493 
get_rr_interval_fair(struct rq * rq,struct task_struct * task)11494 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
11495 {
11496 	struct sched_entity *se = &task->se;
11497 	unsigned int rr_interval = 0;
11498 
11499 	/*
11500 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
11501 	 * idle runqueue:
11502 	 */
11503 	if (rq->cfs.load.weight)
11504 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
11505 
11506 	return rr_interval;
11507 }
11508 
11509 /*
11510  * All the scheduling class methods:
11511  */
11512 const struct sched_class fair_sched_class
11513 	__section("__fair_sched_class") = {
11514 	.enqueue_task		= enqueue_task_fair,
11515 	.dequeue_task		= dequeue_task_fair,
11516 	.yield_task		= yield_task_fair,
11517 	.yield_to_task		= yield_to_task_fair,
11518 
11519 	.check_preempt_curr	= check_preempt_wakeup,
11520 
11521 	.pick_next_task		= __pick_next_task_fair,
11522 	.put_prev_task		= put_prev_task_fair,
11523 	.set_next_task          = set_next_task_fair,
11524 
11525 #ifdef CONFIG_SMP
11526 	.balance		= balance_fair,
11527 	.select_task_rq		= select_task_rq_fair,
11528 	.migrate_task_rq	= migrate_task_rq_fair,
11529 
11530 	.rq_online		= rq_online_fair,
11531 	.rq_offline		= rq_offline_fair,
11532 
11533 	.task_dead		= task_dead_fair,
11534 	.set_cpus_allowed	= set_cpus_allowed_common,
11535 #endif
11536 
11537 	.task_tick		= task_tick_fair,
11538 	.task_fork		= task_fork_fair,
11539 
11540 	.prio_changed		= prio_changed_fair,
11541 	.switched_from		= switched_from_fair,
11542 	.switched_to		= switched_to_fair,
11543 
11544 	.get_rr_interval	= get_rr_interval_fair,
11545 
11546 	.update_curr		= update_curr_fair,
11547 
11548 #ifdef CONFIG_FAIR_GROUP_SCHED
11549 	.task_change_group	= task_change_group_fair,
11550 #endif
11551 
11552 #ifdef CONFIG_UCLAMP_TASK
11553 	.uclamp_enabled		= 1,
11554 #endif
11555 };
11556 
11557 #ifdef CONFIG_SCHED_DEBUG
print_cfs_stats(struct seq_file * m,int cpu)11558 void print_cfs_stats(struct seq_file *m, int cpu)
11559 {
11560 	struct cfs_rq *cfs_rq, *pos;
11561 
11562 	rcu_read_lock();
11563 	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
11564 		print_cfs_rq(m, cpu, cfs_rq);
11565 	rcu_read_unlock();
11566 }
11567 
11568 #ifdef CONFIG_NUMA_BALANCING
show_numa_stats(struct task_struct * p,struct seq_file * m)11569 void show_numa_stats(struct task_struct *p, struct seq_file *m)
11570 {
11571 	int node;
11572 	unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
11573 	struct numa_group *ng;
11574 
11575 	rcu_read_lock();
11576 	ng = rcu_dereference(p->numa_group);
11577 	for_each_online_node(node) {
11578 		if (p->numa_faults) {
11579 			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
11580 			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
11581 		}
11582 		if (ng) {
11583 			gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
11584 			gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
11585 		}
11586 		print_numa_stats(m, node, tsf, tpf, gsf, gpf);
11587 	}
11588 	rcu_read_unlock();
11589 }
11590 #endif /* CONFIG_NUMA_BALANCING */
11591 #endif /* CONFIG_SCHED_DEBUG */
11592 
init_sched_fair_class(void)11593 __init void init_sched_fair_class(void)
11594 {
11595 #ifdef CONFIG_SMP
11596 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
11597 
11598 #ifdef CONFIG_NO_HZ_COMMON
11599 	nohz.next_balance = jiffies;
11600 	nohz.next_blocked = jiffies;
11601 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
11602 #endif
11603 #endif /* SMP */
11604 
11605 }
11606 
11607 /*
11608  * Helper functions to facilitate extracting info from tracepoints.
11609  */
11610 
sched_trace_cfs_rq_avg(struct cfs_rq * cfs_rq)11611 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
11612 {
11613 #ifdef CONFIG_SMP
11614 	return cfs_rq ? &cfs_rq->avg : NULL;
11615 #else
11616 	return NULL;
11617 #endif
11618 }
11619 EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);
11620 
sched_trace_cfs_rq_path(struct cfs_rq * cfs_rq,char * str,int len)11621 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
11622 {
11623 	if (!cfs_rq) {
11624 		if (str)
11625 			strlcpy(str, "(null)", len);
11626 		else
11627 			return NULL;
11628 	}
11629 
11630 	cfs_rq_tg_path(cfs_rq, str, len);
11631 	return str;
11632 }
11633 EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);
11634 
sched_trace_cfs_rq_cpu(struct cfs_rq * cfs_rq)11635 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
11636 {
11637 	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
11638 }
11639 EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);
11640 
sched_trace_rq_avg_rt(struct rq * rq)11641 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
11642 {
11643 #ifdef CONFIG_SMP
11644 	return rq ? &rq->avg_rt : NULL;
11645 #else
11646 	return NULL;
11647 #endif
11648 }
11649 EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);
11650 
sched_trace_rq_avg_dl(struct rq * rq)11651 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
11652 {
11653 #ifdef CONFIG_SMP
11654 	return rq ? &rq->avg_dl : NULL;
11655 #else
11656 	return NULL;
11657 #endif
11658 }
11659 EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);
11660 
sched_trace_rq_avg_irq(struct rq * rq)11661 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
11662 {
11663 #if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
11664 	return rq ? &rq->avg_irq : NULL;
11665 #else
11666 	return NULL;
11667 #endif
11668 }
11669 EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);
11670 
sched_trace_rq_cpu(struct rq * rq)11671 int sched_trace_rq_cpu(struct rq *rq)
11672 {
11673 	return rq ? cpu_of(rq) : -1;
11674 }
11675 EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);
11676 
sched_trace_rq_cpu_capacity(struct rq * rq)11677 int sched_trace_rq_cpu_capacity(struct rq *rq)
11678 {
11679 	return rq ?
11680 #ifdef CONFIG_SMP
11681 		rq->cpu_capacity
11682 #else
11683 		SCHED_CAPACITY_SCALE
11684 #endif
11685 		: -1;
11686 }
11687 EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);
11688 
sched_trace_rd_span(struct root_domain * rd)11689 const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
11690 {
11691 #ifdef CONFIG_SMP
11692 	return rd ? rd->span : NULL;
11693 #else
11694 	return NULL;
11695 #endif
11696 }
11697 EXPORT_SYMBOL_GPL(sched_trace_rd_span);
11698 
sched_trace_rq_nr_running(struct rq * rq)11699 int sched_trace_rq_nr_running(struct rq *rq)
11700 {
11701         return rq ? rq->nr_running : -1;
11702 }
11703 EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);
11704