
Searched refs:cfs_rq (Results 1 – 10 of 10) sorted by relevance

/OK3568_Linux_fs/kernel/kernel/sched/
fair.c
277 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
279 return p->se.cfs_rq; in task_cfs_rq()
283 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
285 return se->cfs_rq; in cfs_rq_of()
289 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
294 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) in cfs_rq_tg_path() argument
299 if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) in cfs_rq_tg_path()
300 autogroup_path(cfs_rq->tg, path, len); in cfs_rq_tg_path()
301 else if (cfs_rq && cfs_rq->tg->css.cgroup) in cfs_rq_tg_path()
302 cgroup_path(cfs_rq->tg->css.cgroup, path, len); in cfs_rq_tg_path()
[all …]
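The fair.c accessors above are the CONFIG_FAIR_GROUP_SCHED variants: each sched_entity carries an explicit cfs_rq backpointer, and a group entity additionally owns a child runqueue through my_q (declared in include/linux/sched.h further down). Below is a minimal standalone sketch of that containment chain, using simplified stand-in structs rather than the kernel's real definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel structs; the field names follow the
 * excerpts above, everything else is illustrative only. */
struct cfs_rq {
	unsigned int nr_running;
};

struct sched_entity {
	struct cfs_rq *cfs_rq;	/* runqueue this entity is queued on */
	struct cfs_rq *my_q;	/* runqueue a group entity owns; NULL for a task */
};

struct task_struct {
	struct sched_entity se;
};

/* Mirror task_cfs_rq(), cfs_rq_of() and group_cfs_rq() from fair.c. */
static struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

static struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

int main(void)
{
	struct cfs_rq rq = { .nr_running = 1 };
	struct task_struct p = { .se = { .cfs_rq = &rq, .my_q = NULL } };

	printf("task_cfs_rq==cfs_rq_of: %d, group_cfs_rq=%p\n",
	       task_cfs_rq(&p) == cfs_rq_of(&p.se),
	       (void *)group_cfs_rq(&p.se));
	return 0;
}

Without CONFIG_FAIR_GROUP_SCHED the kernel drops the backpointer and derives the runqueue from the enclosing struct instead; the sched.h sketch further down shows that pattern.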
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
152 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt() argument
154 if (unlikely(cfs_rq->throttle_count)) in cfs_rq_clock_pelt()
155 return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
157 return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
160 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt() argument
162 return rq_clock_pelt(rq_of(cfs_rq)); in cfs_rq_clock_pelt()
169 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
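cfs_rq_clock_pelt() above makes PELT time stand still while a runqueue is throttled: the returned clock is the rq's PELT clock minus all time ever spent throttled, and during a throttle it stays pinned at the instant the throttle began. A small userspace model of that arithmetic, with hypothetical sample values:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Model of the three cfs_rq fields used by cfs_rq_clock_pelt(). */
struct cfs_rq_model {
	int throttle_count;		/* non-zero while throttled */
	u64 throttled_clock_pelt;	/* PELT clock when the throttle began */
	u64 throttled_clock_pelt_time;	/* total time spent throttled so far */
};

/* rq_clock_pelt(rq_of(cfs_rq)) is passed in directly for the model. */
static u64 cfs_rq_clock_pelt(const struct cfs_rq_model *cfs_rq, u64 rq_clock_pelt)
{
	if (cfs_rq->throttle_count)
		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
	return rq_clock_pelt - cfs_rq->throttled_clock_pelt_time;
}

int main(void)
{
	/* Throttled at PELT time 1000 after 200 units of earlier throttling:
	 * the clock freezes at 800 no matter how far rq time advances. */
	struct cfs_rq_model cfs = { 1, 1000, 200 };
	printf("throttled: %llu\n", (unsigned long long)cfs_rq_clock_pelt(&cfs, 1500));

	/* Unthrottle after a 500-unit throttle: the clock resumes from 800
	 * with no jump, so PELT sees no dead time. */
	cfs.throttle_count = 0;
	cfs.throttled_clock_pelt_time += 500;
	printf("running:   %llu\n", (unsigned long long)cfs_rq_clock_pelt(&cfs, 1500));
	return 0;
}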
pelt.c
386 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
389 cfs_rq->curr == se)) { in __update_load_avg_se()
400 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
402 if (___update_load_sum(now, &cfs_rq->avg, in __update_load_avg_cfs_rq()
403 scale_load_down(cfs_rq->load.weight), in __update_load_avg_cfs_rq()
404 cfs_rq->h_nr_running, in __update_load_avg_cfs_rq()
405 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
407 ___update_load_avg(&cfs_rq->avg, 1); in __update_load_avg_cfs_rq()
408 trace_pelt_cfs_tp(cfs_rq); in __update_load_avg_cfs_rq()
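Both __update_load_avg_se() and __update_load_avg_cfs_rq() feed ___update_load_sum()/___update_load_avg(), which maintain PELT's geometrically decayed averages: each elapsed ~1 ms period decays the accumulated sum by y, with y chosen so that y^32 = 0.5. The kernel does this in fixed point with lookup tables; the floating-point sketch below only models the decay idea, not the real implementation (build with -lm):

#include <stdio.h>
#include <math.h>

/* PELT decay factor: a half-life of 32 periods, so y^32 = 0.5. */
#define PELT_Y pow(0.5, 1.0 / 32.0)

/* One PELT period: the old sum decays by y and the new period adds its
 * running fraction (0.0 .. 1.0). */
static double pelt_step(double sum, double running_fraction)
{
	return sum * PELT_Y + running_fraction;
}

int main(void)
{
	double sum = 0.0;

	/* A load that runs 50% of every period converges geometrically
	 * toward 0.5 / (1 - y). */
	for (int i = 1; i <= 128; i++) {
		sum = pelt_step(sum, 0.5);
		if ((i & (i - 1)) == 0)		/* print at powers of two */
			printf("period %3d: sum = %.3f\n", i, sum);
	}
	printf("steady state: %.3f\n", 0.5 / (1.0 - PELT_Y));
	return 0;
}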
debug.c
557 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) in print_cfs_rq() argument
567 SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu); in print_cfs_rq()
573 SPLIT_NS(cfs_rq->exec_clock)); in print_cfs_rq()
576 if (rb_first_cached(&cfs_rq->tasks_timeline)) in print_cfs_rq()
577 MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; in print_cfs_rq()
578 last = __pick_last_entity(cfs_rq); in print_cfs_rq()
581 min_vruntime = cfs_rq->min_vruntime; in print_cfs_rq()
597 cfs_rq->nr_spread_over); in print_cfs_rq()
598 SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); in print_cfs_rq()
599 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); in print_cfs_rq()
[all …]
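On kernels built with CONFIG_SCHED_DEBUG, print_cfs_rq() is what produces the per-runqueue cfs_rq[...] blocks of /proc/sched_debug. A rough userspace filter that prints just those blocks; the path and the exact output layout are assumptions based on the excerpt above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/sched_debug", "r");
	char line[512];
	int in_cfs = 0;

	if (!f) {
		perror("/proc/sched_debug");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "cfs_rq[", 7) == 0)
			in_cfs = 1;	/* section header from print_cfs_rq() */
		else if (line[0] != ' ' && line[0] != '\n')
			in_cfs = 0;	/* some other top-level section starts */
		if (in_cfs)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}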
sched.h
361 struct cfs_rq;
396 struct cfs_rq **cfs_rq; member
485 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
492 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
518 struct cfs_rq *prev, struct cfs_rq *next);
521 struct cfs_rq *prev, struct cfs_rq *next) { } in set_task_rq_fair()
532 struct cfs_rq { struct
954 struct cfs_rq cfs;
1093 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1095 return cfs_rq->rq; in rq_of()
[all …]
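As the sched.h excerpt shows, each struct rq embeds its root CFS runqueue directly (struct cfs_rq cfs), while the group-scheduling build gives struct cfs_rq an explicit rq backpointer that rq_of() returns. In the !CONFIG_FAIR_GROUP_SCHED build the kernel recovers the rq from the embedded member instead, container_of-style; a standalone sketch of that pattern:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins: rq embeds its root cfs_rq, as in sched.h above. */
struct cfs_rq {
	unsigned int nr_running;
};

struct rq {
	int cpu;
	struct cfs_rq cfs;	/* root CFS runqueue, embedded by value */
};

/* Recover the enclosing rq from a pointer to its embedded cfs member,
 * i.e. a hand-rolled container_of(). */
static struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return (struct rq *)((char *)cfs_rq - offsetof(struct rq, cfs));
}

int main(void)
{
	struct rq rq = { .cpu = 3, .cfs = { .nr_running = 0 } };

	printf("cfs_rq belongs to CPU %d\n", rq_of(&rq.cfs)->cpu);
	return 0;
}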
core.c
3320 p->se.cfs_rq = NULL; in __sched_fork()
4197 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
7498 root_task_group.cfs_rq = (struct cfs_rq **)ptr; in sched_init()
8403 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth() local
8404 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
8408 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
8409 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
8411 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
8412 unthrottle_cfs_rq(cfs_rq); in tg_set_cfs_bandwidth()
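tg_set_cfs_bandwidth() walks tg->cfs_rq[i] for every CPU, records whether a quota is now in force, resets the remaining runtime, and unthrottles any runqueue left throttled under the old limit. Userspace reaches it through the cpu cgroup controller; a hedged example that requests 50% of one CPU follows (the cgroup-v1 mount point and the group name "demo" are assumptions, and cgroup v2 uses the single cpu.max file instead):

#include <stdio.h>

static int write_val(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* 50 ms of runtime per 100 ms period = 50% of one CPU; these two
	 * writes are what end up in tg_set_cfs_bandwidth(). */
	if (write_val("/sys/fs/cgroup/cpu/demo/cpu.cfs_period_us", "100000") ||
	    write_val("/sys/fs/cgroup/cpu/demo/cpu.cfs_quota_us", "50000")) {
		perror("setting CFS bandwidth");
		return 1;
	}
	return 0;
}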
/OK3568_Linux_fs/kernel/include/trace/hooks/
sched.h
200 TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 vruntime),
201 TP_ARGS(cfs_rq, se, initial, vruntime), 1);
263 struct cfs_rq;
265 TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *curr,
267 TP_ARGS(cfs_rq, curr, se), 1);
296 unsigned long delta_exec, struct cfs_rq *cfs_rq, struct sched_entity *curr,
298 TP_ARGS(p, ideal_runtime, skip_preempt, delta_exec, cfs_rq, curr, granularity), 1);
309 TP_PROTO(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep, int *ret),
310 TP_ARGS(cfs_rq, p, task_sleep, ret), 1);
344 struct cfs_rq;
[all …]
/OK3568_Linux_fs/kernel/include/linux/
sched.h
46 struct cfs_rq;
481 struct cfs_rq *cfs_rq; member
483 struct cfs_rq *my_q;
2120 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2121 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2122 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
/OK3568_Linux_fs/kernel/include/trace/events/
sched.h
637 TP_PROTO(struct cfs_rq *cfs_rq),
638 TP_ARGS(cfs_rq));
669 TP_PROTO(struct cfs_rq *cfs_rq),
670 TP_ARGS(cfs_rq));
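Tracepoints like pelt_cfs_tp (called from pelt.c above) are bare tracepoints with no pre-defined trace event: they exist so a module can attach its own probe, and the sched_trace_cfs_rq_*() helpers from include/linux/sched.h above let that probe inspect the otherwise-opaque struct cfs_rq. Below is a module-style sketch of such a probe; it is an assumption-laden illustration (it presumes this tree exports pelt_cfs_tp and the helpers to GPL modules), not a drop-in driver:

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe signature: the leading void * is the registration cookie, the
 * rest mirrors TP_PROTO(struct cfs_rq *cfs_rq) above. */
static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
	char path[64];

	sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path));
	trace_printk("cfs_rq cpu=%d path=%s util_avg=%lu\n",
		     sched_trace_cfs_rq_cpu(cfs_rq), path,
		     sched_trace_cfs_rq_avg(cfs_rq)->util_avg);
}

static int __init pelt_probe_init(void)
{
	return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit pelt_probe_exit(void)
{
	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
	tracepoint_synchronize_unregister();
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");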
/OK3568_Linux_fs/kernel/drivers/soc/rockchip/minidump/
minidump_log.c
789 static void md_dump_cfs_rq(struct cfs_rq *cfs, struct task_struct *curr);
795 struct cfs_rq *my_q = NULL; in md_dump_cgroup_state()
844 static void md_dump_cfs_rq(struct cfs_rq *cfs, struct task_struct *curr) in md_dump_cfs_rq()
887 struct cfs_rq *cfs; in md_dump_runqueues()