/OK3568_Linux_fs/kernel/include/trace/hooks/sched.h
     33  TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
     34  TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
     37  TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
     38  TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
     41  TP_PROTO(int cpu, struct task_struct *p, int *new_cpu),
     42  TP_ARGS(cpu, p, new_cpu), 1);
    122  struct task_struct *p, int new_cpu,
    124  TP_ARGS(rq, rf, p, new_cpu, detached), 1);
    131  TP_PROTO(struct task_struct *p, int prev_cpu, int sync, int *new_cpu),
    132  TP_ARGS(p, prev_cpu, sync, new_cpu), 1);
    [all …]
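These are Android vendor ("restricted") hook declarations: each TP_PROTO/TP_ARGS pair with the trailing 1 belongs to a DECLARE_RESTRICTED_HOOK() that lets a vendor module steer scheduler placement by writing through the int *new_cpu out-parameter. A minimal sketch of a consumer follows, assuming the Android GKI convention that the macro generates a register_trace_android_rvh_<name>() function; the module, handler name, and pick policy are hypothetical.

```c
#include <linux/module.h>
#include <trace/hooks/sched.h>

/* First argument is the opaque cookie supplied at registration time. */
static void demo_select_task_rq_fair(void *unused, struct task_struct *p,
				     int prev_cpu, int sd_flag,
				     int wake_flags, int *new_cpu)
{
	/* Writing a valid CPU id into *new_cpu overrides the scheduler's
	 * choice; leaving the caller's sentinel untouched keeps the
	 * default path. */
	if (task_pid_nr(p) % 2)
		*new_cpu = prev_cpu;	/* arbitrary demo policy */
}

static int __init demo_init(void)
{
	/* Restricted vendor hooks cannot be unregistered, so this is one-way. */
	return register_trace_android_rvh_select_task_rq_fair(
			demo_select_task_rq_fair, NULL);
}
module_init(demo_init);
MODULE_LICENSE("GPL");
```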
|
/OK3568_Linux_fs/kernel/drivers/irqchip/irq-bcm6345-l1.c   [all hits in bcm6345_l1_set_affinity()]
    204  unsigned int new_cpu;   [local]
    212  new_cpu = cpumask_any_and(&valid, cpu_online_mask);
    213  if (new_cpu >= nr_cpu_ids)
    216  dest = cpumask_of(new_cpu);
    219  if (old_cpu != new_cpu) {
    231  irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
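The pattern at lines 212-213, intersecting the requested affinity with cpu_online_mask, taking any survivor, and bailing out when the intersection is empty, isolated into a helper. A sketch; the helper name is hypothetical.

```c
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Pick any online CPU out of @requested, or fail if none is online. */
static int pick_online_cpu(const struct cpumask *requested)
{
	unsigned int new_cpu = cpumask_any_and(requested, cpu_online_mask);

	/* cpumask_any_and() returns a value >= nr_cpu_ids when the two
	 * masks have no bit in common. */
	return new_cpu < nr_cpu_ids ? (int)new_cpu : -EINVAL;
}
```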
|
/OK3568_Linux_fs/buildroot/package/binutils/2.38/0005-binutils-2.38-vs.-ppc32-linux-kernel.patch
     25  -  new_cpu = ppc_parse_cpu (ppc_cpu, &machine_sticky, cpu_string);
     47  +  new_cpu = ppc_parse_cpu (ppc_cpu,
     50     if (new_cpu != 0)
     51       ppc_cpu = new_cpu;
|
/OK3568_Linux_fs/kernel/arch/ia64/kernel/irq.c   [all hits in migrate_irqs()]
     82  int irq, new_cpu;   [local]
    108  new_cpu = cpumask_any(cpu_online_mask);
    117  cpumask_of(new_cpu), false);
|
/OK3568_Linux_fs/kernel/kernel/sched/cpudeadline.c   [all hits in cpudl_clear()]
    176  int old_idx, new_cpu;   [local]
    191  new_cpu = cp->elements[cp->size - 1].cpu;
    193  cp->elements[old_idx].cpu = new_cpu;
    195  cp->elements[new_cpu].idx = old_idx;
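cpudl_clear() removes a heap element in O(1) before re-heapifying: the last element is copied into the vacated slot and the reverse map (elements[cpu].idx) is patched so each CPU can still be located by index. A standalone, simplified illustration of that swap-with-last idiom; the names and types are stand-ins, not the kernel's struct cpudl.

```c
#include <stdio.h>

#define NR_CPUS_DEMO 5

struct elem { int cpu; unsigned long long dl; };

static struct elem heap[NR_CPUS_DEMO];
static int idx_of[NR_CPUS_DEMO];  /* idx_of[cpu] = slot of that cpu in heap[] */
static int size;

static void heap_remove(int cpu)
{
	int old_idx = idx_of[cpu];
	int new_cpu = heap[size - 1].cpu;  /* CPU owning the last slot */

	size--;
	heap[old_idx] = heap[size];  /* move the last element into the hole */
	idx_of[new_cpu] = old_idx;   /* fix its reverse-map entry */
	idx_of[cpu] = -1;            /* removed CPU is no longer in the heap */
	/* the real cpudl_clear() now restores the heap property
	 * (cpudl_heapify()); omitted here */
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
		heap[cpu] = (struct elem){ .cpu = cpu, .dl = 100 - cpu };
		idx_of[cpu] = cpu;
		size++;
	}
	heap_remove(1);
	for (int i = 0; i < size; i++)
		printf("slot %d: cpu %d dl %llu\n", i, heap[i].cpu, heap[i].dl);
	return 0;
}
```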
|
/OK3568_Linux_fs/kernel/kernel/sched/fair.c
   2955  static void update_scan_period(struct task_struct *p, int new_cpu)   [in update_scan_period(), argument]
   2958  int dst_nid = cpu_to_node(new_cpu);   [in update_scan_period()]
   3002  static inline void update_scan_period(struct task_struct *p, int new_cpu)   [in update_scan_period(), argument]
   6025  int new_cpu = cpu;   [in find_idlest_cpu(), local]
   6053  new_cpu = find_idlest_group_cpu(group, p, cpu);   [in find_idlest_cpu()]
   6054  if (new_cpu == cpu) {   [in find_idlest_cpu()]
   6061  cpu = new_cpu;   [in find_idlest_cpu()]
   6072  return new_cpu;   [in find_idlest_cpu()]
   6695  int new_cpu = INT_MAX;   [in find_energy_efficient_cpu(), local]
   6698  trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);   [in find_energy_efficient_cpu()]
   [all …]
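Two call sites are visible here: find_idlest_cpu() walks the sched-domain hierarchy (lines 6053-6061: pick the idlest group's idlest CPU, then descend), and find_energy_efficient_cpu() first offers the decision to a vendor hook (lines 6695-6698). The sentinel pattern around that hook typically reads like the sketch below; the INT_MAX check is an assumption based on the common Android kernel pattern, and the exact condition in this tree may differ.

```c
/* Sketch of how the restricted hook at line 6698 is consumed. */
static int find_energy_efficient_cpu_sketch(struct task_struct *p,
					    int prev_cpu, int sync)
{
	int new_cpu = INT_MAX;	/* sentinel: "no vendor decision yet" */

	trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
	if (new_cpu != INT_MAX)
		return new_cpu;	/* a vendor hook chose the target CPU */

	/* ... default energy-aware wake-up placement continues here ... */
	return prev_cpu;
}
```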
|
/OK3568_Linux_fs/kernel/kernel/sched/core.c
   1842  struct task_struct *p, int new_cpu)   [in move_queued_task(), argument]
   1853  trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached);   [in move_queued_task()]
   1858  set_task_cpu(p, new_cpu);   [in move_queued_task()]
   1862  rq = cpu_rq(new_cpu);   [in move_queued_task()]
   1865  BUG_ON(task_cpu(p) != new_cpu);   [in move_queued_task()]
   2160  void set_task_cpu(struct task_struct *p, unsigned int new_cpu)   [in set_task_cpu(), argument]
   2196  WARN_ON_ONCE(!cpu_online(new_cpu));   [in set_task_cpu()]
   2199  trace_sched_migrate_task(p, new_cpu);   [in set_task_cpu()]
   2201  if (task_cpu(p) != new_cpu) {   [in set_task_cpu()]
   2203  p->sched_class->migrate_task_rq(p, new_cpu);   [in set_task_cpu()]
   [all …]
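Condensing the two functions above: move_queued_task() detaches the task from its old runqueue, retargets it with set_task_cpu() (which fires trace_sched_migrate_task() and the class's migrate_task_rq() callback, lines 2199-2203), then attaches it to the destination runqueue. A reading-aid sketch, as it would sit inside core.c, with clock updates and the vendor detach hook elided:

```c
static struct rq *move_queued_task_sketch(struct rq *rq, struct rq_flags *rf,
					  struct task_struct *p, int new_cpu)
{
	deactivate_task(rq, p, DEQUEUE_NOCLOCK);  /* detach from old rq */
	set_task_cpu(p, new_cpu);   /* retarget: fires the migrate tracepoint
				       and the sched_class callback */
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);       /* attach to the destination rq */
	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	activate_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);
	return rq;
}
```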
|
/OK3568_Linux_fs/kernel/kernel/sched/sched.h
   1859  void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
|
/OK3568_Linux_fs/kernel/kernel/sched/deadline.c
   1726  static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)   [in migrate_task_rq_dl()]
|
/OK3568_Linux_fs/kernel/arch/x86/hyperv/hv_init.c   [all hits in hv_cpu_die()]
    214  unsigned int new_cpu;   [local]
    239  new_cpu = cpumask_any_but(cpu_online_mask, cpu);
    241  if (new_cpu < nr_cpu_ids)
    242  re_ctrl.target_vp = hv_vp_index[new_cpu];
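hv_cpu_die() rebinds a Hyper-V resource away from the dying CPU by asking for any other online CPU (line 239) and only acting when one exists (line 241). The same fallback shape in isolation; the helper and its callback are hypothetical.

```c
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hand a per-CPU resource off to any other online CPU when @cpu dies.
 * rebind() stands in for the driver's actual re-targeting step
 * (re_ctrl.target_vp = hv_vp_index[new_cpu] above). */
static int retarget_from_dying_cpu(unsigned int cpu,
				   void (*rebind)(unsigned int target))
{
	unsigned int new_cpu = cpumask_any_but(cpu_online_mask, cpu);

	/* cpumask_any_but() returns >= nr_cpu_ids when @cpu was the last
	 * online CPU; there is then nowhere left to rebind to. */
	if (new_cpu >= nr_cpu_ids)
		return -ENODEV;

	rebind(new_cpu);
	return 0;
}
```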
|
/OK3568_Linux_fs/kernel/tools/perf/scripts/python/sched-migration.py   [all hits in migrate()]
    191  def migrate(self, ts_list, new, old_cpu, new_cpu):   [argument]
    192  if old_cpu == new_cpu:
    199  new_rq = self.prev.rqs[new_cpu]
    201  self.rqs[new_cpu] = in_rq
    208  self.event_cpus.append(new_cpu)
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device.c   [all hits in kfd_queue_work()]
    931  int cpu, new_cpu;   [local]
    933  cpu = new_cpu = smp_processor_id();
    935  new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
    936  if (cpu_to_node(new_cpu) == numa_node_id())
    938  } while (cpu != new_cpu);
    940  queue_work_on(new_cpu, wq, work);
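kfd_queue_work() prefers a CPU on the local NUMA node: it walks the online mask starting after the current CPU, wrapping via the modulo, and gives up (keeping the current CPU) once it comes full circle. The loop in isolation, as a sketch; note that smp_processor_id() requires preemption to be disabled, as it is in the driver's calling context.

```c
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/topology.h>

/* Prefer an online CPU on the local NUMA node; fall back to the
 * current CPU if the scan wraps all the way around. */
static unsigned int pick_local_node_cpu(void)
{
	unsigned int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		/* cpumask_next() past the last CPU yields nr_cpu_ids,
		 * which the modulo wraps back to CPU 0. */
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	return new_cpu;
}
```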
|
/OK3568_Linux_fs/kernel/drivers/hv/hyperv_vmbus.h   [all hits in hv_update_alloced_cpus()]
    441  unsigned int new_cpu)   [argument]
    443  hv_set_alloced_cpu(new_cpu);
|
/OK3568_Linux_fs/kernel/arch/x86/events/intel/uncore.c
   1318  int new_cpu)   [in uncore_change_type_ctx(), argument]
   1324  die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);   [in uncore_change_type_ctx()]
   1332  box->cpu = new_cpu;   [in uncore_change_type_ctx()]
   1338  if (new_cpu < 0)   [in uncore_change_type_ctx()]
   1342  perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);   [in uncore_change_type_ctx()]
   1343  box->cpu = new_cpu;   [in uncore_change_type_ctx()]
   1348  int old_cpu, int new_cpu)   [in uncore_change_context(), argument]
   1351  uncore_change_type_ctx(*uncores, old_cpu, new_cpu);   [in uncore_change_context()]
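This file and the other PMU drivers in this list (thunderx2_pmu.c, imc-pmu.c) lean on the same perf-core service: when the CPU that hosts an uncore PMU's events changes or goes away, perf_pmu_migrate_context() moves the event contexts wholesale and the driver records the new owner. A reduced sketch with a hypothetical box structure:

```c
#include <linux/perf_event.h>

/* Hypothetical container: one uncore PMU plus the CPU that currently
 * collects its events. */
struct demo_uncore_box {
	struct pmu pmu;
	int cpu;
};

static void demo_change_ctx(struct demo_uncore_box *box,
			    int old_cpu, int new_cpu)
{
	if (old_cpu < 0) {		/* first assignment: nothing to move */
		box->cpu = new_cpu;
		return;
	}
	if (new_cpu >= 0)		/* going fully offline passes new_cpu == -1 */
		perf_pmu_migrate_context(&box->pmu, old_cpu, new_cpu);
	box->cpu = new_cpu;
}
```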
|
/OK3568_Linux_fs/kernel/drivers/perf/thunderx2_pmu.c   [all hits in tx2_uncore_pmu_offline_cpu()]
    939  int new_cpu;   [local]
    954  new_cpu = cpumask_any_and(
    958  tx2_pmu->cpu = new_cpu;
    959  if (new_cpu >= nr_cpu_ids)
    961  perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
|
/OK3568_Linux_fs/kernel/drivers/scsi/lpfc/lpfc_init.c   [all hits in lpfc_cpu_affinity_check()]
  10940  int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;   [local]
  11008  new_cpu = start_cpu;
  11010  new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
  11015  new_cpu = cpumask_next(
  11016          new_cpu, cpu_present_mask);
  11017  if (new_cpu == nr_cpumask_bits)
  11018          new_cpu = first_cpu;
  11030  start_cpu = cpumask_next(new_cpu, cpu_present_mask);
  11038  cpu, cpup->eq, new_cpu,
  11059  new_cpu = start_cpu;
  [all …]
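The lpfc scan walks cpu_present_mask from a moving start position and wraps to first_cpu when cpumask_next() runs off the end (lines 11015-11018), so every present CPU is visited exactly once per pass. The wraparound skeleton extracted as a sketch, with a hypothetical match() predicate standing in for the driver's per-CPU tests:

```c
#include <linux/cpumask.h>
#include <linux/types.h>

/* Visit every present CPU once, starting at @start_cpu and wrapping to
 * @first_cpu, and return the first CPU that satisfies @match(). */
static int scan_present_cpus(int start_cpu, int first_cpu,
			     bool (*match)(int cpu))
{
	int new_cpu = start_cpu;
	unsigned int i;

	for (i = 0; i < num_present_cpus(); i++) {
		if (match(new_cpu))
			return new_cpu;
		new_cpu = cpumask_next(new_cpu, cpu_present_mask);
		if (new_cpu == nr_cpumask_bits)	/* ran off the end: wrap */
			new_cpu = first_cpu;
	}
	return -1;	/* no present CPU satisfied the predicate */
}
```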
|
/OK3568_Linux_fs/kernel/kernel/workqueue.c   [all hits in wq_select_unbound_cpu()]
   1391  int new_cpu;   [local]
   1404  new_cpu = __this_cpu_read(wq_rr_cpu_last);
   1405  new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
   1406  if (unlikely(new_cpu >= nr_cpu_ids)) {
   1407  new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
   1408  if (unlikely(new_cpu >= nr_cpu_ids))
   1411  __this_cpu_write(wq_rr_cpu_last, new_cpu);
   1413  return new_cpu;
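wq_select_unbound_cpu() round-robins work items across the unbound cpumask: resume from the per-CPU cursor, advance through the intersection with the online mask, wrap once, and store the pick back. Essentially the function above with the surrounding checks dropped, written as it would read inside workqueue.c (it assumes the file-local wq_rr_cpu_last per-CPU variable and wq_unbound_cpumask):

```c
static int select_unbound_cpu_sketch(int fallback_cpu)
{
	int new_cpu = __this_cpu_read(wq_rr_cpu_last);

	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
	if (unlikely(new_cpu >= nr_cpu_ids)) {
		/* wrapped: restart the scan from the start of the mask */
		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
		if (unlikely(new_cpu >= nr_cpu_ids))
			return fallback_cpu;	/* nothing online in the mask */
	}
	__this_cpu_write(wq_rr_cpu_last, new_cpu);	/* remember the pick */
	return new_cpu;
}
```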
|
/OK3568_Linux_fs/kernel/arch/arm64/kvm/vgic/vgic.c   [all hits in vgic_prune_ap_list()]
    696  struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;   [local]
    700  list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
|
/OK3568_Linux_fs/kernel/arch/powerpc/perf/imc-pmu.c   [all hits in nest_change_cpu_context()]
    330  static void nest_change_cpu_context(int old_cpu, int new_cpu)   [argument]
    334  if (old_cpu < 0 || new_cpu < 0)
    338  perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
|
/OK3568_Linux_fs/kernel/tools/perf/builtin-sched.c   [all hits in map_switch_event()]
   1540  bool new_cpu = false;   [local]
   1553  new_cpu = true;
   1658  if (sched->map.comp && new_cpu)
|