| /OK3568_Linux_fs/kernel/kernel/ |
| watchdog.c |
    118  unsigned int next_cpu = watchdog_next_cpu(cpu);  in watchdog_nmi_disable() local
    127  if (next_cpu < nr_cpu_ids)  in watchdog_nmi_disable()
    128  per_cpu(watchdog_nmi_touch, next_cpu) = true;  in watchdog_nmi_disable()
    363  unsigned int next_cpu;  in watchdog_next_cpu() local
    365  next_cpu = cpumask_next(cpu, &cpus);  in watchdog_next_cpu()
    366  if (next_cpu >= nr_cpu_ids)  in watchdog_next_cpu()
    367  next_cpu = cpumask_first(&cpus);  in watchdog_next_cpu()
    369  if (next_cpu == cpu)  in watchdog_next_cpu()
    372  return next_cpu;  in watchdog_next_cpu()
    388  unsigned int next_cpu;  in watchdog_check_hardlockup_other_cpu() local
    [all …]
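The watchdog.c hits above are the "buddy" hardlockup detector: each CPU watches the next online CPU, and watchdog_next_cpu() walks the online mask with wrap-around, reporting nr_cpu_ids when there is no other CPU to watch. A minimal user-space sketch of that wrap-around selection (pick_next_cpu, the online[] array and INVALID_CPU are illustrative stand-ins, not kernel names):

#include <stdio.h>

#define NR_CPUS 8
#define INVALID_CPU NR_CPUS            /* mirrors the nr_cpu_ids sentinel */

/* Return the next "online" CPU after @cpu, wrapping around; if @cpu is the
 * only online CPU, return INVALID_CPU so the caller can skip the check. */
static int pick_next_cpu(int cpu, const int online[NR_CPUS])
{
    int next = cpu;

    do {
        next = (next + 1) % NR_CPUS;   /* cpumask_next + wrap to cpumask_first */
        if (next == cpu)
            return INVALID_CPU;        /* came all the way around: no buddy */
    } while (!online[next]);

    return next;
}

int main(void)
{
    int online[NR_CPUS] = { 1, 0, 1, 1, 0, 0, 1, 0 };

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (online[cpu])
            printf("cpu %d -> buddy %d\n", cpu, pick_next_cpu(cpu, online));
    return 0;
}

The same pick-the-next-CPU-and-wrap idiom shows up again in the tsc_sync.c and clocksource.c hits further down, where a rotating timer simply re-arms itself on the chosen CPU.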
|
| smp.c |
    617  int cpu, next_cpu, this_cpu = smp_processor_id();  in smp_call_function_many_cond() local
    646  next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);  in smp_call_function_many_cond()
    647  if (next_cpu == this_cpu)  in smp_call_function_many_cond()
    648  next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);  in smp_call_function_many_cond()
    651  if (next_cpu >= nr_cpu_ids) {  in smp_call_function_many_cond()
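In smp_call_function_many_cond() the next_cpu probe only serves to detect the fast path: if the requested mask intersected with cpu_online_mask contains exactly one CPU besides the caller, a single cross-call is used instead of building a full IPI list. A hedged user-space sketch of that "exactly one other target" test (next_set() and single_target() are invented helper names):

#include <stdio.h>

#define NR_CPUS 8

/* Return index of the next set entry at or after @start, or NR_CPUS if none. */
static int next_set(const int mask[NR_CPUS], int start)
{
    for (int i = start; i < NR_CPUS; i++)
        if (mask[i])
            return i;
    return NR_CPUS;
}

/* Is there exactly one CPU != this_cpu in @mask?  Return that CPU,
 * or -1 when zero or more than one other CPUs remain. */
static int single_target(const int mask[NR_CPUS], int this_cpu)
{
    int cpu = next_set(mask, 0);

    if (cpu == this_cpu)                    /* skip ourselves */
        cpu = next_set(mask, cpu + 1);
    if (cpu >= NR_CPUS)
        return -1;                          /* nobody else to signal */

    int next_cpu = next_set(mask, cpu + 1);
    if (next_cpu == this_cpu)               /* our own bit does not count */
        next_cpu = next_set(mask, next_cpu + 1);

    return next_cpu >= NR_CPUS ? cpu : -1;  /* exactly one other target? */
}

int main(void)
{
    int mask[NR_CPUS] = { 0, 1, 0, 1, 0, 0, 0, 0 };

    printf("single target seen from cpu 1: %d\n", single_target(mask, 1)); /* 3 */
    printf("single target seen from cpu 5: %d\n", single_target(mask, 5)); /* -1 */
    return 0;
}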
|
| /OK3568_Linux_fs/kernel/arch/x86/platform/uv/ |
| uv_time.c |
    50  int next_cpu;  member
    159  head->next_cpu = -1;  in uv_rtc_allocate_timers()
    176  head->next_cpu = -1;  in uv_rtc_find_next_timer()
    185  head->next_cpu = bcpu;  in uv_rtc_find_next_timer()
    209  int next_cpu;  in uv_rtc_set_timer() local
    213  next_cpu = head->next_cpu;  in uv_rtc_set_timer()
    217  if (next_cpu < 0 || bcpu == next_cpu ||  in uv_rtc_set_timer()
    218  expires < head->cpu[next_cpu].expires) {  in uv_rtc_set_timer()
    219  head->next_cpu = bcpu;  in uv_rtc_set_timer()
    249  if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)  in uv_rtc_unset_timer()
    [all …]
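Unlike most hits in this list, next_cpu in uv_time.c is not a rotation cursor: each UV blade has one RTC comparator, and head->next_cpu caches which CPU on the blade owns the earliest-expiring timer (-1 when idle). A rough user-space sketch of that bookkeeping, assuming each blade is just an array of expiry values; all names and types here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CPUS_PER_BLADE 4
#define NO_EXPIRY UINT64_MAX

struct blade_timers {
    int next_cpu;                       /* CPU with the earliest expiry, or -1 */
    uint64_t expires[CPUS_PER_BLADE];
};

/* Rescan all slots, as uv_rtc_find_next_timer() does after a timer fires. */
static void find_next_timer(struct blade_timers *head)
{
    head->next_cpu = -1;
    for (int c = 0; c < CPUS_PER_BLADE; c++)
        if (head->expires[c] != NO_EXPIRY &&
            (head->next_cpu < 0 ||
             head->expires[c] < head->expires[head->next_cpu]))
            head->next_cpu = c;
}

/* Arm a timer for @cpu and keep the cached owner consistent, in the
 * spirit of uv_rtc_set_timer() (hardware programming is omitted). */
static void set_timer(struct blade_timers *head, int cpu, uint64_t expires)
{
    int next_cpu = head->next_cpu;

    head->expires[cpu] = expires;
    if (next_cpu < 0 || expires < head->expires[next_cpu])
        head->next_cpu = cpu;           /* new earliest: cache the owner */
    else if (cpu == next_cpu)
        find_next_timer(head);          /* old earliest moved later: rescan */
}

int main(void)
{
    struct blade_timers head = { -1, { NO_EXPIRY, NO_EXPIRY, NO_EXPIRY, NO_EXPIRY } };

    set_timer(&head, 2, 500);
    set_timer(&head, 0, 300);
    set_timer(&head, 3, 900);
    printf("earliest timer on cpu %d\n", head.next_cpu);  /* 0 */

    head.expires[0] = NO_EXPIRY;                          /* cpu 0's timer fired */
    find_next_timer(&head);
    printf("earliest timer on cpu %d\n", head.next_cpu);  /* 2 */
    return 0;
}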
|
| /OK3568_Linux_fs/kernel/tools/testing/selftests/bpf/ |
| test_lru_map.c |
    163  int next_cpu = 0;  in test_lru_sanity0() local
    168  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity0()
    254  int next_cpu = 0;  in test_lru_sanity1() local
    263  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity1()
    331  int next_cpu = 0;  in test_lru_sanity2() local
    340  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity2()
    438  int next_cpu = 0;  in test_lru_sanity3() local
    447  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity3()
    501  int next_cpu = 0;  in test_lru_sanity4() local
    506  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity4()
    [all …]
|
| bench.c |
    278  static int next_cpu(struct cpu_set *cpu_set)  in next_cpu() function
    284  for (i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {  in next_cpu()
    286  cpu_set->next_cpu = i + 1;  in next_cpu()
    294  return cpu_set->next_cpu++;  in next_cpu()
    392  next_cpu(&env.cons_cpus));  in setup_benchmark()
    399  env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;  in setup_benchmark()
    411  next_cpu(&env.prod_cpus));  in setup_benchmark()
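bench.c keeps a next_cpu cursor per struct cpu_set: with an explicit allow-list it returns the next permitted CPU after the cursor, otherwise it simply hands out CPUs sequentially, and setup_benchmark() lets producer threads continue from where the consumer threads stopped. A simplified sketch (the field and function names follow the snippet; everything else is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct cpu_set {
    bool *cpus;      /* optional allow-list; NULL means "any CPU" */
    int cpus_len;
    int next_cpu;    /* cursor: where the next search/handout starts */
};

static int next_cpu(struct cpu_set *cpu_set)
{
    if (cpu_set->cpus) {
        /* explicit list: return the next allowed CPU after the cursor */
        for (int i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {
            if (cpu_set->cpus[i]) {
                cpu_set->next_cpu = i + 1;
                return i;
            }
        }
        return -1;   /* list exhausted (the real tool exits with an error) */
    }

    /* no list given: hand out CPUs sequentially */
    return cpu_set->next_cpu++;
}

int main(void)
{
    bool allowed[6] = { false, true, true, false, false, true };
    struct cpu_set cons = { allowed, 6, 0 };
    struct cpu_set prod = { NULL, 0, 0 };

    int c0 = next_cpu(&cons);
    int c1 = next_cpu(&cons);
    printf("consumers on cpus %d %d\n", c0, c1);      /* 1 2 */

    prod.next_cpu = cons.next_cpu;   /* producers continue from the consumers' cursor */
    int p0 = next_cpu(&prod);
    int p1 = next_cpu(&prod);
    printf("producers on cpus %d %d\n", p0, p1);      /* 3 4 */
    return 0;
}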
|
| bench.h |
    17  int next_cpu;  member
|
| /OK3568_Linux_fs/kernel/kernel/trace/ |
| trace_hwlat.c |
    287  int next_cpu;  in move_to_next_cpu() local
    301  next_cpu = cpumask_next(smp_processor_id(), current_mask);  in move_to_next_cpu()
    304  if (next_cpu >= nr_cpu_ids)  in move_to_next_cpu()
    305  next_cpu = cpumask_first(current_mask);  in move_to_next_cpu()
    307  if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */  in move_to_next_cpu()
    311  cpumask_set_cpu(next_cpu, current_mask);  in move_to_next_cpu()
    369  int next_cpu;  in start_kthread() local
    378  next_cpu = cpumask_first(current_mask);  in start_kthread()
    387  cpumask_set_cpu(next_cpu, current_mask);  in start_kthread()
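The hwlat tracer runs a single sampling kthread, and move_to_next_cpu() rebinds it after every sampling window to the next CPU allowed by the tracing cpumask, wrapping to the first, so the busy-wait load rotates. A user-space analogue that hops the calling thread with sched_setaffinity(2); the allowed[] list and the loop are illustrative only, and CPUs 0-3 are assumed to exist:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* CPUs the "tracer" is allowed to use (analogue of the tracing cpumask). */
static const int allowed[] = { 0, 1, 2, 3 };
static const int nr_allowed = sizeof(allowed) / sizeof(allowed[0]);

/* Pin the calling thread to exactly one CPU, like the single-CPU
 * cpumask the hwlat kthread rebuilds on every rotation. */
static int move_to_cpu(int cpu)
{
    cpu_set_t mask;

    CPU_ZERO(&mask);
    CPU_SET(cpu, &mask);
    return sched_setaffinity(0, sizeof(mask), &mask);
}

int main(void)
{
    int idx = 0;

    for (int round = 0; round < 8; round++) {
        int next_cpu = allowed[idx];

        if (move_to_cpu(next_cpu))
            perror("sched_setaffinity");   /* e.g. EINVAL on a smaller machine */
        else
            printf("round %d sampled on cpu %d\n", round, next_cpu);

        usleep(100 * 1000);                /* stand-in for the sampling window */
        idx = (idx + 1) % nr_allowed;      /* wrap back to the first allowed CPU */
    }
    return 0;
}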
|
| trace_entries.h |
    117  __field( unsigned int, next_cpu ) \
    134  __entry->next_cpu)
    152  __entry->next_cpu)
|
| trace_output.c |
    924  field->next_cpu,  in trace_ctxwake_print()
    958  field->next_cpu,  in trace_ctxwake_raw()
    994  SEQ_PUT_HEX_FIELD(s, field->next_cpu);  in trace_ctxwake_hex()
    1025  SEQ_PUT_FIELD(s, field->next_cpu);  in trace_ctxwake_bin()
|
| trace_sched_wakeup.c |
    396  entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
    424  entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()
|
| trace.c |
    3526  int next_cpu = -1;  in __find_next_entry() local
    3556  next_cpu = cpu;  in __find_next_entry()
    3566  *ent_cpu = next_cpu;  in __find_next_entry()
|
| /OK3568_Linux_fs/kernel/arch/parisc/kernel/ |
| irq.c |
    347  static int next_cpu = -1;  in txn_alloc_addr() local
    349  next_cpu++; /* assign to "next" CPU we want this bugger on */  in txn_alloc_addr()
    352  while ((next_cpu < nr_cpu_ids) &&  in txn_alloc_addr()
    353  (!per_cpu(cpu_data, next_cpu).txn_addr ||  in txn_alloc_addr()
    354  !cpu_online(next_cpu)))  in txn_alloc_addr()
    355  next_cpu++;  in txn_alloc_addr()
    357  if (next_cpu >= nr_cpu_ids)  in txn_alloc_addr()
    358  next_cpu = 0; /* nothing else, assign monarch */  in txn_alloc_addr()
    360  return txn_affinity_addr(virt_irq, next_cpu);  in txn_alloc_addr()
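txn_alloc_addr() spreads interrupt transaction addresses across processors with a file-static next_cpu cursor: advance on every call, skip CPUs that are offline or lack a txn_addr, and fall back to CPU 0 (the "monarch") when the scan runs past the last CPU. A small sketch of that persistent round-robin cursor; usable() and the ok[] table stand in for the cpu_online()/txn_addr checks:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for "cpu_online(cpu) && per_cpu(cpu_data, cpu).txn_addr". */
static bool usable(int cpu)
{
    static const bool ok[NR_CPUS] = { true, false, true, true, false, true, false, false };
    return ok[cpu];
}

/* Pick a CPU for the next allocation.  The cursor is static, so successive
 * calls keep walking forward instead of always starting from CPU 0. */
static int txn_alloc_cpu(void)
{
    static int next_cpu = -1;

    next_cpu++;                               /* try the CPU after the last one used */
    while (next_cpu < NR_CPUS && !usable(next_cpu))
        next_cpu++;

    if (next_cpu >= NR_CPUS)
        next_cpu = 0;                         /* ran off the end: restart at the monarch */

    return next_cpu;
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("irq %d -> cpu %d\n", i, txn_alloc_cpu());   /* 0 2 3 5 0 2 3 5 */
    return 0;
}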
|
| /OK3568_Linux_fs/kernel/arch/x86/kernel/ |
| tsc_sync.c |
    95  int next_cpu;  in tsc_sync_check_timer_fn() local
    100  next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in tsc_sync_check_timer_fn()
    101  if (next_cpu >= nr_cpu_ids)  in tsc_sync_check_timer_fn()
    102  next_cpu = cpumask_first(cpu_online_mask);  in tsc_sync_check_timer_fn()
    105  add_timer_on(&tsc_sync_check_timer, next_cpu);  in tsc_sync_check_timer_fn()
|
| /OK3568_Linux_fs/kernel/drivers/net/wireguard/ |
| queueing.h |
    164  struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)  in wg_queue_enqueue_per_device_and_peer() argument
    178  cpu = wg_cpumask_next_online(next_cpu);  in wg_queue_enqueue_per_device_and_peer()
|
| /OK3568_Linux_fs/kernel/kernel/time/ |
| clocksource.c |
    284  int next_cpu, reset_pending;  in clocksource_watchdog() local
    388  next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in clocksource_watchdog()
    389  if (next_cpu >= nr_cpu_ids)  in clocksource_watchdog()
    390  next_cpu = cpumask_first(cpu_online_mask);  in clocksource_watchdog()
    398  add_timer_on(&watchdog_timer, next_cpu);  in clocksource_watchdog()
|
| tick-broadcast.c |
    689  int cpu, next_cpu = 0;  in tick_handle_oneshot_broadcast() local
    718  next_cpu = cpu;  in tick_handle_oneshot_broadcast()
    755  tick_broadcast_set_event(dev, next_cpu, next_event);  in tick_handle_oneshot_broadcast()
|
| /OK3568_Linux_fs/kernel/block/ |
| blk-mq.c |
    1508  cpu_online(hctx->next_cpu)) {  in __blk_mq_run_hw_queue()
    1546  int next_cpu = hctx->next_cpu;  in blk_mq_hctx_next_cpu() local
    1553  next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,  in blk_mq_hctx_next_cpu()
    1555  if (next_cpu >= nr_cpu_ids)  in blk_mq_hctx_next_cpu()
    1556  next_cpu = blk_mq_first_mapped_cpu(hctx);  in blk_mq_hctx_next_cpu()
    1564  if (!cpu_online(next_cpu)) {  in blk_mq_hctx_next_cpu()
    1574  hctx->next_cpu = next_cpu;  in blk_mq_hctx_next_cpu()
    1579  hctx->next_cpu = next_cpu;  in blk_mq_hctx_next_cpu()
    1580  return next_cpu;  in blk_mq_hctx_next_cpu()
    3008  hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);  in blk_mq_map_swqueue()
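blk-mq caches a preferred CPU per hardware queue in hctx->next_cpu and only rotates it once every BLK_MQ_CPU_WORK_BATCH queue runs; if the chosen CPU turns out to be offline it retries the search once, otherwise it keeps the stale value for this round and re-evaluates on the next call. A trimmed-down sketch of that batched rotation (the online[]/mapped[] arrays and the CPU_WORK_BATCH value are invented; the real code walks hctx->cpumask with cpumask_next_and()):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8
#define CPU_WORK_BATCH 4          /* stand-in for BLK_MQ_CPU_WORK_BATCH */

static bool online[NR_CPUS] = { true, true, false, true, false, false, false, false };

struct hw_queue {
    bool mapped[NR_CPUS];         /* stand-in for hctx->cpumask */
    int next_cpu;                 /* cached preferred CPU */
    int next_cpu_batch;           /* queue runs left before we rotate */
};

static int first_mapped_cpu(const struct hw_queue *q)
{
    for (int c = 0; c < NR_CPUS; c++)
        if (q->mapped[c])
            return c;
    return NR_CPUS;
}

static int next_mapped_cpu(const struct hw_queue *q, int after)
{
    for (int c = after + 1; c < NR_CPUS; c++)
        if (q->mapped[c])
            return c;
    return NR_CPUS;
}

/* Pick the CPU the next queue run should happen on. */
static int hw_queue_next_cpu(struct hw_queue *q)
{
    bool tried = false;
    int next_cpu = q->next_cpu;

    if (--q->next_cpu_batch <= 0) {
select_cpu:
        next_cpu = next_mapped_cpu(q, next_cpu);
        if (next_cpu >= NR_CPUS)
            next_cpu = first_mapped_cpu(q);      /* wrap around the map */
        q->next_cpu_batch = CPU_WORK_BATCH;
    }

    if (!online[next_cpu]) {
        if (!tried) {
            tried = true;                        /* retry the search once */
            goto select_cpu;
        }
        /* second pick is offline too: keep it for now, re-check next call */
        q->next_cpu = next_cpu;
        q->next_cpu_batch = 1;
        return next_cpu;
    }

    q->next_cpu = next_cpu;
    return next_cpu;
}

int main(void)
{
    struct hw_queue q = {
        .mapped = { false, true, true, true, false, false, false, false },
        .next_cpu = 1, .next_cpu_batch = CPU_WORK_BATCH,
    };

    for (int i = 0; i < 12; i++)
        printf("dispatch %2d runs on cpu %d\n", i, hw_queue_next_cpu(&q));
    return 0;
}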
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/ |
| mali_kernel_linux.c |
    373  int next_cpu;  in mali_init_cpu_time_counters_on_all_cpus() local
    375  next_cpu = cpumask_next(cpu_number, cpu_online_mask);  in mali_init_cpu_time_counters_on_all_cpus()
    376  if (next_cpu >= nr_cpu_ids) break;  in mali_init_cpu_time_counters_on_all_cpus()
    377  cpu_number = next_cpu;  in mali_init_cpu_time_counters_on_all_cpus()
|
| /OK3568_Linux_fs/kernel/include/linux/ |
| blk-mq.h |
    45  int next_cpu;  member
|
| /OK3568_Linux_fs/kernel/drivers/irqchip/ |
| irq-gic-v3.c |
    1148  int next_cpu, cpu = *base_cpu;  in gic_compute_target_list() local
    1155  next_cpu = cpumask_next(cpu, mask);  in gic_compute_target_list()
    1156  if (next_cpu >= nr_cpu_ids)  in gic_compute_target_list()
    1158  cpu = next_cpu;  in gic_compute_target_list()
|
| /OK3568_Linux_fs/kernel/net/core/ |
| dev.c |
    4299  struct rps_dev_flow *rflow, u16 next_cpu)  in set_rps_cpu() argument
    4301  if (next_cpu < nr_cpu_ids) {  in set_rps_cpu()
    4314  rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);  in set_rps_cpu()
    4335  per_cpu(softnet_data, next_cpu).input_queue_head;  in set_rps_cpu()
    4338  rflow->cpu = next_cpu;  in set_rps_cpu()
    4386  u32 next_cpu;  in get_rps_cpu() local
    4394  next_cpu = ident & rps_cpu_mask;  in get_rps_cpu()
    4413  if (unlikely(tcpu != next_cpu) &&  in get_rps_cpu()
    4417  tcpu = next_cpu;  in get_rps_cpu()
    4418  rflow = set_rps_cpu(dev, skb, rflow, next_cpu);  in get_rps_cpu()
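For RFS in dev.c, next_cpu is the CPU where the flow's consuming application was last seen (taken from the socket flow table); get_rps_cpu() keeps the flow on its current CPU (tcpu) and only re-steers it once the old CPU's backlog head has advanced past the flow's last enqueue point, so packets are not delivered out of order. A compact sketch of just that migration test, with plain counters instead of softnet_data and the flow table (the cpu_online() check of the real code is omitted):

#include <stdio.h>

#define NR_CPUS 4

struct flow {
    unsigned int cpu;          /* CPU currently handling this flow */
    unsigned int last_qtail;   /* backlog position of the flow's last packet */
};

/* Per-CPU backlog progress: how many packets each CPU has dequeued so far. */
static unsigned int input_queue_head[NR_CPUS];

/* Decide which CPU should get the next packet of @flow when the
 * application now runs on @next_cpu.  Mirrors the tcpu/next_cpu check in
 * get_rps_cpu()/set_rps_cpu(); locking and the flow table are omitted. */
static unsigned int steer(struct flow *flow, unsigned int next_cpu)
{
    unsigned int tcpu = flow->cpu;

    if (tcpu != next_cpu &&
        (tcpu >= NR_CPUS ||
         (int)(input_queue_head[tcpu] - flow->last_qtail) >= 0)) {
        /* old backlog fully drained (or old CPU invalid): safe to move */
        flow->cpu = next_cpu;
        flow->last_qtail = input_queue_head[next_cpu];
    }
    return flow->cpu;
}

int main(void)
{
    struct flow f = { .cpu = 0, .last_qtail = 5 };

    input_queue_head[0] = 3;    /* cpu 0 still has the flow's packets queued */
    printf("app on cpu 2, steer to cpu %u\n", steer(&f, 2));   /* stays on 0 */

    input_queue_head[0] = 7;    /* cpu 0 has drained past last_qtail */
    printf("app on cpu 2, steer to cpu %u\n", steer(&f, 2));   /* moves to 2 */
    return 0;
}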
|
| /OK3568_Linux_fs/kernel/drivers/net/ethernet/mediatek/ |
| mtk_eth_soc.c |
    1404  u32 next_cpu = desc->txd2;  in mtk_poll_tx_qdma() local
    1429  cpu = next_cpu;  in mtk_poll_tx_qdma()
|