/OK3568_Linux_fs/kernel/kernel/sched/
  loadavg.c
    82   long nr_active, delta = 0;                          in calc_load_fold_active()  [local]
    84   nr_active = this_rq->nr_running - adjust;           in calc_load_fold_active()
    85   nr_active += (long)this_rq->nr_uninterruptible;     in calc_load_fold_active()
    87   if (nr_active != this_rq->calc_load_active) {       in calc_load_fold_active()
    88   delta = nr_active - this_rq->calc_load_active;      in calc_load_fold_active()
    89   this_rq->calc_load_active = nr_active;              in calc_load_fold_active()
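
A minimal standalone sketch of the folding pattern shown above: compute the current "active" count (runnable plus uninterruptible) and return only the change since the last fold. The struct below is an invented stand-in, not the kernel's struct rq.

    #include <stdio.h>

    struct fake_rq {
            long nr_running;
            long nr_uninterruptible;
            long calc_load_active;  /* value contributed at the last fold */
    };

    static long fold_active(struct fake_rq *rq, long adjust)
    {
            long nr_active, delta = 0;

            nr_active = rq->nr_running - adjust;
            nr_active += rq->nr_uninterruptible;

            /* only report the difference since the previous fold */
            if (nr_active != rq->calc_load_active) {
                    delta = nr_active - rq->calc_load_active;
                    rq->calc_load_active = nr_active;
            }
            return delta;
    }

    int main(void)
    {
            struct fake_rq rq = { .nr_running = 3, .nr_uninterruptible = 1 };

            printf("first fold delta:  %ld\n", fold_active(&rq, 0)); /* 4 */
            rq.nr_running = 2;
            printf("second fold delta: %ld\n", fold_active(&rq, 0)); /* -1 */
            return 0;
    }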
|
/OK3568_Linux_fs/kernel/include/trace/events/
  vmscan.h
    393  unsigned long nr_active, unsigned long nr_deactivated,
    396  TP_ARGS(nid, nr_taken, nr_active, nr_deactivated, nr_referenced, priority, file),
    401  __field(unsigned long, nr_active)
    411  __entry->nr_active = nr_active;
    421  __entry->nr_active, __entry->nr_deactivated, __entry->nr_referenced,
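
A rough userspace analogue of what the tracepoint above records: the counters it is handed are copied into a fixed record and formatted later. Field names follow the snippet; the struct name and helpers are invented for illustration.

    #include <stdio.h>

    struct shrink_record {
            unsigned long nr_taken;
            unsigned long nr_active;
            unsigned long nr_deactivated;
            unsigned long nr_referenced;
            int priority;
    };

    /* mirrors the __entry->nr_active = nr_active; assignments */
    static void record_shrink(struct shrink_record *rec, unsigned long nr_taken,
                              unsigned long nr_active, unsigned long nr_deactivated,
                              unsigned long nr_referenced, int priority)
    {
            rec->nr_taken = nr_taken;
            rec->nr_active = nr_active;
            rec->nr_deactivated = nr_deactivated;
            rec->nr_referenced = nr_referenced;
            rec->priority = priority;
    }

    int main(void)
    {
            struct shrink_record rec;

            record_shrink(&rec, 32, 32, 28, 4, 12);
            printf("nr_taken=%lu nr_active=%lu nr_deactivated=%lu nr_referenced=%lu priority=%d\n",
                   rec.nr_taken, rec.nr_active, rec.nr_deactivated,
                   rec.nr_referenced, rec.priority);
            return 0;
    }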
|
/OK3568_Linux_fs/kernel/arch/powerpc/platforms/cell/spufs/
  sched.c
    686  cbe_spu_info[node].nr_active--;              in find_victim()
    715  cbe_spu_info[node].nr_active++;              in __spu_schedule()
    756  cbe_spu_info[node].nr_active--;              in spu_unschedule()
    957  int nr_active = 0, node;                     in count_active_contexts()  [local]
    960  nr_active += cbe_spu_info[node].nr_active;   in count_active_contexts()
    961  nr_active += spu_prio->nr_waiting;           in count_active_contexts()
    963  return nr_active;                            in count_active_contexts()
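
A standalone sketch of the counting shape in count_active_contexts(): sum the per-node nr_active counters, then add the number of waiting contexts. MAX_NODES and the structs are invented stand-ins for cbe_spu_info[] and spu_prio.

    #include <stdio.h>

    #define MAX_NODES 4

    static struct { int nr_active; } node_info[MAX_NODES];
    static struct { int nr_waiting; } prio_info;

    static int count_active_contexts(void)
    {
            int nr_active = 0, node;

            for (node = 0; node < MAX_NODES; node++)
                    nr_active += node_info[node].nr_active;
            nr_active += prio_info.nr_waiting;

            return nr_active;
    }

    int main(void)
    {
            node_info[0].nr_active = 2;
            node_info[2].nr_active = 1;
            prio_info.nr_waiting = 3;
            printf("active contexts: %d\n", count_active_contexts()); /* 6 */
            return 0;
    }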
|
/OK3568_Linux_fs/kernel/block/
  blk-mq.h
    210  atomic_inc(&hctx->nr_active);                in __blk_mq_inc_active_requests()
    218  atomic_dec(&hctx->nr_active);                in __blk_mq_dec_active_requests()
    225  return atomic_read(&hctx->nr_active);        in __blk_mq_active_requests()

  blk-mq-debugfs.c
    619  seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));   in hctx_active_show()

  blk-mq.c
    2795 atomic_set(&hctx->nr_active, 0);             in blk_mq_alloc_hctx()
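
A userspace sketch of the per-hctx accounting above: nr_active is an atomic counter zeroed at allocation, incremented when a request becomes active, decremented on completion, and sampled by readers (debugfs here, the SCSI drivers further down). C11 atomics stand in for the kernel's atomic_t; the struct is an invented stand-in, not struct blk_mq_hw_ctx.

    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_hctx {
            atomic_int nr_active;
    };

    static void hctx_init(struct fake_hctx *hctx)
    {
            atomic_store(&hctx->nr_active, 0);      /* like blk_mq_alloc_hctx() */
    }

    static void inc_active_requests(struct fake_hctx *hctx)
    {
            atomic_fetch_add(&hctx->nr_active, 1);  /* like __blk_mq_inc_active_requests() */
    }

    static void dec_active_requests(struct fake_hctx *hctx)
    {
            atomic_fetch_sub(&hctx->nr_active, 1);  /* like __blk_mq_dec_active_requests() */
    }

    static int active_requests(struct fake_hctx *hctx)
    {
            return atomic_load(&hctx->nr_active);   /* like __blk_mq_active_requests() */
    }

    int main(void)
    {
            struct fake_hctx hctx;

            hctx_init(&hctx);
            inc_active_requests(&hctx);
            inc_active_requests(&hctx);
            dec_active_requests(&hctx);
            printf("%d\n", active_requests(&hctx)); /* 1, what hctx_active_show() would print */
            return 0;
    }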
|
/OK3568_Linux_fs/kernel/fs/ceph/
  mdsmap.c
    407  int i, nr_active = 0;                        in ceph_mdsmap_is_cluster_available()  [local]
    416  nr_active++;                                 in ceph_mdsmap_is_cluster_available()
    418  return nr_active > 0;                        in ceph_mdsmap_is_cluster_available()
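
A standalone sketch of the shape of ceph_mdsmap_is_cluster_available() above: walk the MDS slots, count the active ones, and report the cluster usable if at least one is active. The array and state values are invented stand-ins for the real mdsmap.

    #include <stdbool.h>
    #include <stdio.h>

    enum mds_state { MDS_DOWN = 0, MDS_ACTIVE = 1 };

    static bool cluster_available(const enum mds_state *mds, int max_mds)
    {
            int i, nr_active = 0;

            for (i = 0; i < max_mds; i++)
                    if (mds[i] == MDS_ACTIVE)
                            nr_active++;

            return nr_active > 0;
    }

    int main(void)
    {
            enum mds_state mds[] = { MDS_DOWN, MDS_ACTIVE, MDS_DOWN };

            printf("available: %d\n", cluster_available(mds, 3)); /* 1 */
            return 0;
    }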
|
/OK3568_Linux_fs/kernel/kernel/
  workqueue.c
    213   int nr_active;  /* L: nr of active works */                    [member]
    1165  pwq->nr_active++;                                              in pwq_activate_delayed_work()
    1195  pwq->nr_active--;                                              in pwq_dec_nr_in_flight()
    1198  if (pwq->nr_active < pwq->max_active)                          in pwq_dec_nr_in_flight()
    1503  if (likely(pwq->nr_active < pwq->max_active)) {                in __queue_work()
    1505  pwq->nr_active++;                                              in __queue_work()
    2570  if (pwq->nr_active && need_to_create_worker(pool)) {           in rescuer_thread()
    2990  drained = !pwq->nr_active && list_empty(&pwq->delayed_works);  in drain_workqueue()
    3774  pwq->nr_active < pwq->max_active) {                            in pwq_adjust_max_active()
    4393  if (pwq->nr_active || !list_empty(&pwq->delayed_works))        in pwq_busy()
    [all …]
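
A userspace sketch of the gating pattern visible in __queue_work() and pwq_dec_nr_in_flight() above: a work item becomes active while nr_active < max_active, otherwise it is parked on a delayed list; completing an item decrements nr_active and, if there is room again, activates a parked one. Types and list handling are simplified invented stand-ins for struct pool_workqueue.

    #include <stdio.h>

    #define MAX_DELAYED 16

    struct fake_pwq {
            int nr_active;          /* works currently marked active */
            int max_active;         /* cap on concurrently active works */
            int nr_delayed;         /* works parked because the cap was hit */
    };

    static void queue_work(struct fake_pwq *pwq)
    {
            if (pwq->nr_active < pwq->max_active)
                    pwq->nr_active++;               /* run it now */
            else if (pwq->nr_delayed < MAX_DELAYED)
                    pwq->nr_delayed++;              /* park, like delayed_works */
    }

    static void work_done(struct fake_pwq *pwq)
    {
            pwq->nr_active--;
            if (pwq->nr_active < pwq->max_active && pwq->nr_delayed) {
                    pwq->nr_delayed--;              /* like pwq_activate_delayed_work() */
                    pwq->nr_active++;
            }
    }

    int main(void)
    {
            struct fake_pwq pwq = { .max_active = 1 };

            queue_work(&pwq);
            queue_work(&pwq);       /* cap hit: parked */
            printf("active=%d delayed=%d\n", pwq.nr_active, pwq.nr_delayed); /* 1 1 */
            work_done(&pwq);        /* parked work activated */
            printf("active=%d delayed=%d\n", pwq.nr_active, pwq.nr_delayed); /* 1 0 */
            return 0;
    }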
|
/OK3568_Linux_fs/kernel/include/linux/
  blk-mq.h
    141  atomic_t nr_active;                          [member]

  perf_event.h
    819  int nr_active;                               [member]
|
/OK3568_Linux_fs/kernel/arch/powerpc/include/asm/
  spu.h
    183  int nr_active;                               [member]
|
/OK3568_Linux_fs/kernel/net/mptcp/
  protocol.c
    1092  int i, nr_active = 0;                                       in mptcp_subflow_get_send()  [local]
    1131  nr_active += !subflow->backup;                              in mptcp_subflow_get_send()
    1149  msk, nr_active, send_info[0].ssk, send_info[0].ratio,       in mptcp_subflow_get_send()
    1153  if (!nr_active)                                             in mptcp_subflow_get_send()
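
A standalone sketch of the pre-pass in mptcp_subflow_get_send() above: count the subflows not marked as backup; when none are active, the caller falls back to backup subflows. The struct and fields are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_subflow {
            bool backup;
    };

    static int count_non_backup(const struct fake_subflow *sf, int n)
    {
            int i, nr_active = 0;

            for (i = 0; i < n; i++)
                    nr_active += !sf[i].backup;     /* mirrors nr_active += !subflow->backup */

            return nr_active;
    }

    int main(void)
    {
            struct fake_subflow sf[] = { { .backup = true }, { .backup = false } };
            int nr_active = count_non_backup(sf, 2);

            if (!nr_active)
                    printf("only backup subflows available\n");
            else
                    printf("nr_active=%d\n", nr_active);    /* 1 */
            return 0;
    }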
|
/OK3568_Linux_fs/kernel/kernel/events/
  core.c
    2285  if (!--ctx->nr_active)                               in event_sched_out()
    2557  if (!ctx->nr_active++)                               in event_sched_in()
    3247  if (!ctx->nr_active || !(is_active & EVENT_ALL))     in ctx_sched_out()
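
A sketch of the bookkeeping shape in event_sched_in()/event_sched_out() above: the context keeps a running count of scheduled-in events, the 0 -> 1 and 1 -> 0 transitions are the interesting edges, and a scheduler pass can bail out early when nr_active is zero. The struct and the printed placeholder actions are invented stand-ins, not the actual perf logic.

    #include <stdio.h>

    struct fake_ctx {
            int nr_active;
    };

    static void event_sched_in(struct fake_ctx *ctx)
    {
            if (!ctx->nr_active++)                  /* 0 -> 1 transition */
                    printf("first event active in this context\n");
    }

    static void event_sched_out(struct fake_ctx *ctx)
    {
            if (!--ctx->nr_active)                  /* 1 -> 0 transition */
                    printf("last active event gone from this context\n");
    }

    static void ctx_sched_out(struct fake_ctx *ctx)
    {
            if (!ctx->nr_active)
                    return;                         /* nothing scheduled in */
            printf("scheduling out %d active event(s)\n", ctx->nr_active);
    }

    int main(void)
    {
            struct fake_ctx ctx = { 0 };

            event_sched_in(&ctx);
            ctx_sched_out(&ctx);
            event_sched_out(&ctx);
            return 0;
    }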
|
/OK3568_Linux_fs/kernel/drivers/scsi/megaraid/
  megaraid_sas_fusion.c
    366  sdev_busy = atomic_read(&hctx->nr_active);           in megasas_get_msix_index()
|
/OK3568_Linux_fs/kernel/drivers/scsi/mpt3sas/
  mpt3sas_base.c
    3582  return atomic_read(&hctx->nr_active);               in _base_sdev_nr_inflight_request()
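
Both SCSI drivers above only read hctx->nr_active as an instantaneous "how busy is this queue" value. A sketch of that read-side use, picking the least loaded of several queues; the selection policy and queue array are invented for illustration, not the drivers' actual algorithm.

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_QUEUES 4

    static atomic_int nr_active[NR_QUEUES];

    static int least_busy_queue(void)
    {
            int i, best = 0, best_busy = atomic_load(&nr_active[0]);

            for (i = 1; i < NR_QUEUES; i++) {
                    int busy = atomic_load(&nr_active[i]);  /* like atomic_read(&hctx->nr_active) */

                    if (busy < best_busy) {
                            best = i;
                            best_busy = busy;
                    }
            }
            return best;
    }

    int main(void)
    {
            atomic_store(&nr_active[0], 5);
            atomic_store(&nr_active[1], 2);
            atomic_store(&nr_active[2], 7);
            atomic_store(&nr_active[3], 4);
            printf("least busy queue: %d\n", least_busy_queue());   /* 1 */
            return 0;
    }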
|