/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/hwcnt/
mali_kbase_hwcnt.c  (all hits in kbase_hwcnt_context_init)
    132  struct kbase_hwcnt_context *hctx = NULL;    [local]
    137  hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);
    138  if (!hctx)
    141  hctx->iface = iface;
    142  spin_lock_init(&hctx->state_lock);
    143  hctx->disable_count = 1;
    144  mutex_init(&hctx->accum_lock);
    145  hctx->accum_inited = false;
    147  hctx->wq = alloc_workqueue("mali_kbase_hwcnt", WQ_HIGHPRI | WQ_UNBOUND, 0);
    148  if (!hctx->wq)
    [all …]
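
Reading note: the hits above outline the context constructor. A minimal C sketch of the allocate/initialise/unwind shape they imply follows; the field names are taken from the hits, while the iface parameter type, the error handling, and the return values are assumptions rather than the driver's actual code.

    /* Sketch only; assumes the kbase hwcnt driver headers are available. */
    static int hwcnt_context_init_sketch(const struct kbase_hwcnt_backend_interface *iface,
                                         struct kbase_hwcnt_context **out_hctx)
    {
            struct kbase_hwcnt_context *hctx;

            hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);
            if (!hctx)
                    return -ENOMEM;

            hctx->iface = iface;
            spin_lock_init(&hctx->state_lock);
            mutex_init(&hctx->accum_lock);
            hctx->disable_count = 1;        /* counters begin disabled */
            hctx->accum_inited = false;

            hctx->wq = alloc_workqueue("mali_kbase_hwcnt", WQ_HIGHPRI | WQ_UNBOUND, 0);
            if (!hctx->wq) {
                    kfree(hctx);            /* assumed unwind path */
                    return -ENOMEM;
            }

            *out_hctx = hctx;
            return 0;
    }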
|
mali_kbase_hwcnt_context.h
    53   void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx);
    63   const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(struct kbase_hwcnt_context *hctx);
    79   void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx);
    96   bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx);
    115  void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx);
    146  bool kbase_hwcnt_context_queue_work(struct kbase_hwcnt_context *hctx, struct work_struct *work);
|
mali_kbase_hwcnt_virtualizer.c
    48   struct kbase_hwcnt_context *hctx;    [member]
    201  errcode = kbase_hwcnt_accumulator_acquire(hvirt->hctx, &hvirt->accum);    in kbasep_hwcnt_virtualizer_accumulator_init()
    690  int kbase_hwcnt_virtualizer_init(struct kbase_hwcnt_context *hctx, u64 dump_threshold_ns,    in kbase_hwcnt_virtualizer_init() [argument]
    696  if (!hctx || !out_hvirt)    in kbase_hwcnt_virtualizer_init()
    699  metadata = kbase_hwcnt_context_metadata(hctx);    in kbase_hwcnt_virtualizer_init()
    707  virt->hctx = hctx;    in kbase_hwcnt_virtualizer_init()
    743  return kbase_hwcnt_context_queue_work(hvirt->hctx, work);    in kbase_hwcnt_virtualizer_queue_work()
|
mali_kbase_hwcnt_accumulator.h
    70   int kbase_hwcnt_accumulator_acquire(struct kbase_hwcnt_context *hctx,
|
mali_kbase_hwcnt_virtualizer.h
    54   int kbase_hwcnt_virtualizer_init(struct kbase_hwcnt_context *hctx, u64 dump_threshold_ns,
|
/OK3568_Linux_fs/kernel/block/
blk-mq-sched.c
    51   void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)    in blk_mq_sched_mark_restart_hctx() [argument]
    53   if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))    in blk_mq_sched_mark_restart_hctx()
    56   set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);    in blk_mq_sched_mark_restart_hctx()
    60   void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)    in blk_mq_sched_restart() [argument]
    62   if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))    in blk_mq_sched_restart()
    64   clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);    in blk_mq_sched_restart()
    75   blk_mq_run_hw_queue(hctx, true);    in blk_mq_sched_restart()
    88   struct blk_mq_hw_ctx *hctx =    in blk_mq_dispatch_hctx_list() [local]
    95   if (rq->mq_hctx != hctx) {    in blk_mq_dispatch_hctx_list()
    104  return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);    in blk_mq_dispatch_hctx_list()
    [all …]
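
Reading note: lines 51-75 are the two halves of the SCHED_RESTART handshake; the test_bit() before set_bit() avoids redirtying the shared hctx->state cacheline when the flag is already set. A hedged illustration of how callers pair the helpers (the example_* names are not from the source):

    /* A dispatch path that runs out of a resource marks the hctx for
     * restart; a completion path re-runs any hctx so marked.  The
     * blk-flush.c entry below shows a real caller of the second half. */
    static void example_dispatch_starved(struct blk_mq_hw_ctx *hctx)
    {
            blk_mq_sched_mark_restart_hctx(hctx);   /* re-run me later */
    }

    static void example_end_io(struct request *rq)
    {
            /* Resource freed: clears BLK_MQ_S_SCHED_RESTART and runs
             * the hardware queue again. */
            blk_mq_sched_restart(rq->mq_hctx);
    }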
|
blk-mq.c
    72   static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)    in blk_mq_hctx_has_pending() [argument]
    74   return !list_empty_careful(&hctx->dispatch) ||    in blk_mq_hctx_has_pending()
    75   sbitmap_any_bit_set(&hctx->ctx_map) ||    in blk_mq_hctx_has_pending()
    76   blk_mq_sched_has_work(hctx);    in blk_mq_hctx_has_pending()
    82   static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,    in blk_mq_hctx_mark_pending() [argument]
    85   const int bit = ctx->index_hw[hctx->type];    in blk_mq_hctx_mark_pending()
    87   if (!sbitmap_test_bit(&hctx->ctx_map, bit))    in blk_mq_hctx_mark_pending()
    88   sbitmap_set_bit(&hctx->ctx_map, bit);    in blk_mq_hctx_mark_pending()
    91   static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,    in blk_mq_hctx_clear_pending() [argument]
    94   const int bit = ctx->index_hw[hctx->type];    in blk_mq_hctx_clear_pending()
    [all …]
|
blk-mq.h
    45   bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
    49   void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
    50   struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
    71   void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
    75   void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
    80   void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
    132  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
    162  struct blk_mq_hw_ctx *hctx;    [member]
    173  return data->hctx->sched_tags;    in blk_mq_tags_from_data()
    175  return data->hctx->tags;    in blk_mq_tags_from_data()
    [all …]
|
blk-mq-sysfs.c
    36   struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,    in blk_mq_hw_sysfs_release() [local]
    39   if (hctx->flags & BLK_MQ_F_BLOCKING)    in blk_mq_hw_sysfs_release()
    40   cleanup_srcu_struct(hctx->srcu);    in blk_mq_hw_sysfs_release()
    41   blk_free_flush_queue(hctx->fq);    in blk_mq_hw_sysfs_release()
    42   sbitmap_free(&hctx->ctx_map);    in blk_mq_hw_sysfs_release()
    43   free_cpumask_var(hctx->cpumask);    in blk_mq_hw_sysfs_release()
    44   kfree(hctx->ctxs);    in blk_mq_hw_sysfs_release()
    45   kfree(hctx);    in blk_mq_hw_sysfs_release()
    106  struct blk_mq_hw_ctx *hctx;    in blk_mq_hw_sysfs_show() [local]
    111  hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);    in blk_mq_hw_sysfs_show()
    [all …]
|
blk-mq-debugfs.c
    228  struct blk_mq_hw_ctx *hctx = data;    in hctx_state_show() [local]
    230  blk_flags_show(m, hctx->state, hctx_state_name,    in hctx_state_show()
    256  struct blk_mq_hw_ctx *hctx = data;    in hctx_flags_show() [local]
    257  const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);    in hctx_flags_show()
    267  hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),    in hctx_flags_show()
    365  __acquires(&hctx->lock)    in hctx_dispatch_start()
    367  struct blk_mq_hw_ctx *hctx = m->private;    in hctx_dispatch_start() [local]
    369  spin_lock(&hctx->lock);    in hctx_dispatch_start()
    370  return seq_list_start(&hctx->dispatch, *pos);    in hctx_dispatch_start()
    375  struct blk_mq_hw_ctx *hctx = m->private;    in hctx_dispatch_next() [local]
    [all …]
|
blk-mq-tag.c
    24   bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)    in __blk_mq_tag_busy() [argument]
    26   if (blk_mq_is_sbitmap_shared(hctx->flags)) {    in __blk_mq_tag_busy()
    27   struct request_queue *q = hctx->queue;    in __blk_mq_tag_busy()
    34   if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&    in __blk_mq_tag_busy()
    35   !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))    in __blk_mq_tag_busy()
    36   atomic_inc(&hctx->tags->active_queues);    in __blk_mq_tag_busy()
    56   void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)    in __blk_mq_tag_idle() [argument]
    58   struct blk_mq_tags *tags = hctx->tags;    in __blk_mq_tag_idle()
    59   struct request_queue *q = hctx->queue;    in __blk_mq_tag_idle()
    62   if (blk_mq_is_sbitmap_shared(hctx->flags)) {    in __blk_mq_tag_idle()
    [all …]
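
Reading note: lines 34-36 are the check-then-atomically-set idiom: a plain test_bit() filters the common "already active" case cheaply, and test_and_set_bit() arbitrates the race so active_queues moves exactly once per queue. The generic form, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/bitops.h>

    /* Increment nr_active exactly once, no matter how many CPUs race
     * to set the same bit. */
    static void mark_active_once(unsigned long *state, int bit, atomic_t *nr_active)
    {
            if (!test_bit(bit, state) && !test_and_set_bit(bit, state))
                    atomic_inc(nr_active);
    }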
|
kyber-iosched.c  (all hits in kyber_init_hctx)
    461  static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)    [argument]
    463  struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
    467  khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
    471  khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
    473  GFP_KERNEL, hctx->numa_node);
    477  for (i = 0; i < hctx->nr_ctx; i++)
    481  if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
    482  ilog2(8), GFP_KERNEL, hctx->numa_node)) {
    496  khd->domain_wait[i].wait.private = hctx;
    504  hctx->sched_data = khd;
    [all …]
|
blk-mq-tag.h
    45   extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
    58   struct blk_mq_hw_ctx *hctx)    in bt_wait_ptr() [argument]
    60   if (!hctx)    in bt_wait_ptr()
    62   return sbq_wait_ptr(bt, &hctx->wait_index);    in bt_wait_ptr()
    74   static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)    in blk_mq_tag_busy() [argument]
    76   if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))    in blk_mq_tag_busy()
    79   return __blk_mq_tag_busy(hctx);    in blk_mq_tag_busy()
    82   static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)    in blk_mq_tag_idle() [argument]
    84   if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))    in blk_mq_tag_idle()
    87   __blk_mq_tag_idle(hctx);    in blk_mq_tag_idle()
|
blk-mq-sched.h
    16   void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
    17   void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
    21   void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
    25   void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
    70   static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)    in blk_mq_sched_has_work() [argument]
    72   struct elevator_queue *e = hctx->queue->elevator;    in blk_mq_sched_has_work()
    75   return e->type->ops.has_work(hctx);    in blk_mq_sched_has_work()
    80   static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)    in blk_mq_sched_needs_restart() [argument]
    82   return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);    in blk_mq_sched_needs_restart()
|
blk-mq-debugfs.h
    24   struct blk_mq_hw_ctx *hctx);
    25   void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
    32   struct blk_mq_hw_ctx *hctx);
    33   void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
    48   struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_register_hctx() [argument]
    52   static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_unregister_hctx() [argument]
    73   struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_register_sched_hctx() [argument]
    77   static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_unregister_sched_hctx() [argument]
|
mq-deadline-main.c
    493  static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)    in dd_dispatch_request() [argument]
    495  struct deadline_data *dd = hctx->queue->elevator->elevator_data;    in dd_dispatch_request()
    547  static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)    in dd_depth_updated() [argument]
    549  struct request_queue *q = hctx->queue;    in dd_depth_updated()
    551  struct blk_mq_tags *tags = hctx->sched_tags;    in dd_depth_updated()
    559  static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)    in dd_init_hctx() [argument]
    561  dd_depth_updated(hctx);    in dd_init_hctx()
    711  static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,    in dd_insert_request() [argument]
    714  struct request_queue *q = hctx->queue;    in dd_insert_request()
    773  static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,    in dd_insert_requests() [argument]
    [all …]
|
bfq-iosched.c
    4667  static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)    in bfq_has_work() [argument]
    4669  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;    in bfq_has_work()
    4679  static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)    in __bfq_dispatch_request() [argument]
    4681  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;    in __bfq_dispatch_request()
    4819  static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)    in bfq_dispatch_request() [argument]
    4821  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;    in bfq_dispatch_request()
    4831  rq = __bfq_dispatch_request(hctx);    in bfq_dispatch_request()
    4838  bfq_update_dispatch_stats(hctx->queue, rq,    in bfq_dispatch_request()
    5512  static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,    in bfq_insert_request() [argument]
    5515  struct request_queue *q = hctx->queue;    in bfq_insert_request()
    [all …]
|
blk-flush.c  (all hits in mq_flush_data_end_io)
    355  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;    [local]
    373  blk_mq_sched_restart(hctx);
|
/OK3568_Linux_fs/kernel/include/linux/
blk-mq.h
    508  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
    509  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
    512  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
    516  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
    517  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
    574  #define queue_for_each_hw_ctx(q, hctx, i) \    [argument]
    576  ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
    578  #define hctx_for_each_ctx(hctx, ctx, i) \    [argument]
    579  for ((i) = 0; (i) < (hctx)->nr_ctx && \
    580  ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
    [all …]
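
Reading note: lines 574-580 define the two iteration macros. A usage sketch (illustrative function; the inner macro is only usable inside the block layer, where struct blk_mq_ctx is defined):

    /* Walk every hardware context of a queue, then every software ctx
     * mapped onto it. */
    static void example_dump_mappings(struct request_queue *q)
    {
            struct blk_mq_hw_ctx *hctx;
            struct blk_mq_ctx *ctx;
            unsigned int i, j, n;

            queue_for_each_hw_ctx(q, hctx, i) {
                    n = 0;
                    hctx_for_each_ctx(hctx, ctx, j)
                            n++;    /* ctx visits each mapped blk_mq_ctx */
                    pr_info("hctx %u maps %u software ctxs\n", i, n);
            }
    }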
|
/OK3568_Linux_fs/kernel/net/dccp/ccids/
ccid3.h  (all hits in ccid3_hc_tx_sk)
    104  struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);    [local]
    105  BUG_ON(hctx == NULL);
    106  return hctx;
|
/OK3568_Linux_fs/kernel/drivers/s390/block/
scm_blk.c
    283  static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,    in scm_blk_request() [argument]
    286  struct scm_device *scmdev = hctx->queue->queuedata;    in scm_blk_request()
    288  struct scm_queue *sq = hctx->driver_data;    in scm_blk_request()
    332  static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in scm_blk_init_hctx() [argument]
    341  hctx->driver_data = qd;    in scm_blk_init_hctx()
    346  static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)    in scm_blk_exit_hctx() [argument]
    348  struct scm_queue *qd = hctx->driver_data;    in scm_blk_exit_hctx()
    351  kfree(hctx->driver_data);    in scm_blk_exit_hctx()
    352  hctx->driver_data = NULL;    in scm_blk_exit_hctx()
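
Reading note: scm_blk parks per-queue state in hctx->driver_data at init_hctx time and releases it in exit_hctx. A skeleton of that pairing (only the hook shapes come from the hits; the spinlock member is an assumption for illustration):

    static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                 unsigned int idx)
    {
            struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

            if (!qd)
                    return -ENOMEM;
            spin_lock_init(&qd->lock);      /* assumed member */
            hctx->driver_data = qd;
            return 0;
    }

    static void example_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
    {
            kfree(hctx->driver_data);
            hctx->driver_data = NULL;
    }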
|
/OK3568_Linux_fs/kernel/drivers/block/rnbd/
rnbd-clt.c
    163   if (WARN_ON(!q->hctx))    in rnbd_clt_dev_requeue()
    167   blk_mq_run_hw_queue(q->hctx, true);    in rnbd_clt_dev_requeue()
    1106  struct blk_mq_hw_ctx *hctx,    in rnbd_clt_dev_kick_mq_queue() [argument]
    1109  struct rnbd_queue *q = hctx->driver_data;    in rnbd_clt_dev_kick_mq_queue()
    1112  blk_mq_delay_run_hw_queue(hctx, delay);    in rnbd_clt_dev_kick_mq_queue()
    1118  blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);    in rnbd_clt_dev_kick_mq_queue()
    1121  static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,    in rnbd_queue_rq() [argument]
    1135  rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);    in rnbd_queue_rq()
    1144  rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);    in rnbd_queue_rq()
    1252  struct blk_mq_hw_ctx *hctx)    in rnbd_init_hw_queue() [argument]
    [all …]
|
/OK3568_Linux_fs/kernel/drivers/nvme/target/
loop.c
    131  static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,    in nvme_loop_queue_rq() [argument]
    134  struct nvme_ns *ns = hctx->queue->queuedata;    in nvme_loop_queue_rq()
    135  struct nvme_loop_queue *queue = hctx->driver_data;    in nvme_loop_queue_rq()
    214  static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in nvme_loop_init_hctx() [argument]
    222  hctx->driver_data = queue;    in nvme_loop_init_hctx()
    226  static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in nvme_loop_init_admin_hctx() [argument]
    234  hctx->driver_data = queue;    in nvme_loop_init_admin_hctx()
|
/OK3568_Linux_fs/kernel/drivers/block/
virtio_blk.c
    203  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)    in virtio_commit_rqs() [argument]
    205  struct virtio_blk *vblk = hctx->queue->queuedata;    in virtio_commit_rqs()
    206  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];    in virtio_commit_rqs()
    217  static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,    in virtio_queue_rq() [argument]
    220  struct virtio_blk *vblk = hctx->queue->queuedata;    in virtio_queue_rq()
    225  int qid = hctx->queue_num;    in virtio_queue_rq()
    271  num = blk_rq_map_sg(hctx->queue, req, vbr->sg);    in virtio_queue_rq()
    287  blk_mq_stop_hw_queue(hctx);    in virtio_queue_rq()
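
Reading note: the hit at line 287 is the standard back-pressure move in a ->queue_rq() hook: stop the hardware queue before reporting a resource shortage, so blk-mq parks it until a completion restarts it. A hedged skeleton (ring_full() is a stand-in for the driver's real capacity check):

    static bool ring_full(struct blk_mq_hw_ctx *hctx)
    {
            return false;   /* placeholder: query the device ring here */
    }

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            if (ring_full(hctx)) {
                    blk_mq_stop_hw_queue(hctx);
                    return BLK_STS_DEV_RESOURCE;
            }
            /* map the request and kick the device ... */
            return BLK_STS_OK;
    }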
|
/OK3568_Linux_fs/external/security/rk_tee_user/v2/host/xtest/
regression_4100.c
    330   uint32_t hn, uint32_t hctx, uint32_t hres)    in cmd_to_fmm() [argument]
    338   op.params[1].value.a = hctx;    in cmd_to_fmm()
    349   uint32_t hn, uint32_t hctx, uint32_t hres)    in cmd_from_fmm() [argument]
    357   op.params[1].value.a = hctx;    in cmd_from_fmm()
    370   uint32_t hctx, uint32_t hres)    in cmd_compute_fmm() [argument]
    379   op.params[1].value.b = hctx;    in cmd_compute_fmm()
    1888  uint32_t hctx = TA_CRYPT_ARITH_INVALID_HANDLE;    in test_4110_fmm() [local]
    1923  cmd_new_fmm_ctx(c, s, nbitsize, hn, &hctx)))    in test_4110_fmm()
    1930  if (!ADBG_EXPECT_TEEC_SUCCESS(c, cmd_to_fmm(c, s, hc, hn, hctx, hop1)))    in test_4110_fmm()
    1937  if (!ADBG_EXPECT_TEEC_SUCCESS(c, cmd_to_fmm(c, s, hc, hn, hctx, hop2)))    in test_4110_fmm()
    [all …]
|