References matching "cs" in the habanalabs driver's command submission code, grouped by the function they appear in. The numbers are source line numbers from the cross-reference listing; the excerpts are not contiguous.

cs_get(), cs_get_unless_zero(), cs_put() (reference counting on a command submission):

108  static void cs_get(struct hl_cs *cs)
110          kref_get(&cs->refcount);

113  static int cs_get_unless_zero(struct hl_cs *cs)
115          return kref_get_unless_zero(&cs->refcount);

118  static void cs_put(struct hl_cs *cs)
120          kref_put(&cs->refcount, cs_do_release);
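These three helpers are thin wrappers around the kernel's generic kref API: cs_put() drops a reference and has kref_put() invoke cs_do_release() once the count reaches zero, while cs_get_unless_zero() is the safe way to take a reference on an object that may already be on its release path. A minimal standalone sketch of the same pattern, using a hypothetical struct foo in place of the driver's struct hl_cs:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref refcount;
        /* payload ... */
};

/* Release callback, called by kref_put() on the final reference. */
static void foo_release(struct kref *ref)
{
        struct foo *f = container_of(ref, struct foo, refcount);

        kfree(f);
}

static void foo_get(struct foo *f)
{
        kref_get(&f->refcount);
}

/* Returns 0 if the object is already being torn down. */
static int foo_get_unless_zero(struct foo *f)
{
        return kref_get_unless_zero(&f->refcount);
}

static void foo_put(struct foo *f)
{
        kref_put(&f->refcount, foo_release);
}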
cs_parser() (fills the parser descriptor from the job's parent CS):

152          parser.ctx_id = job->cs->ctx->asid;
153          parser.cs_sequence = job->cs->sequence;
free_job() (unlinks a job from its CS and drops the job's CS reference):

197          struct hl_cs *cs = job->cs;
232          spin_lock(&cs->job_lock);
234          spin_unlock(&cs->job_lock);
240          cs_put(cs);
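free_job() shows the locking discipline around the per-CS job list: the job is unlinked under cs->job_lock, and only afterwards is the reference the job held on its CS dropped, which may trigger cs_do_release(). A sketch of that shape, reusing struct foo from the sketch above plus a hypothetical job type:

#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_job {
        struct list_head node;
        struct foo *parent;     /* holds one reference on the parent */
};

static void foo_free_job(struct foo_job *job, spinlock_t *list_lock)
{
        struct foo *f = job->parent;

        spin_lock(list_lock);
        list_del(&job->node);
        spin_unlock(list_lock);

        kfree(job);

        /* Drop the job's reference; may free the parent via foo_release(). */
        foo_put(f);
}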
cs_do_release() (the kref release callback, invoked by cs_put() on the last reference):

261          struct hl_cs *cs = container_of(ref, struct hl_cs,
263          struct hl_device *hdev = cs->ctx->hdev;
266          cs->completed = true;
276          list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
280          if (cs->submitted) {
299                  hl_int_hw_queue_update_ci(cs);
303                  list_del_init(&cs->mirror_node);
310                  if ((!cs->timedout) &&
314                  if (cs->tdr_active)
315                          cancel_delayed_work_sync(&cs->work_tdr);
332          } else if (cs->type == CS_TYPE_WAIT) {
337                  hl_fence_put(cs->signal_fence);
344          hl_debugfs_remove_cs(cs);
346          hl_ctx_put(cs->ctx);
352          if (cs->timedout)
353                  cs->fence->error = -ETIMEDOUT;
354          else if (cs->aborted)
355                  cs->fence->error = -EIO;
356          else if (!cs->submitted)
357                  cs->fence->error = -EBUSY;
359          complete_all(&cs->fence->completion);
360          hl_fence_put(cs->fence);
361          cs_counters_aggregate(hdev, cs->ctx);
363          kfree(cs->jobs_in_queue_cnt);
364          kfree(cs);
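cs_do_release() is the other half of the refcount story: it detaches whatever state is still live (queue counters, the mirror-list node, the pending TDR work), translates how the CS ended into an error code on its fence, wakes every waiter, and frees the object. The error mapping reads straight off the listing: -ETIMEDOUT for a TDR timeout, -EIO for an abort, -EBUSY when the CS never reached the hardware. A standalone sketch of that final step, with a hypothetical fence type:

#include <linux/completion.h>
#include <linux/errno.h>

struct foo_fence {
        struct completion completion;
        int error;      /* 0 on success, negative errno otherwise */
};

/* Hypothetical: runs once, at release time. complete_all() rather
 * than complete() because any number of callers may be blocked on
 * the same submission.
 */
static void foo_fence_signal(struct foo_fence *fence, bool timedout,
                             bool aborted, bool submitted)
{
        if (timedout)
                fence->error = -ETIMEDOUT;
        else if (aborted)
                fence->error = -EIO;
        else if (!submitted)
                fence->error = -EBUSY;

        complete_all(&fence->completion);
}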
cs_timedout() (TDR delayed-work handler, armed through cs->work_tdr):

371          struct hl_cs *cs = container_of(work, struct hl_cs,
373          rc = cs_get_unless_zero(cs);
377          if ((!cs->submitted) || (cs->completed)) {
378                  cs_put(cs);
383          cs->timedout = true;
385          hdev = cs->ctx->hdev;
389                          cs->sequence);
391          cs_put(cs);
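cs_timedout() can race with a normal completion, so it opens with cs_get_unless_zero(): if the CS is already on its release path the handler backs off, and even after a successful get it rechecks submitted/completed before declaring a timeout. A sketch of that guard in a delayed-work handler, again reusing struct foo and its helpers:

#include <linux/workqueue.h>

struct foo_tdr {
        struct foo obj;                 /* embeds the refcount */
        struct delayed_work work_tdr;
        bool submitted;
        bool completed;
        bool timedout;
};

static void foo_tdr_timedout(struct work_struct *work)
{
        struct foo_tdr *t = container_of(work, struct foo_tdr,
                                                work_tdr.work);

        /* The object may already be going away; don't touch it if so. */
        if (!foo_get_unless_zero(&t->obj))
                return;

        /* Never reached the HW, or already done: nothing to recover. */
        if (!t->submitted || t->completed) {
                foo_put(&t->obj);
                return;
        }

        t->timedout = true;
        /* ... log and kick recovery ... */

        foo_put(&t->obj);
}

/* Armed at submission time with something like:
 *      INIT_DELAYED_WORK(&t->work_tdr, foo_tdr_timedout);
 *      schedule_delayed_work(&t->work_tdr, msecs_to_jiffies(timeout_ms));
 */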
allocate_cs() (allocates and initializes a CS; note both allocations use GFP_ATOMIC):

402          struct hl_cs *cs;
405          cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
406          if (!cs)
409          cs->ctx = ctx;
410          cs->submitted = false;
411          cs->completed = false;
412          cs->type = cs_type;
413          INIT_LIST_HEAD(&cs->job_list);
414          INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
415          kref_init(&cs->refcount);
416          spin_lock_init(&cs->job_lock);
425          cs_cmpl->type = cs->type;
427          cs->fence = &cs_cmpl->base_fence;
443          cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
444                                  sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
445          if (!cs->jobs_in_queue_cnt) {
453          cs->sequence = cs_cmpl->cs_seq;
466          *cs_new = cs;
474          kfree(cs);
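allocate_cs() is a textbook init sequence: kzalloc() the object, set up its list head, TDR work item, refcount, and lock, allocate the per-queue job counter array, and free the object again on any failure. A condensed standalone sketch of that shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct foo_cs {
        struct kref refcount;
        struct list_head job_list;
        spinlock_t job_lock;
        struct delayed_work work_tdr;
        u32 *jobs_in_queue_cnt; /* one counter per HW queue */
};

static void foo_cs_timedout(struct work_struct *work);  /* as sketched above */

static int foo_cs_alloc(struct foo_cs **out, u32 max_queues)
{
        struct foo_cs *cs;

        cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
        if (!cs)
                return -ENOMEM;

        INIT_LIST_HEAD(&cs->job_list);
        INIT_DELAYED_WORK(&cs->work_tdr, foo_cs_timedout);
        kref_init(&cs->refcount);       /* caller owns the first reference */
        spin_lock_init(&cs->job_lock);

        cs->jobs_in_queue_cnt = kcalloc(max_queues,
                                        sizeof(*cs->jobs_in_queue_cnt),
                                        GFP_ATOMIC);
        if (!cs->jobs_in_queue_cnt) {
                kfree(cs);
                return -ENOMEM;
        }

        *out = cs;
        return 0;
}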
cs_rollback() (frees every job still queued on the CS):

478  static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
482          list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
hl_cs_rollback_all() (aborts and rolls back every CS on the device's mirror list):

489          struct hl_cs *cs, *tmp;
496          list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
498                  cs_get(cs);
499                  cs->aborted = true;
501                          cs->ctx->asid, cs->sequence);
502                  cs_rollback(hdev, cs);
503                  cs_put(cs);
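Both rollback paths iterate with list_for_each_entry_safe(), the variant that caches the next node so the current one can be unlinked and freed mid-walk; hl_cs_rollback_all() additionally brackets each CS with cs_get()/cs_put() so the rollback cannot release it out from under the loop. A standalone sketch of safe deletion during iteration:

#include <linux/list.h>
#include <linux/slab.h>

struct foo_node {
        struct list_head link;
};

/* Plain list_for_each_entry() would read the next pointer out of a
 * node we just freed; the _safe variant keeps it in 'tmp' instead.
 */
static void foo_drain(struct list_head *head)
{
        struct foo_node *n, *tmp;

        list_for_each_entry_safe(n, tmp, head, link) {
                list_del(&n->link);
                kfree(n);
        }
}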
job_wq_completion() (workqueue handler that completes a job):

511          struct hl_cs *cs = job->cs;
512          struct hl_device *hdev = cs->ctx->hdev;
cs_ioctl_default() (the default submission path: build the CS, attach jobs, schedule):

614          struct hl_cs *cs;
647          rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
653          *cs_seq = cs->sequence;
655          hl_debugfs_add_cs(cs);
697                  job->cs = cs;
702                  cs->jobs_in_queue_cnt[job->hw_queue_id]++;
704                  list_add_tail(&job->cs_node, &cs->job_list);
714                  cs_get(cs);
723                          cs->ctx->asid, cs->sequence, job->id, rc);
732                          cs->ctx->asid, cs->sequence);
737          rc = hl_hw_queue_schedule_cs(cs);
742                          cs->ctx->asid, cs->sequence, rc);
755          cs_rollback(hdev, cs);
760          cs_put(cs);
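The whole submission path then reads naturally from the listing: allocate the CS, attach each job (bump the target queue's counter, link the job into cs->job_list, take one CS reference per job, which free_job() drops later), hand the CS to the hardware queues, and on failure roll back before dropping the submitter's own reference. A condensed, hypothetical shape of that flow, built on foo_cs from the allocation sketch; the job type and the declared helpers stand in for the driver's hl_cs_job, cs_get()/cs_put(), cs_rollback(), and hl_hw_queue_schedule_cs():

struct foo_cs_job {
        struct foo_cs *cs;
        struct list_head cs_node;
        u32 hw_queue_id;
};

void foo_cs_get(struct foo_cs *cs);
void foo_cs_put(struct foo_cs *cs);             /* may free the CS */
void foo_cs_rollback(struct foo_cs *cs);        /* frees queued jobs */
int foo_schedule_cs(struct foo_cs *cs);         /* hand off to HW queues */

static int foo_submit(struct foo_cs *cs, struct foo_cs_job *job)
{
        int rc;

        /* Attach the job (cf. lines 697-714). */
        job->cs = cs;
        cs->jobs_in_queue_cnt[job->hw_queue_id]++;
        list_add_tail(&job->cs_node, &cs->job_list);
        foo_cs_get(cs);                 /* the job's reference on its CS */

        /* Hand off to the HW queues (cf. line 737). */
        rc = foo_schedule_cs(cs);
        if (rc)
                foo_cs_rollback(cs);    /* frees jobs, drops their refs */

        foo_cs_put(cs);                 /* the submitter's reference */
        return rc;
}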
cs_ioctl_signal_wait() (same shape for CS_TYPE_SIGNAL/CS_TYPE_WAIT; a wait CS also records the signal fence):

777          struct hl_cs *cs;
901          rc = allocate_cs(hdev, ctx, cs_type, &cs);
913          if (cs->type == CS_TYPE_WAIT)
914                  cs->signal_fence = sig_fence;
916          hl_debugfs_add_cs(cs);
918          *cs_seq = cs->sequence;
928          if (cs->type == CS_TYPE_WAIT)
943          job->cs = cs;
959          cs->jobs_in_queue_cnt[job->hw_queue_id]++;
961          list_add_tail(&job->cs_node, &cs->job_list);
964          cs_get(cs);
968          rc = hl_hw_queue_schedule_cs(cs);
973                          ctx->asid, cs->sequence, rc);
981          cs_rollback(hdev, cs);
986          cs_put(cs);
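A wait CS stores the fence of the signal CS it waits on (line 914) and keeps it pinned for its whole lifetime, dropping the reference in cs_do_release() via hl_fence_put() (line 337); in the driver the reference appears to be taken when the signal fence is looked up, while the sketch below folds that into an explicit attach step. In terms of the kref'd struct foo from the top of this listing, with hypothetical helper names:

struct foo_wait {
        struct foo *signal_obj; /* pinned for the waiter's lifetime */
};

/* Pin at setup (cf. line 914)... */
static void foo_wait_attach(struct foo_wait *w, struct foo *sig)
{
        foo_get(sig);
        w->signal_obj = sig;
}

/* ...and unpin at release (cf. line 337). */
static void foo_wait_detach(struct foo_wait *w)
{
        if (w->signal_obj)
                foo_put(w->signal_obj);
        w->signal_obj = NULL;
}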