Searched full:job (Results 1 – 25 of 1261) sorted by relevance

/OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/common/
mali_pp_job.h
39 * This structure represents a PP job, including all sub jobs.
41 * The PP job object itself is not protected by any single lock,
43 * Think of the job object as moving between these sub systems through-out
44 * its lifetime. Different part of the PP job struct is used by different
55 struct mali_session_data *session; /**< Session which submitted this job */
58 …u32 id; /**< Identifier for this job in kernel space (s…
60 struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
61 …_mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job
63 u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
64 u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
[all …]
mali_soft_job.c
24 MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system)); in mali_soft_job_system_lock()
32 MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system)); in mali_soft_job_system_unlock()
90 … void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job) in mali_soft_job_system_free_job() argument
92 MALI_DEBUG_ASSERT_POINTER(job); in mali_soft_job_system_free_job()
95 mali_soft_job_system_lock(job->system); in mali_soft_job_system_free_job()
97 MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id); in mali_soft_job_system_free_job()
98 MALI_DEBUG_ASSERT(system == job->system); in mali_soft_job_system_free_job()
100 _mali_osk_list_del(&(job->system_list)); in mali_soft_job_system_free_job()
102 mali_soft_job_system_unlock(job->system); in mali_soft_job_system_free_job()
104 _mali_osk_free(job); in mali_soft_job_system_free_job()
[all …]
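
The mali_soft_job.c snippet above shows the usual teardown order for returning a job to its owning system: validate the pointers, take the system lock, unlink the job from the system's list, drop the lock, then free the memory. A minimal sketch of that pattern, using generic kernel list/mutex APIs; the struct layout and names are illustrative, not the Mali driver's actual definitions:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct job_system {
	struct mutex lock;
	struct list_head jobs;          /* all jobs owned by this system */
};

struct job {
	struct job_system *system;      /* back-pointer, set at allocation */
	struct list_head system_list;   /* linked into system->jobs */
};

/* Unlink and free a job; mirrors the lock -> list_del -> unlock -> free order. */
static void job_system_free_job(struct job_system *system, struct job *job)
{
	if (!job || job->system != system)
		return;

	mutex_lock(&system->lock);
	list_del(&job->system_list);
	mutex_unlock(&system->lock);

	kfree(job);
}
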
mali_gp_job.h
26 * This structure represents a GP job
28 * The GP job object itself is not protected by any single lock,
30 * Think of the job object as moving between these sub systems through-out
31 * its lifetime. Different part of the GP job struct is used by different
42 struct mali_session_data *session; /**< Session which submitted this job */
45 …u32 id; /**< Identifier for this job in kernel space (s…
47 struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
48 …cker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
49 …_mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job
64 * Set by executor/group on job completion, read by scheduler when
[all …]
mali_pp_job.c
43 struct mali_pp_job *job; in mali_pp_job_create() local
46 job = _mali_osk_calloc(1, sizeof(struct mali_pp_job)); in mali_pp_job_create()
47 if (NULL != job) { in mali_pp_job_create()
49 _mali_osk_list_init(&job->list); in mali_pp_job_create()
50 _mali_osk_list_init(&job->session_fb_lookup_list); in mali_pp_job_create()
53 if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) { in mali_pp_job_create()
57 if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) { in mali_pp_job_create()
58 MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n")); in mali_pp_job_create()
62 if (!mali_pp_job_use_no_notification(job)) { in mali_pp_job_create()
63 job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(… in mali_pp_job_create()
[all …]
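
mali_pp_job_create() follows a common ioctl-path shape: zero-allocate the job, initialise its list heads, copy the userspace argument block, and reject it if the requested sub-job count exceeds the compile-time limit before allocating the completion notification. A hedged sketch of that shape; the types, field names and MAX_SUB_JOBS value are illustrative, not the driver's real ones:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#define MAX_SUB_JOBS 8                  /* illustrative limit */

struct start_job_args {
	__u32 num_cores;
	/* ... remaining userspace-supplied fields ... */
};

struct pp_job {
	struct start_job_args uargs;
	struct list_head list;
	struct list_head session_fb_lookup_list;
};

static struct pp_job *pp_job_create(const struct start_job_args __user *uargs)
{
	struct pp_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return NULL;

	INIT_LIST_HEAD(&job->list);
	INIT_LIST_HEAD(&job->session_fb_lookup_list);

	/* Copy the argument block from userspace; fail on a short copy. */
	if (copy_from_user(&job->uargs, uargs, sizeof(job->uargs)))
		goto fail;

	/* Validate user-controlled values before using them. */
	if (job->uargs.num_cores > MAX_SUB_JOBS)
		goto fail;

	return job;

fail:
	kfree(job);
	return NULL;
}
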
mali_scheduler.c
42 * If dma_buf with map on demand is used, we defer job queue
86 struct mali_session_data *session, struct mali_gp_job *job);
88 struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point);
90 static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
91 static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
93 static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
96 static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
100 static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
202 * until the first virtual job is present. in mali_scheduler_job_physical_head_count()
206 struct mali_pp_job *job; in mali_scheduler_job_physical_head_count() local
[all …]
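
mali_scheduler_job_physical_head_count() counts physical PP jobs at the head of the queue and stops at the first virtual job. A sketch of that "count until the first non-matching element" walk; the job fields are illustrative:

#include <linux/types.h>
#include <linux/list.h>

struct pp_job {
	struct list_head list;
	bool is_virtual;
};

/*
 * Count physical jobs at the head of the queue; stop as soon as the
 * first virtual job is reached.
 */
static u32 physical_head_count(struct list_head *queue)
{
	struct pp_job *job;
	u32 count = 0;

	list_for_each_entry(job, queue, list) {
		if (job->is_virtual)
			break;
		count++;
	}

	return count;
}
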
mali_gp_job.c
20 static void _mali_gp_del_varying_allocations(struct mali_gp_job *job);
24 struct mali_gp_job *job, in _mali_gp_add_varying_allocations() argument
50 /* add to gp job varying alloc list*/ in _mali_gp_add_varying_allocations()
51 list_move(&alloc_node->node, &job->varying_alloc); in _mali_gp_add_varying_allocations()
59 _mali_gp_del_varying_allocations(job); in _mali_gp_add_varying_allocations()
64 static void _mali_gp_del_varying_allocations(struct mali_gp_job *job) in _mali_gp_del_varying_allocations() argument
68 list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) { in _mali_gp_del_varying_allocations()
72 INIT_LIST_HEAD(&job->varying_alloc); in _mali_gp_del_varying_allocations()
77 struct mali_gp_job *job; in mali_gp_job_create() local
83 job = _mali_osk_calloc(1, sizeof(struct mali_gp_job)); in mali_gp_job_create()
[all …]
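
_mali_gp_del_varying_allocations() walks the per-job varying allocation list with list_for_each_entry_safe, so each node can be unlinked and freed during iteration, then reinitialises the list head. A minimal sketch of that teardown; the node type is illustrative:

#include <linux/slab.h>
#include <linux/list.h>

struct varying_alloc_node {
	struct list_head node;
	/* ... allocation bookkeeping ... */
};

struct gp_job {
	struct list_head varying_alloc;
};

static void gp_del_varying_allocations(struct gp_job *job)
{
	struct varying_alloc_node *alloc_node, *tmp_node;

	/* _safe variant: the current node is freed while iterating. */
	list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
		list_del(&alloc_node->node);
		kfree(alloc_node);
	}

	INIT_LIST_HEAD(&job->varying_alloc);
}
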
mali_soft_job.h
24 * Soft job types.
29 * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release job resource automatically
30 * in kernel when the job is activated.
38 * Soft job state.
40 …* mali_soft_job_system_start_job a job will first be allocated.The job's state set to MALI_SOFT_JO…
41 * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
45 * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
47 * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
62 * Soft job struct.
64 * Soft job can be used to represent any kind of CPU work done in kernel-space.
[all …]
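
The header comments describe a small per-job state machine: a soft job is allocated first, becomes STARTED once added to the timeline system, and may only be signalled while it is STARTED (or TIMED_OUT, for user-signalled jobs). A hedged sketch of such a state machine; the enum values and the transition check are illustrative, not the driver's exact definitions:

#include <linux/types.h>

enum soft_job_state {
	SOFT_JOB_STATE_ALLOCATED,   /* created, not yet on the timeline */
	SOFT_JOB_STATE_STARTED,     /* added to the timeline system */
	SOFT_JOB_STATE_TIMED_OUT,   /* timed out before being signalled */
	SOFT_JOB_STATE_SIGNALED,    /* completed */
};

struct soft_job {
	enum soft_job_state state;
};

/* Signalling is only legal from STARTED or TIMED_OUT. */
static bool soft_job_signal(struct soft_job *job)
{
	if (job->state != SOFT_JOB_STATE_STARTED &&
	    job->state != SOFT_JOB_STATE_TIMED_OUT)
		return false;

	job->state = SOFT_JOB_STATE_SIGNALED;
	return true;
}
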
/OK3568_Linux_fs/kernel/drivers/rknpu/
rknpu_job.c
61 static int rknn_get_task_number(struct rknpu_job *job, int core_index) in rknn_get_task_number() argument
63 int task_num = job->args->task_number; in rknn_get_task_number()
65 if (job->use_core_num == 2) in rknn_get_task_number()
66 task_num = job->args->subcore_task[core_index].task_number; in rknn_get_task_number()
67 else if (job->use_core_num == 3) in rknn_get_task_number()
68 task_num = job->args->subcore_task[core_index + 2].task_number; in rknn_get_task_number()
73 static void rknpu_job_free(struct rknpu_job *job) in rknpu_job_free() argument
79 (struct rknpu_gem_object *)(uintptr_t)job->args->task_obj_addr; in rknpu_job_free()
84 if (job->fence) in rknpu_job_free()
85 dma_fence_put(job->fence); in rknpu_job_free()
[all …]
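
rknn_get_task_number() picks the task count from a different slot of the subcore_task array depending on how many NPU cores the job uses. A sketch of that selection, closely mirroring the snippet; the struct layout and array size are illustrative:

#include <linux/types.h>

struct subcore_task {
	u32 task_number;
};

struct rknpu_args {
	u32 task_number;                   /* single-core task count */
	struct subcore_task subcore_task[5];
};

struct rknpu_job {
	struct rknpu_args *args;
	u32 use_core_num;
};

/* Dual-core jobs index entries [0..1], triple-core jobs entries [2..4]. */
static int get_task_number(struct rknpu_job *job, int core_index)
{
	int task_num = job->args->task_number;

	if (job->use_core_num == 2)
		task_num = job->args->subcore_task[core_index].task_number;
	else if (job->use_core_num == 3)
		task_num = job->args->subcore_task[core_index + 2].task_number;

	return task_num;
}
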
/OK3568_Linux_fs/kernel/drivers/gpu/host1x/
job.c
3 * Tegra host1x Job
21 #include "job.h"
29 struct host1x_job *job = NULL; in host1x_job_alloc() local
47 mem = job = kzalloc(total, GFP_KERNEL); in host1x_job_alloc()
48 if (!job) in host1x_job_alloc()
51 kref_init(&job->ref); in host1x_job_alloc()
52 job->channel = ch; in host1x_job_alloc()
56 job->relocs = num_relocs ? mem : NULL; in host1x_job_alloc()
58 job->unpins = num_unpins ? mem : NULL; in host1x_job_alloc()
60 job->gathers = num_cmdbufs ? mem : NULL; in host1x_job_alloc()
[all …]
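
host1x_job_alloc() makes a single kzalloc covering the job struct plus its relocation, unpin and gather tables, hands out sub-array pointers into that one block, and reference-counts the whole thing with a kref. A hedged sketch of the "one allocation, several trailing arrays" pattern; the types and layout are illustrative, and the real code additionally aligns each sub-array and guards against size overflow:

#include <linux/slab.h>
#include <linux/kref.h>

struct reloc  { int dummy; };
struct gather { int dummy; };

struct job {
	struct kref ref;
	unsigned int num_relocs;
	unsigned int num_gathers;
	struct reloc *relocs;     /* points into the same allocation */
	struct gather *gathers;   /* points into the same allocation */
};

static struct job *job_alloc(unsigned int num_relocs, unsigned int num_gathers)
{
	size_t total = sizeof(struct job) +
		       num_relocs * sizeof(struct reloc) +
		       num_gathers * sizeof(struct gather);
	struct job *job;
	void *mem;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->num_relocs = num_relocs;
	job->num_gathers = num_gathers;

	/* Carve the trailing tables out of the single allocation. */
	mem += sizeof(*job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct reloc);
	job->gathers = num_gathers ? mem : NULL;

	return job;
}
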
/OK3568_Linux_fs/kernel/drivers/video/rockchip/rve/
rve_job.c
18 struct rve_job *job; in rve_scheduler_get_pending_job_list() local
22 job = list_first_entry_or_null(&scheduler->todo_list, in rve_scheduler_get_pending_job_list()
27 return job; in rve_scheduler_get_pending_job_list()
34 struct rve_job *job; in rve_scheduler_get_running_job() local
38 job = scheduler->running_job; in rve_scheduler_get_running_job()
42 return job; in rve_scheduler_get_running_job()
45 static void rve_scheduler_set_pid_info(struct rve_job *job, ktime_t now) in rve_scheduler_set_pid_info() argument
52 scheduler = rve_job_get_scheduler(job); in rve_scheduler_set_pid_info()
56 scheduler->session.pid_info[i].pid = job->pid; in rve_scheduler_set_pid_info()
58 if (scheduler->session.pid_info[i].pid == job->pid) { in rve_scheduler_set_pid_info()
[all …]
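
rve_scheduler_get_pending_job_list() peeks at the head of the scheduler's todo list with list_first_entry_or_null, apparently under the scheduler lock (the locking lines are elided in the snippet). A sketch of that lookup; the lock type and field names are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

struct rve_job {
	struct list_head head;
};

struct rve_scheduler {
	spinlock_t irq_lock;
	struct list_head todo_list;
	struct rve_job *running_job;
};

/* Return the first queued job, or NULL if the queue is empty. */
static struct rve_job *get_pending_job(struct rve_scheduler *scheduler)
{
	struct rve_job *job;
	unsigned long flags;

	spin_lock_irqsave(&scheduler->irq_lock, flags);
	job = list_first_entry_or_null(&scheduler->todo_list,
				       struct rve_job, head);
	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	return job;
}
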
/OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
34 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
41 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
47 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
49 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
55 amdgpu_device_gpu_recover(ring->adev, job); in amdgpu_job_timedout()
64 struct amdgpu_job **job, struct amdgpu_vm *vm) in amdgpu_job_alloc() argument
73 *job = kzalloc(size, GFP_KERNEL); in amdgpu_job_alloc()
74 if (!*job) in amdgpu_job_alloc()
81 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
82 (*job)->vm = vm; in amdgpu_job_alloc()
[all …]
/OK3568_Linux_fs/kernel/block/
bsg-lib.c
39 struct bsg_job *job = blk_mq_rq_to_pdu(rq); in bsg_transport_fill_hdr() local
42 job->request_len = hdr->request_len; in bsg_transport_fill_hdr()
43 job->request = memdup_user(uptr64(hdr->request), hdr->request_len); in bsg_transport_fill_hdr()
44 if (IS_ERR(job->request)) in bsg_transport_fill_hdr()
45 return PTR_ERR(job->request); in bsg_transport_fill_hdr()
48 job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0); in bsg_transport_fill_hdr()
49 if (IS_ERR(job->bidi_rq)) { in bsg_transport_fill_hdr()
50 ret = PTR_ERR(job->bidi_rq); in bsg_transport_fill_hdr()
54 ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, in bsg_transport_fill_hdr()
60 job->bidi_bio = job->bidi_rq->bio; in bsg_transport_fill_hdr()
[all …]
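
bsg_transport_fill_hdr() copies the transport request blob out of userspace with memdup_user(), which returns either a freshly allocated kernel copy or an ERR_PTR that is propagated via PTR_ERR(). A minimal sketch of that copy-and-check step; the bidirectional request setup from the snippet is omitted and the struct fields are illustrative:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/uaccess.h>

struct bsg_job_sketch {
	void *request;
	unsigned int request_len;
};

static int fill_request(struct bsg_job_sketch *job,
			const void __user *ureq, unsigned int len)
{
	job->request_len = len;

	/* memdup_user() allocates and copies in one step. */
	job->request = memdup_user(ureq, len);
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);

	return 0;
}
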
/OK3568_Linux_fs/kernel/drivers/md/
dm-kcopyd.c
42 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
461 * Error state of the job.
481 * Set this to ensure you are notified when the job has
488 * These fields are only used if the job has been split
525 * Functions to push and pop a job onto the head of a given job
531 struct kcopyd_job *job; in pop_io_job() local
537 list_for_each_entry(job, jobs, list) { in pop_io_job()
538 if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) { in pop_io_job()
539 list_del(&job->list); in pop_io_job()
540 return job; in pop_io_job()
[all …]
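
pop_io_job() scans the job list and only pops a job that may be issued now: reads are always eligible, writes only when the sequential-write constraint is not set (the elided remainder of the function additionally keeps sequential writes in offset order). A sketch of that selective pop, with the flag handling simplified to a boolean:

#include <linux/types.h>
#include <linux/list.h>

struct kcopyd_job_sketch {
	struct list_head list;
	int rw;                 /* READ_OP or WRITE_OP */
	bool write_seq;         /* must be issued in sequence */
};

#define READ_OP  0
#define WRITE_OP 1

/* Pop the first job that may be issued now, or NULL if none qualifies. */
static struct kcopyd_job_sketch *pop_io_job(struct list_head *jobs)
{
	struct kcopyd_job_sketch *job;

	list_for_each_entry(job, jobs, list) {
		if (job->rw == READ_OP || !job->write_seq) {
			list_del(&job->list);
			return job;
		}
	}

	return NULL;
}
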
/OK3568_Linux_fs/kernel/drivers/gpu/drm/v3d/
v3d_sched.c
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_free() local
63 v3d_job_put(job); in v3d_job_free()
67 * Returns the fences that the job depends on, one by one.
76 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_dependency() local
82 if (!xa_empty(&job->deps)) in v3d_job_dependency()
83 return xa_erase(&job->deps, job->last_dep++); in v3d_job_dependency()
90 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_run() local
91 struct v3d_dev *v3d = job->base.v3d; in v3d_bin_job_run()
[all …]
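
v3d stores each job's dependency fences in an xarray, and the scheduler's dependency callback hands them back one at a time, erasing each entry as it is consumed so the same fence is never returned twice. A hedged sketch of that pattern; the dma_fence and xarray APIs are the real kernel ones, the job struct is illustrative:

#include <linux/xarray.h>
#include <linux/dma-fence.h>

struct v3d_job_sketch {
	struct xarray deps;        /* fence pointers indexed 0..n-1 */
	unsigned long last_dep;    /* next index to hand to the scheduler */
};

/*
 * Return the next unmet dependency, or NULL when all dependencies have
 * been consumed. xa_erase() removes the entry and returns what was stored.
 */
static struct dma_fence *next_dependency(struct v3d_job_sketch *job)
{
	if (!xa_empty(&job->deps))
		return xa_erase(&job->deps, job->last_dep++);

	return NULL;
}
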
v3d_gem.c
167 * need to wait for completion before dispatching the job -- in v3d_flush_l2t()
171 * synchronously clean after a job. in v3d_flush_l2t()
184 * signaling job completion. So, we synchronously wait before
250 v3d_lock_bo_reservations(struct v3d_job *job, in v3d_lock_bo_reservations() argument
255 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
259 for (i = 0; i < job->bo_count; i++) { in v3d_lock_bo_reservations()
260 ret = drm_gem_fence_array_add_implicit(&job->deps, in v3d_lock_bo_reservations()
261 job->bo[i], true); in v3d_lock_bo_reservations()
263 drm_gem_unlock_reservations(job->bo, job->bo_count, in v3d_lock_bo_reservations()
273 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
[all …]
/OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852be/phl/
phl_cmd_job.c
37 struct phl_cmd_job *job = (struct phl_cmd_job *)msg->param; in cmd_discard_msg_job() local
40 if (job->id == JOB_RUN_FUNC && in cmd_discard_msg_job()
41 (job->u.cmd.fptr || job->u.cmd.func)) { in cmd_discard_msg_job()
44 job_name(pcmd, (u8)job->id), in cmd_discard_msg_job()
45 (char *)job->u.cmd.name); in cmd_discard_msg_job()
47 /* we have to callback to core to free job in cmd_discard_msg_job()
50 job->u.cmd.fptr(job->u.cmd.priv, job->u.cmd.parm, true); in cmd_discard_msg_job()
54 job_name(pcmd, (u8)job->id)); in cmd_discard_msg_job()
60 enum rtw_phl_status rtw_phl_job_fill_fptr(void *phl, struct phl_cmd_job *job, in rtw_phl_job_fill_fptr() argument
66 job->id = JOB_RUN_FUNC; in rtw_phl_job_fill_fptr()
[all …]
phl_cmd_fsm.c
117 struct phl_cmd_job *job; in cmd_do_wdog_job() local
124 phl_list_for_loop(job, struct phl_cmd_job, &pcmd->wd_q, list) { in cmd_do_wdog_job()
125 job->u.cmd.fptr(job->u.cmd.priv, job->u.cmd.parm, false); in cmd_do_wdog_job()
130 job_name(pcmd, (u8)job->id), in cmd_do_wdog_job()
131 (char *)job->u.cmd.name); in cmd_do_wdog_job()
156 * This state has no power; Able to run no_io job
157 * For jobs don't need to request power (no_io job)
266 /* enqueue watchdog job */ in cmd_req_pwr_st_hdl()
287 struct phl_cmd_job *job; in cmd_service_st_hdl() local
301 job = (struct phl_cmd_job *)param; in cmd_service_st_hdl()
[all …]
/OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852bs/phl/
phl_cmd_job.c
37 struct phl_cmd_job *job = (struct phl_cmd_job *)msg->param; in cmd_discard_msg_job() local
40 if (job->id == JOB_RUN_FUNC && in cmd_discard_msg_job()
41 (job->u.cmd.fptr || job->u.cmd.func)) { in cmd_discard_msg_job()
44 job_name(pcmd, (u8)job->id), in cmd_discard_msg_job()
45 (char *)job->u.cmd.name); in cmd_discard_msg_job()
47 /* we have to callback to core to free job in cmd_discard_msg_job()
50 job->u.cmd.fptr(job->u.cmd.priv, job->u.cmd.parm, true); in cmd_discard_msg_job()
54 job_name(pcmd, (u8)job->id)); in cmd_discard_msg_job()
60 enum rtw_phl_status rtw_phl_job_fill_fptr(void *phl, struct phl_cmd_job *job, in rtw_phl_job_fill_fptr() argument
66 job->id = JOB_RUN_FUNC; in rtw_phl_job_fill_fptr()
[all …]
phl_cmd_fsm.c
117 struct phl_cmd_job *job; in cmd_do_wdog_job() local
124 phl_list_for_loop(job, struct phl_cmd_job, &pcmd->wd_q, list) { in cmd_do_wdog_job()
125 job->u.cmd.fptr(job->u.cmd.priv, job->u.cmd.parm, false); in cmd_do_wdog_job()
130 job_name(pcmd, (u8)job->id), in cmd_do_wdog_job()
131 (char *)job->u.cmd.name); in cmd_do_wdog_job()
156 * This state has no power; Able to run no_io job
157 * For jobs don't need to request power (no_io job)
266 /* enqueue watchdog job */ in cmd_req_pwr_st_hdl()
287 struct phl_cmd_job *job; in cmd_service_st_hdl() local
301 job = (struct phl_cmd_job *)param; in cmd_service_st_hdl()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/panfrost/
panfrost_job.c
112 static int panfrost_job_get_slot(struct panfrost_job *job) in panfrost_job_get_slot() argument
118 if (job->requirements & PANFROST_JD_REQ_FS) in panfrost_job_get_slot()
123 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { in panfrost_job_get_slot()
124 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && in panfrost_job_get_slot()
125 (job->pfdev->features.nr_core_groups == 2)) in panfrost_job_get_slot()
127 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) in panfrost_job_get_slot()
151 static void panfrost_job_hw_submit(struct panfrost_job *job, int js) in panfrost_job_hw_submit() argument
153 struct panfrost_device *pfdev = job->pfdev; in panfrost_job_hw_submit()
155 u64 jc_head = job->jc; in panfrost_job_hw_submit()
168 cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); in panfrost_job_hw_submit()
[all …]
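
panfrost_job_get_slot() maps job requirement flags to a hardware job slot: fragment jobs go to slot 0, everything else to slot 1, with compute-only jobs optionally diverted to slot 2 on two-core-group GPUs or when an erratum requires it. A sketch of that decision tree; the flag bit values and feature struct are illustrative placeholders, not the real UAPI constants:

#include <linux/types.h>
#include <linux/bits.h>

#define REQ_FS            BIT(0)   /* fragment job        (illustrative value) */
#define REQ_ONLY_COMPUTE  BIT(1)   /* compute-only job    (illustrative value) */
#define REQ_CORE_GRP_MASK BIT(2)   /* wants 2nd core group (illustrative value) */

struct gpu_features {
	unsigned int nr_core_groups;
	bool has_issue_8987;           /* erratum: route compute to slot 2 */
};

static int job_get_slot(u32 requirements, const struct gpu_features *f)
{
	if (requirements & REQ_FS)
		return 0;               /* fragment jobs always use slot 0 */

	if (requirements & REQ_ONLY_COMPUTE) {
		if ((requirements & REQ_CORE_GRP_MASK) && f->nr_core_groups == 2)
			return 2;       /* second core group gets slot 2 */
		if (f->has_issue_8987)
			return 2;
	}

	return 1;                       /* default vertex/tiler/compute slot */
}
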
/OK3568_Linux_fs/kernel/drivers/video/rockchip/rga3/
rga_job.c
17 static void rga_job_free(struct rga_job *job) in rga_job_free() argument
19 free_page((unsigned long)job); in rga_job_free()
24 struct rga_job *job; in rga_job_kref_release() local
26 job = container_of(ref, struct rga_job, refcount); in rga_job_kref_release()
28 rga_job_free(job); in rga_job_kref_release()
31 static int rga_job_put(struct rga_job *job) in rga_job_put() argument
33 return kref_put(&job->refcount, rga_job_kref_release); in rga_job_put()
36 static void rga_job_get(struct rga_job *job) in rga_job_get() argument
38 kref_get(&job->refcount); in rga_job_get()
41 static int rga_job_cleanup(struct rga_job *job) in rga_job_cleanup() argument
[all …]
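
rga_job.c reference-counts each job with a kref: rga_job_get()/rga_job_put() wrap kref_get()/kref_put(), and the release callback recovers the job from the embedded kref with container_of() before freeing it (as a whole page, matching the free_page() call in the snippet). A sketch of that lifetime pattern; the struct is illustrative:

#include <linux/gfp.h>
#include <linux/kref.h>

struct rga_job_sketch {
	struct kref refcount;
	/* ... request state ... */
};

static void job_kref_release(struct kref *ref)
{
	struct rga_job_sketch *job =
		container_of(ref, struct rga_job_sketch, refcount);

	/* The job was allocated as a whole page, so free it as one. */
	free_page((unsigned long)job);
}

static inline void job_get(struct rga_job_sketch *job)
{
	kref_get(&job->refcount);
}

/* Returns nonzero if this put released the job. */
static inline int job_put(struct rga_job_sketch *job)
{
	return kref_put(&job->refcount, job_kref_release);
}

static struct rga_job_sketch *job_alloc(void)
{
	struct rga_job_sketch *job =
		(struct rga_job_sketch *)get_zeroed_page(GFP_KERNEL);

	if (job)
		kref_init(&job->refcount);

	return job;
}
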
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/
mali_kbase_replay.c
20 * Replay soft job handlers
68 struct job_descriptor_header *job) in dump_job_head() argument
86 job, job->exception_status, in dump_job_head()
87 JOB_SOURCE_ID(job->exception_status), in dump_job_head()
88 (job->exception_status >> 8) & 0x3, in dump_job_head()
89 job->exception_status & 0xFF, in dump_job_head()
90 job->first_incomplete_task, in dump_job_head()
91 job->fault_pointer, job->job_descriptor_size, in dump_job_head()
92 job->job_type, job->job_barrier, job->_reserved_01, in dump_job_head()
93 job->_reserved_02, job->_reserved_03, in dump_job_head()
[all …]
/OK3568_Linux_fs/kernel/drivers/misc/habanalabs/common/
command_submission.c
123 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) in is_cb_patched() argument
129 return (job->queue_type == QUEUE_TYPE_EXT || in is_cb_patched()
130 (job->queue_type == QUEUE_TYPE_HW && in is_cb_patched()
131 job->is_kernel_allocated_cb && in is_cb_patched()
139 * @job : pointer to the job that holds the command submission info
146 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) in cs_parser() argument
152 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
153 parser.cs_sequence = job->cs->sequence; in cs_parser()
154 parser.job_id = job->id; in cs_parser()
156 parser.hw_queue_id = job->hw_queue_id; in cs_parser()
[all …]
/OK3568_Linux_fs/kernel/include/uapi/gpu/arm/bifrost/jm/
mali_base_jm_kernel.h
111 /* Private flag tracking whether job descriptor dumping is disabled */
134 * struct base_jd_udata - Per-job data
136 * @blob: per-job data array
138 * This structure is used to store per-job data, and is completely unused
140 * function pointer, data to handle job completion. It is guaranteed to be
148 * typedef base_jd_dep_type - Job dependency type.
164 * typedef base_jd_core_req - Job chain hardware requirements.
166 * A job chain must specify what GPU features it needs to allow the
167 * driver to schedule the job correctly. By not specifying the
168 * correct settings can/will cause an early job termination. Multiple
[all …]
/OK3568_Linux_fs/kernel/include/uapi/gpu/arm/bifrost/gpu/backend/
mali_kbase_gpu_regmap_jm.h
29 /* Job control registers */
31 #define JS_HEAD_LO 0x00 /* (RO) Job queue head pointer for job slot n, low word */
32 #define JS_HEAD_HI 0x04 /* (RO) Job queue head pointer for job slot n, high word */
33 #define JS_TAIL_LO 0x08 /* (RO) Job queue tail pointer for job slot n, low word */
34 #define JS_TAIL_HI 0x0C /* (RO) Job queue tail pointer for job slot n, high word */
35 #define JS_AFFINITY_LO 0x10 /* (RO) Core affinity mask for job slot n, low word */
36 #define JS_AFFINITY_HI 0x14 /* (RO) Core affinity mask for job slot n, high word */
37 #define JS_CONFIG 0x18 /* (RO) Configuration settings for job slot n */
39 #define JS_HEAD_NEXT_LO 0x40 /* (RW) Next job queue head pointer for job slot n, low word */
40 #define JS_HEAD_NEXT_HI 0x44 /* (RW) Next job queue head pointer for job slot n, high word */
[all …]
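
The register map above lists the per-slot job control registers (JS_HEAD, JS_TAIL, JS_AFFINITY, JS_CONFIG, plus the _NEXT variants used to queue the following job). Each job slot exposes the same register block at a fixed stride, so a per-slot register address is computed from the slot index. A hedged sketch of that addressing; the slot base and stride below are illustrative placeholders, not the Mali GPU's documented offsets:

#include <linux/types.h>

/* Offsets within one job-slot register block (from the header above). */
#define JS_HEAD_LO       0x00
#define JS_HEAD_HI       0x04
#define JS_TAIL_LO       0x08
#define JS_TAIL_HI       0x0C
#define JS_AFFINITY_LO   0x10
#define JS_AFFINITY_HI   0x14
#define JS_CONFIG        0x18
#define JS_HEAD_NEXT_LO  0x40
#define JS_HEAD_NEXT_HI  0x44

/* Illustrative placeholders: not the documented Mali values. */
#define JOB_SLOT0_BASE   0x800
#define JOB_SLOT_STRIDE  0x80

/* Byte offset of register 'reg' for job slot 'n'. */
static inline u32 job_slot_reg(unsigned int n, u32 reg)
{
	return JOB_SLOT0_BASE + n * JOB_SLOT_STRIDE + reg;
}
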
