// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */

#define pr_fmt(fmt) "rve_job: " fmt

#include "rve_job.h"
#include "rve_fence.h"
#include "rve_reg.h"

struct rve_job *
rve_scheduler_get_pending_job_list(struct rve_scheduler_t *scheduler)
{
	unsigned long flags;
	struct rve_job *job;

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	job = list_first_entry_or_null(&scheduler->todo_list,
				       struct rve_job, head);

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	return job;
}

struct rve_job *
rve_scheduler_get_running_job(struct rve_scheduler_t *scheduler)
{
	unsigned long flags;
	struct rve_job *job;

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	job = scheduler->running_job;

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	return job;
}

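/*
 * Account the finished job's hardware time to the per-PID statistics of the
 * scheduler session. If the PID has no slot yet, the slot with the smallest
 * accumulated hardware time is recycled for it.
 */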
static void rve_scheduler_set_pid_info(struct rve_job *job, ktime_t now)
{
	struct rve_scheduler_t *scheduler;
	bool pid_match_flag = false;
	ktime_t tmp = 0;
	int pid_mark = 0, i;

	scheduler = rve_job_get_scheduler(job);

	for (i = 0; i < RVE_MAX_PID_INFO; i++) {
		if (scheduler->session.pid_info[i].pid == 0)
			scheduler->session.pid_info[i].pid = job->pid;

		if (scheduler->session.pid_info[i].pid == job->pid) {
			pid_match_flag = true;
			/* accumulate the elapsed hardware time in microseconds */
			scheduler->session.pid_info[i].hw_time_total +=
				ktime_us_delta(now, job->hw_running_time);
			break;
		}
	}

	if (!pid_match_flag) {
		/* no slot for this pid: recycle the slot with the least hw time */
		for (i = 0; i < RVE_MAX_PID_INFO; i++) {
			if (i == 0) {
				tmp = scheduler->session.pid_info[i].hw_time_total;
				continue;
			}

			if (tmp > scheduler->session.pid_info[i].hw_time_total) {
				tmp = scheduler->session.pid_info[i].hw_time_total;
				pid_mark = i;
			}
		}

		scheduler->session.pid_info[pid_mark].pid = job->pid;
		scheduler->session.pid_info[pid_mark].hw_time_total +=
			ktime_us_delta(now, job->hw_running_time);
	}
}

struct rve_scheduler_t *rve_job_get_scheduler(struct rve_job *job)
{
	return job->scheduler;
}

struct rve_internal_ctx_t *rve_job_get_internal_ctx(struct rve_job *job)
{
	return job->ctx;
}

static void rve_job_free(struct rve_job *job)
{
#ifdef CONFIG_SYNC_FILE
	if (job->out_fence)
		dma_fence_put(job->out_fence);
#endif

	free_page((unsigned long)job);
}

static int rve_job_cleanup(struct rve_job *job)
{
	ktime_t now = ktime_get();

	if (DEBUGGER_EN(TIME)) {
		pr_info("(pid:%d) job clean use time = %lld\n", job->pid,
			ktime_us_delta(now, job->timestamp));
	}
	rve_job_free(job);

	return 0;
}

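/*
 * Drop every job that a closing session still has queued: walk each
 * scheduler's todo_list and free the jobs belonging to this session.
 * The spinlock is dropped around each rve_job_free() and re-taken
 * before the walk continues.
 */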
void rve_job_session_destroy(struct rve_session *session)
{
	struct rve_scheduler_t *scheduler = NULL;
	struct rve_job *job_pos, *job_q;
	int i;

	unsigned long flags;

	for (i = 0; i < rve_drvdata->num_of_scheduler; i++) {
		scheduler = rve_drvdata->scheduler[i];

		spin_lock_irqsave(&scheduler->irq_lock, flags);

		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
			if (session == job_pos->session) {
				list_del(&job_pos->head);

				spin_unlock_irqrestore(&scheduler->irq_lock, flags);

				rve_job_free(job_pos);

				spin_lock_irqsave(&scheduler->irq_lock, flags);
			}
		}

		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
	}
}

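/*
 * Allocate and initialize a job from a zeroed page: record the submitting
 * PID and timestamp, point the job at the ctx's current register command
 * buffer, bind it to the first scheduler and clamp the requested priority.
 */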
static struct rve_job *rve_job_alloc(struct rve_internal_ctx_t *ctx)
{
	struct rve_job *job = NULL;

	job = (struct rve_job *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!job)
		return NULL;

#ifdef CONFIG_SYNC_FILE
	spin_lock_init(&job->fence_lock);
#endif
	INIT_LIST_HEAD(&job->head);

	job->timestamp = ktime_get();
	job->pid = current->pid;
	job->regcmd_data = &ctx->regcmd_data[ctx->running_job_count];

	job->scheduler = rve_drvdata->scheduler[0];
	job->core = rve_drvdata->scheduler[0]->core;
	job->ctx = ctx;
	ctx->scheduler = job->scheduler;
	job->session = ctx->session;

	if (ctx->priority > 0) {
		if (ctx->priority > RVE_SCHED_PRIORITY_MAX)
			job->priority = RVE_SCHED_PRIORITY_MAX;
		else
			job->priority = ctx->priority;
	}

	return job;
}

static void rve_job_dump_info(struct rve_job *job)
{
	pr_info("job: priority = %d, core = %d\n",
		job->priority, job->core);
}

static int rve_job_run(struct rve_job *job)
{
	struct rve_scheduler_t *scheduler;
	int ret = 0;

	scheduler = rve_job_get_scheduler(job);

#ifndef RVE_PD_AWAYS_ON
	/* enable power */
	ret = rve_power_enable(scheduler);
	if (ret < 0) {
		pr_err("power enable failed");
		return ret;
	}
#endif

	ret = scheduler->ops->init_reg(job);
	if (ret < 0) {
		pr_err("init reg failed");
		goto failed;
	}

	ret = scheduler->ops->set_reg(job, scheduler);
	if (ret < 0) {
		pr_err("set reg failed");
		goto failed;
	}

	/* for debug */
	if (DEBUGGER_EN(MSG))
		rve_job_dump_info(job);

	return ret;

failed:
#ifndef RVE_PD_AWAYS_ON
	rve_power_disable(scheduler);
#endif

	return ret;
}

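/*
 * Pop the next job from the todo_list and start it on the hardware. If
 * rve_job_run() fails before the hardware starts, the job is signalled
 * with its error code and the next queued job is tried.
 */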
static void rve_job_next(struct rve_scheduler_t *scheduler)
{
	struct rve_job *job = NULL;
	unsigned long flags;

next_job:
	spin_lock_irqsave(&scheduler->irq_lock, flags);

	if (scheduler->running_job ||
	    list_empty(&scheduler->todo_list)) {
		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
		return;
	}

	job = list_first_entry(&scheduler->todo_list, struct rve_job, head);

	list_del_init(&job->head);

	scheduler->job_count--;

	scheduler->running_job = job;

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	job->ret = rve_job_run(job);

	/* an error occurred before the hardware started */
	if (job->ret < 0) {
		pr_err("rve_job_run failed before hw start, %s(%d)\n",
		       __func__, __LINE__);

		spin_lock_irqsave(&scheduler->irq_lock, flags);

		scheduler->running_job = NULL;

		spin_unlock_irqrestore(&scheduler->irq_lock, flags);

		rve_internal_ctx_signal(job);

		goto next_job;
	}
}

static void rve_job_finish_and_next(struct rve_job *job, int ret)
{
	ktime_t now = ktime_get();
	struct rve_scheduler_t *scheduler;

	job->ret = ret;

	scheduler = rve_job_get_scheduler(job);

	if (DEBUGGER_EN(TIME)) {
		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
			ktime_us_delta(now, job->timestamp));
	}

	rve_internal_ctx_signal(job);

	rve_job_next(scheduler);

#ifndef RVE_PD_AWAYS_ON
	rve_power_disable(scheduler);
#endif
}

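/*
 * IRQ-thread completion path: detach the finished job from the scheduler,
 * update load and per-context timing statistics, copy the CFG registers
 * back into the job's command buffer, then signal the context and kick
 * the next queued job.
 */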
void rve_job_done(struct rve_scheduler_t *scheduler, int ret)
{
	struct rve_job *job;
	unsigned long flags;
	u32 error_flag;
	uint32_t *cmd_reg;
	int i;

	ktime_t now = ktime_get();

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	job = scheduler->running_job;
	scheduler->running_job = NULL;

	scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);

	rve_scheduler_set_pid_info(job, now);

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	spin_lock_irqsave(&job->ctx->lock, flags);

	job->ctx->debug_info.max_cost_time_per_sec =
		max(job->ctx->debug_info.last_job_hw_use_time,
		    job->ctx->debug_info.max_cost_time_per_sec);
	job->ctx->debug_info.last_job_hw_use_time = ktime_us_delta(now, job->hw_running_time);
	job->ctx->debug_info.hw_time_total += job->ctx->debug_info.last_job_hw_use_time;
	job->ctx->debug_info.last_job_use_time = ktime_us_delta(now, job->timestamp);

	spin_unlock_irqrestore(&job->ctx->lock, flags);

	/* record the CFG registers so they can be copied back to userspace */
	cmd_reg = job->regcmd_data->cmd_reg;
	for (i = 0; i < 40; i++)
		cmd_reg[18 + i] = rve_read(RVE_CFG_REG + i * 4, scheduler);

	error_flag = rve_read(RVE_SWREG6_IVE_WORK_STA, scheduler);

	rve_get_monitor_info(job);

	if (DEBUGGER_EN(MSG))
		pr_info("irq thread work_status[%.8x]\n", error_flag);

	/* disable LLP enable, TODO: support pause mode */
	rve_write(0, RVE_SWLTB3_ENABLE, scheduler);

	rve_job_finish_and_next(job, ret);
}

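/*
 * Recover the hardware from a stuck asynchronous job: if the current
 * running job is async and has exceeded RVE_ASYNC_TIMEOUT_DELAY, drop it,
 * soft-reset the core and signal its context.
 */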
static void rve_job_timeout_clean(struct rve_scheduler_t *scheduler)
{
	unsigned long flags;
	struct rve_job *job = NULL;
	ktime_t now = ktime_get();

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	job = scheduler->running_job;
	if (job && (job->flags & RVE_ASYNC) &&
	    (ktime_to_ms(ktime_sub(now, job->hw_running_time)) >= RVE_ASYNC_TIMEOUT_DELAY)) {
		scheduler->running_job = NULL;

		spin_unlock_irqrestore(&scheduler->irq_lock, flags);

		scheduler->ops->soft_reset(scheduler);

		rve_internal_ctx_signal(job);

#ifndef RVE_PD_AWAYS_ON
		rve_power_disable(scheduler);
#endif
	} else {
		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
	}
}

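/*
 * Queue a job on its scheduler. Default-priority jobs go to the tail of
 * todo_list; other jobs are inserted according to their priority, and the
 * queued jobs behind the insertion point get their priority bumped so they
 * are not starved. Finally the scheduler is kicked via rve_job_next().
 */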
static struct rve_scheduler_t *rve_job_schedule(struct rve_job *job)
{
	unsigned long flags;
	struct rve_scheduler_t *scheduler = NULL;
	struct rve_job *job_pos;
	bool first_match = false;

	scheduler = rve_job_get_scheduler(job);
	if (scheduler == NULL) {
		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
		return NULL;
	}

	/* only async jobs are subject to timeout cleanup */
	rve_job_timeout_clean(scheduler);

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	/* priority policy set by userspace */
	if (list_empty(&scheduler->todo_list)
	    || (job->priority == RVE_SCHED_PRIORITY_DEFAULT)) {
		list_add_tail(&job->head, &scheduler->todo_list);
	} else {
		list_for_each_entry(job_pos, &scheduler->todo_list, head) {
			if (job->priority > job_pos->priority &&
			    (!first_match)) {
				list_add(&job->head, &job_pos->head);
				first_match = true;
			}

			/*
			 * Increase the priority of subsequent tasks
			 * after inserting into the list
			 */
			if (first_match)
				job_pos->priority++;
		}

		if (!first_match)
			list_add_tail(&job->head, &scheduler->todo_list);
	}

	scheduler->job_count++;

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	rve_job_next(scheduler);

	return scheduler;
}

static void rve_job_abort_running(struct rve_job *job)
{
	unsigned long flags;
	struct rve_scheduler_t *scheduler;

	scheduler = rve_job_get_scheduler(job);

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	/* detach the job if it is still the one running on the hardware */
	if (job == scheduler->running_job)
		scheduler->running_job = NULL;

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	rve_job_cleanup(job);
}

static void rve_job_abort_invalid(struct rve_job *job)
{
	rve_job_cleanup(job);
}

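/*
 * Synchronous completion: block until every command of the job's context
 * has finished or the per-command timeout expires. On timeout the core is
 * soft-reset and -EBUSY is returned.
 */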
static inline int rve_job_wait(struct rve_job *job)
{
	struct rve_scheduler_t *scheduler;

	int left_time;
	ktime_t now;
	int ret;

	scheduler = rve_job_get_scheduler(job);

	left_time = wait_event_timeout(scheduler->job_done_wq,
				       job->ctx->finished_job_count == job->ctx->cmd_num,
				       RVE_SYNC_TIMEOUT_DELAY * job->ctx->cmd_num);

	switch (left_time) {
	case 0:
		pr_err("%s timeout", __func__);
		scheduler->ops->soft_reset(scheduler);
		ret = -EBUSY;
		break;
	case -ERESTARTSYS:
		ret = -ERESTARTSYS;
		break;
	default:
		ret = 0;
		break;
	}

	now = ktime_get();

	if (DEBUGGER_EN(TIME))
		pr_info("%s use time = %lld\n", __func__,
			ktime_to_us(ktime_sub(now, job->hw_running_time)));

	return ret;
}

#ifdef CONFIG_SYNC_FILE
static void rve_job_input_fence_signaled(struct dma_fence *fence,
					 struct dma_fence_cb *_waiter)
{
	struct rve_fence_waiter *waiter = (struct rve_fence_waiter *)_waiter;
	struct rve_scheduler_t *scheduler = NULL;

	ktime_t now;

	now = ktime_get();

	if (DEBUGGER_EN(TIME))
		pr_err("rve job wait in_fence signal use time = %lld\n",
		       ktime_to_us(ktime_sub(now, waiter->job->timestamp)));

	scheduler = rve_job_schedule(waiter->job);

	if (scheduler == NULL)
		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);

	kfree(waiter);
}
#endif

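/*
 * IOCTL helper: copy the register command buffer and submission parameters
 * from userspace into the internal context. Re-configuration is rejected
 * while the context is running.
 */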
int rve_job_config_by_user_ctx(struct rve_user_ctx_t *user_ctx)
{
	struct rve_pending_ctx_manager *ctx_manager;
	struct rve_internal_ctx_t *ctx;
	int ret = 0;
	unsigned long flags;

	ctx_manager = rve_drvdata->pend_ctx_manager;

	ctx = rve_internal_ctx_lookup(ctx_manager, user_ctx->id);
	if (IS_ERR_OR_NULL(ctx)) {
		pr_err("can not find internal ctx from id[%d]", user_ctx->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctx->lock, flags);

	if (ctx->is_running) {
		pr_err("can not re-config when ctx is running");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EFAULT;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	/* TODO: user cmd_num */
	user_ctx->cmd_num = 1;

	if (ctx->regcmd_data == NULL) {
		ctx->regcmd_data = kmalloc_array(user_ctx->cmd_num,
						 sizeof(struct rve_cmd_reg_array_t), GFP_KERNEL);
		if (ctx->regcmd_data == NULL) {
			pr_err("regcmd_data alloc error!\n");
			return -ENOMEM;
		}
	}

	if (unlikely(copy_from_user(ctx->regcmd_data,
				    u64_to_user_ptr(user_ctx->regcmd_data),
				    sizeof(struct rve_cmd_reg_array_t) * user_ctx->cmd_num))) {
		pr_err("regcmd_data copy_from_user failed\n");
		ret = -EFAULT;

		goto err_free_regcmd_data;
	}

	ctx->sync_mode = user_ctx->sync_mode;
	ctx->cmd_num = user_ctx->cmd_num;
	ctx->priority = user_ctx->priority;
	ctx->in_fence_fd = user_ctx->in_fence_fd;

	/* TODO: cmd addr */

	return ret;

err_free_regcmd_data:
	kfree(ctx->regcmd_data);
	/* avoid a dangling pointer if the ctx is configured again */
	ctx->regcmd_data = NULL;
	return ret;
}

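/*
 * IOCTL helper: reset the context's bookkeeping, submit one job per
 * command, report the out-fence fd and copy the (possibly updated)
 * register command buffer back to userspace. In synchronous mode the
 * context reference is dropped unless auto-cancel is disabled.
 */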
int rve_job_commit_by_user_ctx(struct rve_user_ctx_t *user_ctx)
{
	struct rve_pending_ctx_manager *ctx_manager;
	struct rve_internal_ctx_t *ctx;
	int ret = 0;
	unsigned long flags;
	int i;

	ctx_manager = rve_drvdata->pend_ctx_manager;

	ctx = rve_internal_ctx_lookup(ctx_manager, user_ctx->id);
	if (IS_ERR_OR_NULL(ctx)) {
		pr_err("can not find internal ctx from id[%d]", user_ctx->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctx->lock, flags);

	if (ctx->is_running) {
		pr_err("can not commit again while ctx is running");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EFAULT;
	}

	/* reset bookkeeping for this commit */
	ctx->finished_job_count = 0;
	ctx->running_job_count = 0;
	ctx->is_running = true;
	ctx->disable_auto_cancel = user_ctx->disable_auto_cancel;

	ctx->sync_mode = user_ctx->sync_mode;
	if (ctx->sync_mode == 0)
		ctx->sync_mode = RVE_SYNC;

	spin_unlock_irqrestore(&ctx->lock, flags);

	for (i = 0; i < ctx->cmd_num; i++) {
		ret = rve_job_commit(ctx);
		if (ret < 0) {
			pr_err("rve_job_commit failed, i = %d\n", i);
			return -EFAULT;
		}

		ctx->running_job_count++;
	}

	user_ctx->out_fence_fd = ctx->out_fence_fd;

	if (unlikely(copy_to_user(u64_to_user_ptr(user_ctx->regcmd_data),
				  ctx->regcmd_data,
				  sizeof(struct rve_cmd_reg_array_t) * ctx->cmd_num))) {
		pr_err("ctx->regcmd_data copy_to_user failed\n");
		return -EFAULT;
	}

	if (!ctx->disable_auto_cancel && ctx->sync_mode == RVE_SYNC)
		kref_put(&ctx->refcount, rve_internal_ctx_kref_release);

	return ret;
}

int rve_job_cancel_by_user_ctx(uint32_t ctx_id)
{
	struct rve_pending_ctx_manager *ctx_manager;
	struct rve_internal_ctx_t *ctx;
	int ret = 0;

	ctx_manager = rve_drvdata->pend_ctx_manager;

	ctx = rve_internal_ctx_lookup(ctx_manager, ctx_id);
	if (IS_ERR_OR_NULL(ctx)) {
		pr_err("can not find internal ctx from id[%d]", ctx_id);
		return -EINVAL;
	}

	kref_put(&ctx->refcount, rve_internal_ctx_kref_release);

	return ret;
}

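/*
 * Build one job for the context and hand it to the scheduler. In RVE_ASYNC
 * mode an out-fence is created for userspace and, when an in-fence fd is
 * supplied, the job is either queued immediately (fence already signaled)
 * or deferred to rve_job_input_fence_signaled(). In RVE_SYNC mode the call
 * blocks in rve_job_wait() until the job completes.
 */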
int rve_job_commit(struct rve_internal_ctx_t *ctx)
{
	struct rve_job *job = NULL;
	struct rve_scheduler_t *scheduler = NULL;
#ifdef CONFIG_SYNC_FILE
	struct dma_fence *in_fence;
#endif
	int ret = 0;

	job = rve_job_alloc(ctx);
	if (!job) {
		pr_err("failed to alloc rve job!\n");
		return -ENOMEM;
	}

	if (ctx->sync_mode == RVE_ASYNC) {
#ifdef CONFIG_SYNC_FILE
		job->flags |= RVE_ASYNC;

		if (!ctx->out_fence) {
			ret = rve_out_fence_alloc(job);
			if (ret) {
				rve_job_free(job);
				return ret;
			}
		}

		ctx->out_fence = job->out_fence;

		ctx->out_fence_fd = rve_out_fence_get_fd(job);

		if (ctx->out_fence_fd < 0)
			pr_err("out fence get fd failed");

		if (DEBUGGER_EN(MSG))
			pr_info("in_fence_fd = %d", ctx->in_fence_fd);

		/* if an input fence was supplied */
		if (ctx->in_fence_fd > 0) {
			in_fence = rve_get_input_fence(
				ctx->in_fence_fd);
			if (!in_fence) {
				pr_err("%s: failed to get input dma_fence\n",
				       __func__);
				rve_job_free(job);
				return -EINVAL;
			}

			/* close input fence fd */
			ksys_close(ctx->in_fence_fd);

			ret = dma_fence_get_status(in_fence);
			/* ret = 1: fence has already been signaled */
			if (ret == 1) {
				scheduler = rve_job_schedule(job);

				if (scheduler == NULL) {
					pr_err("failed to get scheduler, %s(%d)\n",
					       __func__, __LINE__);
					goto invalid_job;
				}
			/* ret = 0: not signaled yet, schedule from the fence callback */
			} else if (ret == 0) {
				ret = rve_add_dma_fence_callback(job,
					in_fence, rve_job_input_fence_signaled);
				if (ret < 0) {
					pr_err("%s: failed to add fence callback\n",
					       __func__);
					rve_job_free(job);
					return ret;
				}
			} else {
				pr_err("%s: fence status error\n", __func__);
				rve_job_free(job);
				return ret;
			}
		} else {
			scheduler = rve_job_schedule(job);

			if (scheduler == NULL) {
				pr_err("failed to get scheduler, %s(%d)\n",
				       __func__, __LINE__);
				goto invalid_job;
			}
		}

		return ret;
#else
		pr_err("can not support ASYNC mode, please enable CONFIG_SYNC_FILE");
		return -EFAULT;
#endif

	/* RVE_SYNC: wait until the job finishes */
	} else if (ctx->sync_mode == RVE_SYNC) {
		scheduler = rve_job_schedule(job);

		if (scheduler == NULL) {
			pr_err("failed to get scheduler, %s(%d)\n", __func__,
			       __LINE__);
			goto invalid_job;
		}

		ret = job->ret;
		if (ret < 0) {
			pr_err("some error on job, %s(%d)\n", __func__,
			       __LINE__);
			goto running_job_abort;
		}

		ret = rve_job_wait(job);
		if (ret < 0)
			goto running_job_abort;

		rve_job_cleanup(job);
	}
	return ret;

invalid_job:
	rve_job_abort_invalid(job);
	return ret;

/* only used by SYNC mode */
running_job_abort:
	rve_job_abort_running(job);
	return ret;
}

struct rve_internal_ctx_t *
rve_internal_ctx_lookup(struct rve_pending_ctx_manager *ctx_manager, uint32_t id)
{
	struct rve_internal_ctx_t *ctx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx_manager->lock, flags);

	ctx = idr_find(&ctx_manager->ctx_id_idr, id);

	spin_unlock_irqrestore(&ctx_manager->lock, flags);

	if (ctx == NULL)
		pr_err("can not find internal ctx from id[%d]", id);

	return ctx;
}

/*
 * Called at driver close to release the internal ctx's id references.
 */
static int rve_internal_ctx_free_remove_idr_cb(int id, void *ptr, void *data)
{
	struct rve_internal_ctx_t *ctx = ptr;

	idr_remove(&rve_drvdata->pend_ctx_manager->ctx_id_idr, ctx->id);
	kfree(ctx);

	return 0;
}

static int rve_internal_ctx_free_remove_idr(struct rve_internal_ctx_t *ctx)
{
	struct rve_pending_ctx_manager *ctx_manager;
	unsigned long flags;

	ctx_manager = rve_drvdata->pend_ctx_manager;

	spin_lock_irqsave(&ctx_manager->lock, flags);

	ctx_manager->ctx_count--;
	idr_remove(&ctx_manager->ctx_id_idr, ctx->id);

	spin_unlock_irqrestore(&ctx_manager->lock, flags);

	kfree(ctx);

	return 0;
}

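/*
 * Mark one job of the context as finished. When all commands of the
 * context are done: signal the out-fence, wake any synchronous waiter and,
 * for async jobs, clean the job up and drop the context reference unless
 * auto-cancel is disabled.
 */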
int rve_internal_ctx_signal(struct rve_job *job)
{
	struct rve_internal_ctx_t *ctx;
	struct rve_scheduler_t *scheduler;
	int finished_job_count;
	unsigned long flags;

	scheduler = rve_job_get_scheduler(job);
	if (scheduler == NULL) {
		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
		return -EFAULT;
	}

	ctx = rve_job_get_internal_ctx(job);
	if (IS_ERR_OR_NULL(ctx)) {
		pr_err("can not find internal ctx");
		return -EINVAL;
	}

	ctx->regcmd_data = job->regcmd_data;

	spin_lock_irqsave(&ctx->lock, flags);

	finished_job_count = ++ctx->finished_job_count;

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (finished_job_count >= ctx->cmd_num) {
#ifdef CONFIG_SYNC_FILE
		if (ctx->out_fence)
			dma_fence_signal(ctx->out_fence);
#endif

		job->flags |= RVE_JOB_DONE;

		wake_up(&scheduler->job_done_wq);

		spin_lock_irqsave(&ctx->lock, flags);

		ctx->is_running = false;
		ctx->out_fence = NULL;

		spin_unlock_irqrestore(&ctx->lock, flags);

		if (job->flags & RVE_ASYNC) {
			rve_job_cleanup(job);
			if (!ctx->disable_auto_cancel)
				kref_put(&ctx->refcount, rve_internal_ctx_kref_release);
		}
	}

	return 0;
}

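/*
 * Allocate a new internal context for a session and publish it through the
 * idr so userspace can refer to it by id. Returns the id on success.
 */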
int rve_internal_ctx_alloc_to_get_idr_id(struct rve_session *session)
{
	struct rve_pending_ctx_manager *ctx_manager;
	struct rve_internal_ctx_t *ctx;
	unsigned long flags;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL) {
		pr_err("can not kzalloc for rve_internal_ctx_t\n");
		return -ENOMEM;
	}

	ctx_manager = rve_drvdata->pend_ctx_manager;
	if (ctx_manager == NULL) {
		pr_err("rve_pending_ctx_manager is null!\n");
		goto failed;
	}

	spin_lock_init(&ctx->lock);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */

	idr_preload(GFP_KERNEL);

	spin_lock_irqsave(&ctx_manager->lock, flags);

	ctx->id = idr_alloc(&ctx_manager->ctx_id_idr, ctx, 1, 0, GFP_ATOMIC);
	if (ctx->id < 0) {
		pr_err("idr_alloc failed");
		spin_unlock_irqrestore(&ctx_manager->lock, flags);
		idr_preload_end();
		goto failed;
	}

	ctx_manager->ctx_count++;

	ctx->debug_info.pid = current->pid;
	ctx->debug_info.timestamp = ktime_get();
	ctx->session = session;

	spin_unlock_irqrestore(&ctx_manager->lock, flags);

	idr_preload_end();

	ctx->regcmd_data = NULL;

	kref_init(&ctx->refcount);

	return ctx->id;

failed:
	kfree(ctx);
	return -EFAULT;
}

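/*
 * kref release callback for an internal context: remove any of its jobs
 * still queued on a scheduler, soft-reset the core if one of its jobs is
 * currently running, then free the command buffer and the context itself.
 */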
void rve_internal_ctx_kref_release(struct kref *ref)
{
	struct rve_internal_ctx_t *ctx;
	struct rve_scheduler_t *scheduler = NULL;
	struct rve_job *job_pos, *job_q, *job;
	int i;
	bool need_reset = false;
	unsigned long flags;
	ktime_t now = ktime_get();

	ctx = container_of(ref, struct rve_internal_ctx_t, refcount);

	spin_lock_irqsave(&ctx->lock, flags);
	if (!ctx->is_running || ctx->finished_job_count >= ctx->cmd_num) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		goto free_ctx;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	for (i = 0; i < rve_drvdata->num_of_scheduler; i++) {
		scheduler = rve_drvdata->scheduler[i];

		spin_lock_irqsave(&scheduler->irq_lock, flags);

		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
			if (ctx->id == job_pos->ctx->id) {
				job = job_pos;
				list_del_init(&job_pos->head);

				scheduler->job_count--;
			}
		}

		/* handle a job of this ctx that is still running on the hardware */
		if (scheduler->running_job) {
			job = scheduler->running_job;

			if (job->ctx->id == ctx->id) {
				scheduler->running_job = NULL;
				scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
				need_reset = true;
			}
		}

		spin_unlock_irqrestore(&scheduler->irq_lock, flags);

		if (need_reset) {
			pr_err("reset core[%d] by user cancel", scheduler->core);
			scheduler->ops->soft_reset(scheduler);

			rve_job_finish_and_next(job, 0);
		}
	}

free_ctx:
	kfree(ctx->regcmd_data);
	rve_internal_ctx_free_remove_idr(ctx);
}

int rve_ctx_manager_init(struct rve_pending_ctx_manager **ctx_manager_session)
{
	struct rve_pending_ctx_manager *ctx_manager = NULL;

	*ctx_manager_session = kzalloc(sizeof(struct rve_pending_ctx_manager), GFP_KERNEL);
	if (*ctx_manager_session == NULL) {
		pr_err("can not kzalloc for rve_pending_ctx_manager\n");
		return -ENOMEM;
	}

	ctx_manager = *ctx_manager_session;

	spin_lock_init(&ctx_manager->lock);

	idr_init_base(&ctx_manager->ctx_id_idr, 1);

	return 0;
}

int rve_ctx_manager_remove(struct rve_pending_ctx_manager **ctx_manager_session)
{
	struct rve_pending_ctx_manager *ctx_manager = *ctx_manager_session;
	unsigned long flags;

	spin_lock_irqsave(&ctx_manager->lock, flags);

	idr_for_each(&ctx_manager->ctx_id_idr, &rve_internal_ctx_free_remove_idr_cb, ctx_manager);
	idr_destroy(&ctx_manager->ctx_id_idr);

	spin_unlock_irqrestore(&ctx_manager->lock, flags);

	kfree(*ctx_manager_session);

	*ctx_manager_session = NULL;

	return 0;
}