/*
 *
 * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>

#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
#include <asm/cacheflush.h>
#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
#include <mali_kbase_sync.h>
#endif
#include <linux/dma-mapping.h>
#include <mali_base_kernel.h>
#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_tlstream.h>
#include <linux/version.h>
#include <linux/ktime.h>
#include <linux/pfn.h>
#include <linux/sched.h>

/* Mask to check cache alignment of data structures */
#define KBASE_CACHE_ALIGNMENT_MASK ((1<<L1_CACHE_SHIFT)-1)

/**
 * @file mali_kbase_softjobs.c
 *
 * This file implements the logic behind software only jobs that are
 * executed within the driver rather than being handed over to the GPU.
 */

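/**
 * kbasep_add_waiting_soft_job() - Add a soft job atom to the context's
 *                                 waiting list
 * @katom: Soft job atom to queue
 *
 * The atom is appended to the context's waiting_soft_jobs list under
 * the waiting_soft_jobs_lock spinlock.
 */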
static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	unsigned long lflags;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
}

void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	unsigned long lflags;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_del(&katom->queue);
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
}

static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;

	/* Record the start time of this atom so we could cancel it at
	 * the right time.
	 */
	katom->start_timestamp = ktime_get();

	/* Add the atom to the waiting list before the timer is
	 * (re)started to make sure that it gets processed.
	 */
	kbasep_add_waiting_soft_job(katom);

	/* Schedule timeout of this atom after a period if it is not active */
	if (!timer_pending(&kctx->soft_job_timeout)) {
		int timeout_ms = atomic_read(
				&kctx->kbdev->js_data.soft_job_timeout_ms);
		mod_timer(&kctx->soft_job_timeout,
			  jiffies + msecs_to_jiffies(timeout_ms));
	}
}

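/**
 * kbasep_read_soft_event_status() - Read the status byte of a soft event
 * @kctx: Context owning the event
 * @evt: GPU address of the soft event status byte
 * @status: Output for the current status value
 *
 * Return: 0 on success, -EFAULT if the event address could not be mapped.
 */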
static int kbasep_read_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char *status)
{
	unsigned char *mapped_evt;
	struct kbase_vmap_struct map;

	mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
	if (!mapped_evt)
		return -EFAULT;

	*status = *mapped_evt;

	kbase_vunmap(kctx, &map);

	return 0;
}

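/**
 * kbasep_write_soft_event_status() - Write the status byte of a soft event
 * @kctx: Context owning the event
 * @evt: GPU address of the soft event status byte
 * @new_status: New status value, either BASE_JD_SOFT_EVENT_SET or
 *              BASE_JD_SOFT_EVENT_RESET
 *
 * Return: 0 on success, -EINVAL for an invalid status value, or -EFAULT if
 * the event address could not be mapped.
 */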
static int kbasep_write_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char new_status)
{
	unsigned char *mapped_evt;
	struct kbase_vmap_struct map;

	if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
	    (new_status != BASE_JD_SOFT_EVENT_RESET))
		return -EINVAL;

	mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
	if (!mapped_evt)
		return -EFAULT;

	*mapped_evt = new_status;

	kbase_vunmap(kctx, &map);

	return 0;
}

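/**
 * kbase_dump_cpu_gpu_time() - Perform a CPU/GPU timestamp dump soft job
 * @katom: Soft job atom
 *
 * Samples the GPU cycle counter and system time and writes the result back
 * to the user-supplied buffer addressed by the atom's jc field. If the
 * device is suspended, the atom is instead queued until resume.
 *
 * Return: 0 if the atom was processed, or a non-zero value if it was queued
 * because the device is suspended.
 */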
static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
{
	struct kbase_vmap_struct map;
	void *user_result;
	struct timespec64 ts;
	struct base_dump_cpu_gpu_counters data;
	u64 system_time;
	u64 cycle_counter;
	u64 jc = katom->jc;
	struct kbase_context *kctx = katom->kctx;
	int pm_active_err;

	memset(&data, 0, sizeof(data));

	/* Take the PM active reference as late as possible - otherwise, it
	 * could delay suspend until we process the atom (which may be at the
	 * end of a long chain of dependencies).
	 */
	pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev,
			KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
	if (pm_active_err) {
		struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;

		/* We're suspended - queue this on the list of suspended jobs.
		 * Use dep_item[1], because dep_item[0] was previously in use
		 * for 'waiting_soft_jobs'.
		 */
		mutex_lock(&js_devdata->runpool_mutex);
		list_add_tail(&katom->dep_item[1],
			      &js_devdata->suspended_soft_jobs_list);
		mutex_unlock(&js_devdata->runpool_mutex);

		/* Also add this atom to the list of waiting soft jobs */
		kbasep_add_waiting_soft_job(katom);

		return pm_active_err;
	}

	kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
			&ts);

	kbase_pm_context_idle(kctx->kbdev);

	data.sec = ts.tv_sec;
	data.usec = ts.tv_nsec / 1000;
	data.system_time = system_time;
	data.cycle_counter = cycle_counter;

	/* Assume this atom will be cancelled until we know otherwise */
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

	/* GPU_WR access is checked on the range for returning the result to
	 * userspace for the following reasons:
	 * - security: this is currently how imported user buffers are checked.
	 * - the userspace DDK is guaranteed to assume the region was mapped
	 *   as GPU_WR.
	 */
	user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR,
			&map);
	if (!user_result)
		return 0;

	memcpy(user_result, &data, sizeof(data));

	kbase_vunmap(kctx, &map);

	/* Atom was fine - mark it as done */
	katom->event_code = BASE_JD_EVENT_DONE;

	return 0;
}

#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
/* Called by the explicit fence mechanism when a fence wait has completed */
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;

	mutex_lock(&kctx->jctx.lock);
	kbasep_remove_waiting_soft_job(katom);
	kbase_finish_soft_job(katom);
	if (jd_done_nolock(katom, NULL))
		kbase_js_sched_all(kctx->kbdev);
	mutex_unlock(&kctx->jctx.lock);
}
#endif

static void kbasep_soft_event_complete_job(struct work_struct *work)
{
	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;
	int resched;

	mutex_lock(&kctx->jctx.lock);
	resched = jd_done_nolock(katom, NULL);
	mutex_unlock(&kctx->jctx.lock);

	if (resched)
		kbase_js_sched_all(kctx->kbdev);
}

void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
{
	int cancel_timer = 1;
	struct list_head *entry, *tmp;
	unsigned long lflags;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
		struct kbase_jd_atom *katom = list_entry(
				entry, struct kbase_jd_atom, queue);

		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
		case BASE_JD_REQ_SOFT_EVENT_WAIT:
			if (katom->jc == evt) {
				list_del(&katom->queue);

				katom->event_code = BASE_JD_EVENT_DONE;
				INIT_WORK(&katom->work,
					  kbasep_soft_event_complete_job);
				queue_work(kctx->jctx.job_done_wq,
					   &katom->work);
			} else {
				/* There are still other waiting jobs, we cannot
				 * cancel the timer yet.
				 */
				cancel_timer = 0;
			}
			break;
#ifdef CONFIG_MALI_FENCE_DEBUG
		case BASE_JD_REQ_SOFT_FENCE_WAIT:
			/* Keep the timer running if fence debug is enabled and
			 * there are waiting fence jobs.
			 */
			cancel_timer = 0;
			break;
#endif
		}
	}

	if (cancel_timer)
		del_timer(&kctx->soft_job_timeout);
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
}

#ifdef CONFIG_MALI_FENCE_DEBUG
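/**
 * kbase_fence_debug_check_atom() - Trace dependencies of a fence wait atom
 * @katom: Atom whose dependencies are inspected
 *
 * Recursively walks the atom's dependencies and warns about any incomplete
 * fence trigger atoms found, to help diagnose fence wait timeouts.
 */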
static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct device *dev = kctx->kbdev->dev;
	int i;

	for (i = 0; i < 2; i++) {
		struct kbase_jd_atom *dep;

		list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
			if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
			    dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
				continue;

			if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
					== BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
				/* Found blocked trigger fence. */
				struct kbase_sync_fence_info info;

				if (!kbase_sync_fence_in_info_get(dep, &info)) {
					dev_warn(dev,
						 "\tVictim trigger atom %d fence [%p] %s: %s\n",
						 kbase_jd_atom_id(kctx, dep),
						 info.fence,
						 info.name,
						 kbase_sync_status_string(info.status));
				}
			}

			kbase_fence_debug_check_atom(dep);
		}
	}
}

static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct device *dev = katom->kctx->kbdev->dev;
	int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
	unsigned long lflags;
	struct kbase_sync_fence_info info;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);

	if (kbase_sync_fence_in_info_get(katom, &info)) {
		/* Fence must have signaled just after timeout. */
		spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
		return;
	}

	dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
		 kctx->tgid, kctx->id,
		 kbase_jd_atom_id(kctx, katom),
		 info.fence, timeout_ms);
	dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
		 info.fence, info.name,
		 kbase_sync_status_string(info.status));

	/* Search for blocked trigger atoms */
	kbase_fence_debug_check_atom(katom);

	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);

	kbase_sync_fence_in_dump(katom);
}

struct kbase_fence_debug_work {
	struct kbase_jd_atom *katom;
	struct work_struct work;
};

static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
{
	struct kbase_fence_debug_work *w = container_of(work,
			struct kbase_fence_debug_work, work);
	struct kbase_jd_atom *katom = w->katom;
	struct kbase_context *kctx = katom->kctx;

	mutex_lock(&kctx->jctx.lock);
	kbase_fence_debug_wait_timeout(katom);
	mutex_unlock(&kctx->jctx.lock);

	kfree(w);
}

static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
{
	struct kbase_fence_debug_work *work;
	struct kbase_context *kctx = katom->kctx;

	/* Enqueue fence debug worker. Use job_done_wq to get
	 * debug print ordered with job completion.
	 */
	work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
	/* Ignore allocation failure. */
	if (work) {
		work->katom = katom;
		INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
		queue_work(kctx->jctx.job_done_wq, &work->work);
	}
}
#endif /* CONFIG_MALI_FENCE_DEBUG */

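/**
 * kbasep_soft_job_timeout_worker() - Timer callback for waiting soft jobs
 * @t: Timer embedded in the context's soft_job_timeout
 *
 * Cancels soft event waits that have exceeded the soft job timeout and, with
 * fence debug enabled, dumps information about overdue fence waits. The timer
 * is restarted if any waiting job is still within its timeout.
 */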
void kbasep_soft_job_timeout_worker(struct timer_list *t)
{
	struct kbase_context *kctx = from_timer(kctx, t, soft_job_timeout);
	u32 timeout_ms = (u32)atomic_read(
			&kctx->kbdev->js_data.soft_job_timeout_ms);
	struct timer_list *timer = &kctx->soft_job_timeout;
	ktime_t cur_time = ktime_get();
	bool restarting = false;
	unsigned long lflags;
	struct list_head *entry, *tmp;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
		struct kbase_jd_atom *katom = list_entry(entry,
				struct kbase_jd_atom, queue);
		s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
				katom->start_timestamp));

		if (elapsed_time < (s64)timeout_ms) {
			restarting = true;
			continue;
		}

		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
		case BASE_JD_REQ_SOFT_EVENT_WAIT:
			/* Take it out of the list to ensure that it
			 * will be cancelled in all cases
			 */
			list_del(&katom->queue);

			katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
			INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
			queue_work(kctx->jctx.job_done_wq, &katom->work);
			break;
#ifdef CONFIG_MALI_FENCE_DEBUG
		case BASE_JD_REQ_SOFT_FENCE_WAIT:
			kbase_fence_debug_timeout(katom);
			break;
#endif
		}
	}

	if (restarting)
		mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
}

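/**
 * kbasep_soft_event_wait() - Process a soft event wait atom
 * @katom: Soft job atom; katom->jc holds the event address
 *
 * Return: 0 if the event is already set (or the status could not be read and
 * the atom was cancelled), 1 if the atom must block waiting for the event.
 */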
static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	unsigned char status;

	/* The status of this soft-job is stored in jc */
	if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		return 0;
	}

	if (status == BASE_JD_SOFT_EVENT_SET)
		return 0; /* Event already set, nothing to do */

	kbasep_add_waiting_with_timeout(katom);

	return 1;
}

static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
					    unsigned char new_status)
{
	/* Complete jobs waiting on the same event */
	struct kbase_context *kctx = katom->kctx;

	if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		return;
	}

	if (new_status == BASE_JD_SOFT_EVENT_SET)
		kbasep_complete_triggered_soft_events(kctx, katom->jc);
}

/**
 * kbase_soft_event_update() - Update soft event state
 * @kctx: Pointer to context
 * @event: Event to update
 * @new_status: New status value of event
 *
 * Update the event, and wake up any atoms waiting for the event.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int kbase_soft_event_update(struct kbase_context *kctx,
			    u64 event,
			    unsigned char new_status)
{
	int err = 0;

	mutex_lock(&kctx->jctx.lock);

	if (kbasep_write_soft_event_status(kctx, event, new_status)) {
		err = -ENOENT;
		goto out;
	}

	if (new_status == BASE_JD_SOFT_EVENT_SET)
		kbasep_complete_triggered_soft_events(kctx, event);

out:
	mutex_unlock(&kctx->jctx.lock);

	return err;
}

static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
{
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	if (jd_done_nolock(katom, NULL))
		kbase_js_sched_all(katom->kctx->kbdev);
}

struct kbase_debug_copy_buffer {
	size_t size;
	struct page **pages;
	int nr_pages;
	size_t offset;
	struct kbase_mem_phy_alloc *gpu_alloc;

	struct page **extres_pages;
	int nr_extres_pages;
};

static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
{
	struct page **pages = buffer->extres_pages;
	int nr_pages = buffer->nr_extres_pages;

	if (pages) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *pg = pages[i];

			if (pg)
				put_page(pg);
		}
		kfree(pages);
	}
}

static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
{
	struct kbase_debug_copy_buffer *buffers =
			(struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
	unsigned int i;
	unsigned int nr = katom->nr_extres;

	if (!buffers)
		return;

	kbase_gpu_vm_lock(katom->kctx);
	for (i = 0; i < nr; i++) {
		int p;
		struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;

		if (!buffers[i].pages)
			break;
		for (p = 0; p < buffers[i].nr_pages; p++) {
			struct page *pg = buffers[i].pages[p];

			if (pg)
				put_page(pg);
		}
		kfree(buffers[i].pages);
		if (gpu_alloc) {
			switch (gpu_alloc->type) {
			case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
			{
				free_user_buffer(&buffers[i]);
				break;
			}
			default:
				/* Nothing to be done. */
				break;
			}
			kbase_mem_phy_alloc_put(gpu_alloc);
		}
	}
	kbase_gpu_vm_unlock(katom->kctx);
	kfree(buffers);

	katom->jc = 0;
}

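/**
 * kbase_debug_copy_prepare() - Set up a debug copy soft job
 * @katom: Soft job atom; katom->jc points to a user array of buffer
 *         descriptors and is replaced with the kernel-side buffer array
 *
 * Pins the destination user pages and takes a reference on each source
 * external resource so that the copy can be performed later.
 *
 * Return: 0 on success, or a negative error code.
 */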
static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
{
	struct kbase_debug_copy_buffer *buffers;
	struct base_jd_debug_copy_buffer *user_buffers = NULL;
	unsigned int i;
	unsigned int nr = katom->nr_extres;
	int ret = 0;
	void __user *user_structs = (void __user *)(uintptr_t)katom->jc;

	if (!user_structs)
		return -EINVAL;

	buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
	if (!buffers) {
		ret = -ENOMEM;
		katom->jc = 0;
		goto out_cleanup;
	}
	katom->jc = (u64)(uintptr_t)buffers;

	user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);

	if (!user_buffers) {
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = copy_from_user(user_buffers, user_structs,
			sizeof(*user_buffers)*nr);
	if (ret)
		goto out_cleanup;

	for (i = 0; i < nr; i++) {
		u64 addr = user_buffers[i].address;
		u64 page_addr = addr & PAGE_MASK;
		u64 end_page_addr = addr + user_buffers[i].size - 1;
		u64 last_page_addr = end_page_addr & PAGE_MASK;
		int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
		int pinned_pages;
		struct kbase_va_region *reg;
		struct base_external_resource user_extres;

		if (!addr)
			continue;

		buffers[i].nr_pages = nr_pages;
		buffers[i].offset = addr & ~PAGE_MASK;
		if (buffers[i].offset >= PAGE_SIZE) {
			ret = -EINVAL;
			goto out_cleanup;
		}
		buffers[i].size = user_buffers[i].size;

		buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
				GFP_KERNEL);
		if (!buffers[i].pages) {
			ret = -ENOMEM;
			goto out_cleanup;
		}

		pinned_pages = get_user_pages_fast(page_addr,
					nr_pages,
					1, /* Write */
					buffers[i].pages);
		if (pinned_pages < 0) {
			ret = pinned_pages;
			goto out_cleanup;
		}
		if (pinned_pages != nr_pages) {
			ret = -EINVAL;
			goto out_cleanup;
		}

		user_extres = user_buffers[i].extres;
		if (user_extres.ext_resource == 0ULL) {
			ret = -EINVAL;
			goto out_cleanup;
		}

		kbase_gpu_vm_lock(katom->kctx);
		reg = kbase_region_tracker_find_region_enclosing_address(
				katom->kctx, user_extres.ext_resource &
				~BASE_EXT_RES_ACCESS_EXCLUSIVE);

		if (NULL == reg || NULL == reg->gpu_alloc ||
				(reg->flags & KBASE_REG_FREE)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
		buffers[i].nr_extres_pages = reg->nr_pages;

		if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
			dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");

		switch (reg->gpu_alloc->type) {
		case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
		{
			struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
			unsigned long nr_pages =
				alloc->imported.user_buf.nr_pages;

			if (alloc->imported.user_buf.mm != current->mm) {
				ret = -EINVAL;
				goto out_unlock;
			}
			buffers[i].extres_pages = kcalloc(nr_pages,
					sizeof(struct page *), GFP_KERNEL);
			if (!buffers[i].extres_pages) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			ret = get_user_pages_fast(
					alloc->imported.user_buf.address,
					nr_pages, 0,
					buffers[i].extres_pages);
			if (ret != nr_pages)
				goto out_unlock;
			ret = 0;
			break;
		}
		case KBASE_MEM_TYPE_IMPORTED_UMP:
		{
			dev_warn(katom->kctx->kbdev->dev,
					"UMP is not supported for debug_copy jobs\n");
			ret = -EINVAL;
			goto out_unlock;
		}
		default:
			/* Nothing to be done. */
			break;
		}
		kbase_gpu_vm_unlock(katom->kctx);
	}
	kfree(user_buffers);

	return ret;

out_unlock:
	kbase_gpu_vm_unlock(katom->kctx);

out_cleanup:
	kfree(buffers);
	kfree(user_buffers);

	/* Frees allocated memory for kbase_debug_copy_job struct, including
	 * members, and sets jc to 0.
	 */
	kbase_debug_copy_finish(katom);
	return ret;
}

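/**
 * kbase_mem_copy_from_extres_page() - Copy one source page into the
 *                                     destination user buffer
 * @kctx: Context owning the buffers
 * @extres_page: Mapped source page from the external resource
 * @pages: Destination page array
 * @nr_pages: Number of destination pages
 * @target_page_nr: Current destination page index, updated on return
 * @offset: Offset of the destination buffer within a page
 * @to_copy: Remaining number of bytes to copy, updated on return
 *
 * Because of @offset, the source page may straddle two destination pages.
 */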
static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
		void *extres_page, struct page **pages, unsigned int nr_pages,
		unsigned int *target_page_nr, size_t offset, size_t *to_copy)
{
	void *target_page = kmap(pages[*target_page_nr]);
	size_t chunk = PAGE_SIZE-offset;

	lockdep_assert_held(&kctx->reg_lock);

	if (!target_page) {
		*target_page_nr += 1;
		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
		return;
	}

	chunk = min(chunk, *to_copy);

	memcpy(target_page + offset, extres_page, chunk);
	*to_copy -= chunk;

	kunmap(pages[*target_page_nr]);

	*target_page_nr += 1;
	if (*target_page_nr >= nr_pages)
		return;

	target_page = kmap(pages[*target_page_nr]);
	if (!target_page) {
		*target_page_nr += 1;
		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
		return;
	}

	KBASE_DEBUG_ASSERT(target_page);

	chunk = min(offset, *to_copy);
	memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
	*to_copy -= chunk;

	kunmap(pages[*target_page_nr]);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
static void *dma_buf_kmap_page(struct kbase_mem_phy_alloc *gpu_alloc,
		unsigned long page_num, struct page **page)
{
	struct sg_table *sgt = gpu_alloc->imported.umm.sgt;
	struct sg_page_iter sg_iter;
	unsigned long page_index = 0;

	if (WARN_ON(gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
		return NULL;

	if (!sgt)
		return NULL;

	if (WARN_ON(page_num >= gpu_alloc->nents))
		return NULL;

	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
		if (page_index == page_num) {
			*page = sg_page_iter_page(&sg_iter);

			return kmap(*page);
		}
		page_index++;
	}

	return NULL;
}
#endif

static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
		struct kbase_debug_copy_buffer *buf_data)
{
	unsigned int i;
	unsigned int target_page_nr = 0;
	struct page **pages = buf_data->pages;
	u64 offset = buf_data->offset;
	size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
	size_t to_copy = min(extres_size, buf_data->size);
	struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
	int ret = 0;

	KBASE_DEBUG_ASSERT(pages != NULL);

	kbase_gpu_vm_lock(kctx);
	if (!gpu_alloc) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (gpu_alloc->type) {
	case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
	{
		for (i = 0; i < buf_data->nr_extres_pages; i++) {
			struct page *pg = buf_data->extres_pages[i];
			void *extres_page = kmap(pg);

			if (extres_page)
				kbase_mem_copy_from_extres_page(kctx,
						extres_page, pages,
						buf_data->nr_pages,
						&target_page_nr,
						offset, &to_copy);

			kunmap(pg);
			if (target_page_nr >= buf_data->nr_pages)
				break;
		}
		break;
	}
	break;
#ifdef CONFIG_DMA_SHARED_BUFFER
	case KBASE_MEM_TYPE_IMPORTED_UMM: {
		struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;

		KBASE_DEBUG_ASSERT(dma_buf != NULL);
		KBASE_DEBUG_ASSERT(dma_buf->size ==
				   buf_data->nr_extres_pages * PAGE_SIZE);

		ret = dma_buf_begin_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
				0, buf_data->nr_extres_pages*PAGE_SIZE,
#endif
				DMA_FROM_DEVICE);
		if (ret)
			goto out_unlock;

		for (i = 0; i < buf_data->nr_extres_pages; i++) {

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
			struct page *pg;
			void *extres_page = dma_buf_kmap_page(gpu_alloc, i, &pg);
#else
			void *extres_page = dma_buf_kmap(dma_buf, i);
#endif

			if (extres_page)
				kbase_mem_copy_from_extres_page(kctx,
						extres_page, pages,
						buf_data->nr_pages,
						&target_page_nr,
						offset, &to_copy);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
			kunmap(pg);
#else
			dma_buf_kunmap(dma_buf, i, extres_page);
#endif
			if (target_page_nr >= buf_data->nr_pages)
				break;
		}
		dma_buf_end_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
				0, buf_data->nr_extres_pages*PAGE_SIZE,
#endif
				DMA_FROM_DEVICE);
		break;
	}
#endif
	default:
		ret = -EINVAL;
	}
out_unlock:
	kbase_gpu_vm_unlock(kctx);
	return ret;

}

static int kbase_debug_copy(struct kbase_jd_atom *katom)
{
	struct kbase_debug_copy_buffer *buffers =
			(struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
	unsigned int i;

	for (i = 0; i < katom->nr_extres; i++) {
		int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);

		if (res)
			return res;
	}

	return 0;
}

static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
{
	__user void *data = (__user void *)(uintptr_t) katom->jc;
	struct base_jit_alloc_info *info;
	struct kbase_context *kctx = katom->kctx;
	int ret;

	/* Fail the job if there is no info structure */
	if (!data) {
		ret = -EINVAL;
		goto fail;
	}

	/* Copy the information for safe access and future storage */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto fail;
	}

	if (copy_from_user(info, data, sizeof(*info)) != 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/* If the ID is zero then fail the job */
	if (info->id == 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/* Sanity check that the PA fits within the VA */
	if (info->va_pages < info->commit_pages) {
		ret = -EINVAL;
		goto free_info;
	}

	/* Ensure the GPU address is correctly aligned */
	if ((info->gpu_alloc_addr & 0x7) != 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/* Replace the user pointer with our kernel allocated info structure */
	katom->jc = (u64)(uintptr_t) info;
	katom->jit_blocked = false;

	lockdep_assert_held(&kctx->jctx.lock);
	list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);

	/*
	 * Note:
	 * The provided info->gpu_alloc_addr isn't validated here as
	 * userland can cache allocations which means that even
	 * though the region is valid it doesn't represent the
	 * same thing it used to.
	 *
	 * Complete validation of va_pages, commit_pages and extent
	 * isn't done here as it will be done during the call to
	 * kbase_mem_alloc.
	 */
	return 0;

free_info:
	kfree(info);
fail:
	katom->jc = 0;
	return ret;
}

static u8 kbase_jit_free_get_id(struct kbase_jd_atom *katom)
{
	if (WARN_ON(katom->core_req != BASE_JD_REQ_SOFT_JIT_FREE))
		return 0;

	return (u8) katom->jc;
}

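/**
 * kbase_jit_allocate_process() - Attempt to satisfy a JIT allocation atom
 * @katom: JIT allocation soft job atom
 *
 * Tries to create the JIT allocation and write its GPU address back to the
 * location supplied by userspace. If the allocation cannot be satisfied but
 * an earlier JIT free for an active allocation is still pending, the atom is
 * blocked until that free completes.
 *
 * Return: 0 if the atom completed (successfully or with an error code set),
 * 1 if the atom was blocked waiting for a pending JIT free.
 */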
static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct base_jit_alloc_info *info;
	struct kbase_va_region *reg;
	struct kbase_vmap_struct mapping;
	u64 *ptr, new_addr;

	if (katom->jit_blocked) {
		list_del(&katom->queue);
		katom->jit_blocked = false;
	}

	info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;

	/* The JIT ID is still in use so fail the allocation */
	if (kctx->jit_alloc[info->id]) {
		katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
		return 0;
	}

	/* Create a JIT allocation */
	reg = kbase_jit_allocate(kctx, info);
	if (!reg) {
		struct kbase_jd_atom *jit_atom;
		bool can_block = false;

		lockdep_assert_held(&kctx->jctx.lock);

		jit_atom = list_first_entry(&kctx->jit_atoms_head,
				struct kbase_jd_atom, jit_node);

		list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
			if (jit_atom == katom)
				break;
			if (jit_atom->core_req == BASE_JD_REQ_SOFT_JIT_FREE) {
				u8 free_id = kbase_jit_free_get_id(jit_atom);

				if (free_id && kctx->jit_alloc[free_id]) {
					/* A JIT free which is active and
					 * submitted before this atom
					 */
					can_block = true;
					break;
				}
			}
		}

		if (!can_block) {
			/* Mark the allocation so we know it's in use even if
			 * the allocation itself fails.
			 */
			kctx->jit_alloc[info->id] =
					(struct kbase_va_region *) -1;

			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
			return 0;
		}

		/* There are pending frees for an active allocation
		 * so we should wait to see whether they free the memory.
		 * Add to the beginning of the list to ensure that the atom is
		 * processed only once in kbase_jit_free_finish
		 */
		list_add(&katom->queue, &kctx->jit_pending_alloc);
		katom->jit_blocked = true;

		return 1;
	}

	/*
	 * Write the address of the JIT allocation to the user provided
	 * GPU allocation.
	 */
	ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
			&mapping);
	if (!ptr) {
		/*
		 * Leave the allocation "live" as the JIT free job will be
		 * submitted anyway.
		 */
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		return 0;
	}

	new_addr = reg->start_pfn << PAGE_SHIFT;
	*ptr = new_addr;
	KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
			katom, info->gpu_alloc_addr, new_addr);
	kbase_vunmap(kctx, &mapping);

	katom->event_code = BASE_JD_EVENT_DONE;

	/*
	 * Bind it to the user provided ID. Do this last so we can check for
	 * the JIT free racing this JIT alloc job.
	 */
	kctx->jit_alloc[info->id] = reg;

	return 0;
}

static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
{
	struct base_jit_alloc_info *info;

	lockdep_assert_held(&katom->kctx->jctx.lock);

	/* Remove atom from jit_atoms_head list */
	list_del(&katom->jit_node);

	if (katom->jit_blocked) {
		list_del(&katom->queue);
		katom->jit_blocked = false;
	}

	info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
	/* Free the info structure */
	kfree(info);
}

static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;

	lockdep_assert_held(&kctx->jctx.lock);
	list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);

	return 0;
}

static void kbase_jit_free_process(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	u8 id = kbase_jit_free_get_id(katom);

	/*
	 * If the ID is zero or it is not in use yet then fail the job.
	 */
	if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		return;
	}

	/*
	 * If the ID is valid but the allocation request failed still succeed
	 * this soft job but don't try and free the allocation.
	 */
	if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
		kbase_jit_free(kctx, kctx->jit_alloc[id]);

	kctx->jit_alloc[id] = NULL;
}

static void kbasep_jit_free_finish_worker(struct work_struct *work)
{
	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;
	int resched;

	mutex_lock(&kctx->jctx.lock);
	kbase_finish_soft_job(katom);
	resched = jd_done_nolock(katom, NULL);
	mutex_unlock(&kctx->jctx.lock);

	if (resched)
		kbase_js_sched_all(kctx->kbdev);
}

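/**
 * kbase_jit_free_finish() - Finish a JIT free atom and retry blocked allocs
 * @katom: JIT free soft job atom
 *
 * Removes the atom from the context's JIT atom list and re-runs any JIT
 * allocation atoms that were blocked waiting for memory to be freed.
 */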
static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
{
	struct list_head *i, *tmp;
	struct kbase_context *kctx = katom->kctx;

	lockdep_assert_held(&kctx->jctx.lock);
	/* Remove this atom from the kctx->jit_atoms_head list */
	list_del(&katom->jit_node);

	list_for_each_safe(i, tmp, &kctx->jit_pending_alloc) {
		struct kbase_jd_atom *pending_atom = list_entry(i,
				struct kbase_jd_atom, queue);
		if (kbase_jit_allocate_process(pending_atom) == 0) {
			/* Atom has completed */
			INIT_WORK(&pending_atom->work,
				  kbasep_jit_free_finish_worker);
			queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
		}
	}
}

kbase_ext_res_prepare(struct kbase_jd_atom * katom)1175*4882a593Smuzhiyun static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun __user struct base_external_resource_list *user_ext_res;
1178*4882a593Smuzhiyun struct base_external_resource_list *ext_res;
1179*4882a593Smuzhiyun u64 count = 0;
1180*4882a593Smuzhiyun size_t copy_size;
1181*4882a593Smuzhiyun int ret;
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun user_ext_res = (__user struct base_external_resource_list *)
1184*4882a593Smuzhiyun (uintptr_t) katom->jc;
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun /* Fail the job if there is no info structure */
1187*4882a593Smuzhiyun if (!user_ext_res) {
1188*4882a593Smuzhiyun ret = -EINVAL;
1189*4882a593Smuzhiyun goto fail;
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
1193*4882a593Smuzhiyun ret = -EINVAL;
1194*4882a593Smuzhiyun goto fail;
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun /* Is the number of external resources in range? */
1198*4882a593Smuzhiyun if (!count || count > BASE_EXT_RES_COUNT_MAX) {
1199*4882a593Smuzhiyun ret = -EINVAL;
1200*4882a593Smuzhiyun goto fail;
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun /* Copy the information for safe access and future storage */
1204*4882a593Smuzhiyun copy_size = sizeof(*ext_res);
1205*4882a593Smuzhiyun copy_size += sizeof(struct base_external_resource) * (count - 1);
1206*4882a593Smuzhiyun ext_res = kzalloc(copy_size, GFP_KERNEL);
1207*4882a593Smuzhiyun if (!ext_res) {
1208*4882a593Smuzhiyun ret = -ENOMEM;
1209*4882a593Smuzhiyun goto fail;
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
1213*4882a593Smuzhiyun ret = -EINVAL;
1214*4882a593Smuzhiyun goto free_info;
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun
1217*4882a593Smuzhiyun /*
1218*4882a593Smuzhiyun * Overwrite the count with the value read earlier, in case user space
1219*4882a593Smuzhiyun * changed it between the two copies.
1220*4882a593Smuzhiyun */
1221*4882a593Smuzhiyun ext_res->count = count;
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun /*
1224*4882a593Smuzhiyun * Replace the user pointer with our kernel allocated
1225*4882a593Smuzhiyun * ext_res structure.
1226*4882a593Smuzhiyun */
1227*4882a593Smuzhiyun katom->jc = (u64)(uintptr_t) ext_res;
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun return 0;
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun free_info:
1232*4882a593Smuzhiyun kfree(ext_res);
1233*4882a593Smuzhiyun fail:
1234*4882a593Smuzhiyun return ret;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
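/**
 * kbase_ext_res_process - Map or unmap an atom's external resources
 * @katom: The atom to process
 * @map:   True to acquire (map) the resources, false to release (unmap) them
 *
 * Walks the external resource list prepared by kbase_ext_res_prepare() and
 * acquires or releases each sticky resource. On a map failure, any resources
 * already acquired are released again. The atom's event code is updated to
 * reflect the overall result.
 */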
1237*4882a593Smuzhiyun static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
1238*4882a593Smuzhiyun {
1239*4882a593Smuzhiyun struct base_external_resource_list *ext_res;
1240*4882a593Smuzhiyun int i;
1241*4882a593Smuzhiyun bool failed = false;
1242*4882a593Smuzhiyun
1243*4882a593Smuzhiyun ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1244*4882a593Smuzhiyun if (!ext_res)
1245*4882a593Smuzhiyun goto failed_jc;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun kbase_gpu_vm_lock(katom->kctx);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun for (i = 0; i < ext_res->count; i++) {
1250*4882a593Smuzhiyun u64 gpu_addr;
1251*4882a593Smuzhiyun
1252*4882a593Smuzhiyun gpu_addr = ext_res->ext_res[i].ext_resource &
1253*4882a593Smuzhiyun ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1254*4882a593Smuzhiyun if (map) {
1255*4882a593Smuzhiyun if (!kbase_sticky_resource_acquire(katom->kctx,
1256*4882a593Smuzhiyun gpu_addr))
1257*4882a593Smuzhiyun goto failed_loop;
1258*4882a593Smuzhiyun } else
1259*4882a593Smuzhiyun if (!kbase_sticky_resource_release(katom->kctx, NULL,
1260*4882a593Smuzhiyun gpu_addr))
1261*4882a593Smuzhiyun failed = true;
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun /*
1265*4882a593Smuzhiyun * In the unmap case we keep unmapping the remaining resources even if
1266*4882a593Smuzhiyun * one of them fails, but we always report failure if _any_ unmap
1267*4882a593Smuzhiyun * request fails.
1268*4882a593Smuzhiyun */
1269*4882a593Smuzhiyun if (failed)
1270*4882a593Smuzhiyun katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1271*4882a593Smuzhiyun else
1272*4882a593Smuzhiyun katom->event_code = BASE_JD_EVENT_DONE;
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun kbase_gpu_vm_unlock(katom->kctx);
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun return;
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun failed_loop:
1279*4882a593Smuzhiyun while (--i >= 0) { /* roll back every acquired resource, including index 0 */
1280*4882a593Smuzhiyun u64 gpu_addr;
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun gpu_addr = ext_res->ext_res[i].ext_resource &
1283*4882a593Smuzhiyun ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1289*4882a593Smuzhiyun kbase_gpu_vm_unlock(katom->kctx);
1290*4882a593Smuzhiyun
1291*4882a593Smuzhiyun failed_jc:
1292*4882a593Smuzhiyun return;
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun
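/**
 * kbase_ext_res_finish - Free the kernel copy of the external resource list
 * @katom: The external resource atom being finished
 */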
1295*4882a593Smuzhiyun static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
1296*4882a593Smuzhiyun {
1297*4882a593Smuzhiyun struct base_external_resource_list *ext_res;
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1300*4882a593Smuzhiyun /* Free the info structure */
1301*4882a593Smuzhiyun kfree(ext_res);
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun
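/**
 * kbase_process_soft_job - Execute a software-only job
 * @katom: The soft job atom to process
 *
 * Dispatches the atom to the handler for its soft job type.
 *
 * Return: 0 if the atom has completed, non-zero if it could not complete
 * immediately (for example while waiting on a fence or soft event) and
 * will be finished later.
 */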
1304*4882a593Smuzhiyun int kbase_process_soft_job(struct kbase_jd_atom *katom)
1305*4882a593Smuzhiyun {
1306*4882a593Smuzhiyun switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1307*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1308*4882a593Smuzhiyun return kbase_dump_cpu_gpu_time(katom);
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1311*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1312*4882a593Smuzhiyun katom->event_code = kbase_sync_fence_out_trigger(katom,
1313*4882a593Smuzhiyun katom->event_code == BASE_JD_EVENT_DONE ?
1314*4882a593Smuzhiyun 0 : -EFAULT);
1315*4882a593Smuzhiyun break;
1316*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_WAIT:
1317*4882a593Smuzhiyun {
1318*4882a593Smuzhiyun int ret = kbase_sync_fence_in_wait(katom);
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun if (ret == 1) {
1321*4882a593Smuzhiyun #ifdef CONFIG_MALI_FENCE_DEBUG
1322*4882a593Smuzhiyun kbasep_add_waiting_with_timeout(katom);
1323*4882a593Smuzhiyun #else
1324*4882a593Smuzhiyun kbasep_add_waiting_soft_job(katom);
1325*4882a593Smuzhiyun #endif
1326*4882a593Smuzhiyun }
1327*4882a593Smuzhiyun return ret;
1328*4882a593Smuzhiyun }
1329*4882a593Smuzhiyun #endif
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_REPLAY:
1332*4882a593Smuzhiyun return kbase_replay_process(katom);
1333*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_WAIT:
1334*4882a593Smuzhiyun return kbasep_soft_event_wait(katom);
1335*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_SET:
1336*4882a593Smuzhiyun kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
1337*4882a593Smuzhiyun break;
1338*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_RESET:
1339*4882a593Smuzhiyun kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
1340*4882a593Smuzhiyun break;
1341*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_DEBUG_COPY:
1342*4882a593Smuzhiyun {
1343*4882a593Smuzhiyun int res = kbase_debug_copy(katom);
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun if (res)
1346*4882a593Smuzhiyun katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1347*4882a593Smuzhiyun break;
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_JIT_ALLOC:
1350*4882a593Smuzhiyun return kbase_jit_allocate_process(katom);
1351*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_JIT_FREE:
1352*4882a593Smuzhiyun kbase_jit_free_process(katom);
1353*4882a593Smuzhiyun break;
1354*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1355*4882a593Smuzhiyun kbase_ext_res_process(katom, true);
1356*4882a593Smuzhiyun break;
1357*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1358*4882a593Smuzhiyun kbase_ext_res_process(katom, false);
1359*4882a593Smuzhiyun break;
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun /* Atom is complete */
1363*4882a593Smuzhiyun return 0;
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun
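/**
 * kbase_cancel_soft_job - Cancel a soft job that is waiting to complete
 * @katom: The soft job atom to cancel
 *
 * Only fence-wait and soft-event-wait atoms support cancellation; any other
 * soft job type triggers a debug assertion.
 */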
1366*4882a593Smuzhiyun void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
1367*4882a593Smuzhiyun {
1368*4882a593Smuzhiyun switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1369*4882a593Smuzhiyun #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1370*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_WAIT:
1371*4882a593Smuzhiyun kbase_sync_fence_in_cancel_wait(katom);
1372*4882a593Smuzhiyun break;
1373*4882a593Smuzhiyun #endif
1374*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_WAIT:
1375*4882a593Smuzhiyun kbasep_soft_event_cancel_job(katom);
1376*4882a593Smuzhiyun break;
1377*4882a593Smuzhiyun default:
1378*4882a593Smuzhiyun /* This soft-job doesn't support cancellation! */
1379*4882a593Smuzhiyun KBASE_DEBUG_ASSERT(0);
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun }
1382*4882a593Smuzhiyun
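/**
 * kbase_prepare_soft_job - Validate and set up a soft job at submission time
 * @katom: The soft job atom to prepare
 *
 * Performs the per-type preparation needed before the atom can run: checking
 * the alignment of the time-dump address, creating or importing sync fences,
 * and delegating to the prepare helpers for JIT, debug-copy and external
 * resource jobs.
 *
 * Return: 0 on success, or a negative error code if the atom is invalid.
 */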
1383*4882a593Smuzhiyun int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
1384*4882a593Smuzhiyun {
1385*4882a593Smuzhiyun switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1386*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
1389*4882a593Smuzhiyun return -EINVAL;
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun break;
1392*4882a593Smuzhiyun #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1393*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1394*4882a593Smuzhiyun {
1395*4882a593Smuzhiyun struct base_fence fence;
1396*4882a593Smuzhiyun int fd;
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1399*4882a593Smuzhiyun return -EINVAL;
1400*4882a593Smuzhiyun
1401*4882a593Smuzhiyun fd = kbase_sync_fence_out_create(katom,
1402*4882a593Smuzhiyun fence.basep.stream_fd);
1403*4882a593Smuzhiyun if (fd < 0)
1404*4882a593Smuzhiyun return -EINVAL;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun fence.basep.fd = fd;
1407*4882a593Smuzhiyun if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
1408*4882a593Smuzhiyun kbase_sync_fence_out_remove(katom);
1409*4882a593Smuzhiyun kbase_sync_fence_close_fd(fd);
1410*4882a593Smuzhiyun fence.basep.fd = -EINVAL;
1411*4882a593Smuzhiyun return -EINVAL;
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun break;
1415*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_WAIT:
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun struct base_fence fence;
1418*4882a593Smuzhiyun int ret;
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1421*4882a593Smuzhiyun return -EINVAL;
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun /* Get a reference to the fence object */
1424*4882a593Smuzhiyun ret = kbase_sync_fence_in_from_fd(katom,
1425*4882a593Smuzhiyun fence.basep.fd);
1426*4882a593Smuzhiyun if (ret < 0)
1427*4882a593Smuzhiyun return ret;
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun #ifdef CONFIG_MALI_DMA_FENCE
1430*4882a593Smuzhiyun /*
1431*4882a593Smuzhiyun * Set KCTX_NO_IMPLICIT_SYNC in the context the first
1432*4882a593Smuzhiyun * time a soft fence wait job is observed. This prevents
1433*4882a593Smuzhiyun * the implicit dma-buf fences from conflicting with the
1434*4882a593Smuzhiyun * Android native sync fences.
1435*4882a593Smuzhiyun */
1436*4882a593Smuzhiyun if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
1437*4882a593Smuzhiyun kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
1438*4882a593Smuzhiyun #endif /* CONFIG_MALI_DMA_FENCE */
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun break;
1441*4882a593Smuzhiyun #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
1442*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_JIT_ALLOC:
1443*4882a593Smuzhiyun return kbase_jit_allocate_prepare(katom);
1444*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_REPLAY:
1445*4882a593Smuzhiyun break;
1446*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_JIT_FREE:
1447*4882a593Smuzhiyun return kbase_jit_free_prepare(katom);
1448*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_WAIT:
1449*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_SET:
1450*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EVENT_RESET:
1451*4882a593Smuzhiyun if (katom->jc == 0)
1452*4882a593Smuzhiyun return -EINVAL;
1453*4882a593Smuzhiyun break;
1454*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_DEBUG_COPY:
1455*4882a593Smuzhiyun return kbase_debug_copy_prepare(katom);
1456*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1457*4882a593Smuzhiyun return kbase_ext_res_prepare(katom);
1458*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1459*4882a593Smuzhiyun return kbase_ext_res_prepare(katom);
1460*4882a593Smuzhiyun default:
1461*4882a593Smuzhiyun /* Unsupported soft-job */
1462*4882a593Smuzhiyun return -EINVAL;
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun return 0;
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun
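/**
 * kbase_finish_soft_job - Release the resources held by a completed soft job
 * @katom: The soft job atom being finished
 *
 * Performs the per-type clean-up for the atom: signalling a not yet
 * signalled out-fence, dropping the reference to an in-fence, and freeing
 * structures that were set up during preparation.
 */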
1467*4882a593Smuzhiyun void kbase_finish_soft_job(struct kbase_jd_atom *katom)
1468*4882a593Smuzhiyun {
1469*4882a593Smuzhiyun switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1470*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1471*4882a593Smuzhiyun /* Nothing to do */
1472*4882a593Smuzhiyun break;
1473*4882a593Smuzhiyun #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1474*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1475*4882a593Smuzhiyun /* If fence has not yet been signaled, do it now */
1476*4882a593Smuzhiyun kbase_sync_fence_out_trigger(katom, katom->event_code ==
1477*4882a593Smuzhiyun BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1478*4882a593Smuzhiyun break;
1479*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_FENCE_WAIT:
1480*4882a593Smuzhiyun /* Release katom's reference to fence object */
1481*4882a593Smuzhiyun kbase_sync_fence_in_remove(katom);
1482*4882a593Smuzhiyun break;
1483*4882a593Smuzhiyun #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
1484*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_DEBUG_COPY:
1485*4882a593Smuzhiyun kbase_debug_copy_finish(katom);
1486*4882a593Smuzhiyun break;
1487*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_JIT_ALLOC:
1488*4882a593Smuzhiyun kbase_jit_allocate_finish(katom);
1489*4882a593Smuzhiyun break;
1490*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1491*4882a593Smuzhiyun kbase_ext_res_finish(katom);
1492*4882a593Smuzhiyun break;
1493*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1494*4882a593Smuzhiyun kbase_ext_res_finish(katom);
1495*4882a593Smuzhiyun break;
1496*4882a593Smuzhiyun case BASE_JD_REQ_SOFT_JIT_FREE:
1497*4882a593Smuzhiyun kbase_jit_free_finish(katom);
1498*4882a593Smuzhiyun break;
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun
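/**
 * kbase_resume_suspended_soft_jobs - Re-run soft jobs deferred while suspended
 * @kbdev: Device whose suspended soft jobs should be resumed
 *
 * Splices the device's list of suspended soft jobs onto a local list, then
 * processes each atom again under its context lock, completing the atoms
 * that finish and kicking the scheduler if any completion may allow more
 * jobs to run.
 */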
1502*4882a593Smuzhiyun void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun LIST_HEAD(local_suspended_soft_jobs);
1505*4882a593Smuzhiyun struct kbase_jd_atom *tmp_iter;
1506*4882a593Smuzhiyun struct kbase_jd_atom *katom_iter;
1507*4882a593Smuzhiyun struct kbasep_js_device_data *js_devdata;
1508*4882a593Smuzhiyun bool resched = false;
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun KBASE_DEBUG_ASSERT(kbdev);
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun js_devdata = &kbdev->js_data;
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun /* Move out the entire list */
1515*4882a593Smuzhiyun mutex_lock(&js_devdata->runpool_mutex);
1516*4882a593Smuzhiyun list_splice_init(&js_devdata->suspended_soft_jobs_list,
1517*4882a593Smuzhiyun &local_suspended_soft_jobs);
1518*4882a593Smuzhiyun mutex_unlock(&js_devdata->runpool_mutex);
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun /*
1521*4882a593Smuzhiyun * Each atom must be detached from the list and run separately -
1522*4882a593Smuzhiyun * it could be re-added to the old list, but this is unlikely.
1523*4882a593Smuzhiyun */
1524*4882a593Smuzhiyun list_for_each_entry_safe(katom_iter, tmp_iter,
1525*4882a593Smuzhiyun &local_suspended_soft_jobs, dep_item[1]) {
1526*4882a593Smuzhiyun struct kbase_context *kctx = katom_iter->kctx;
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun mutex_lock(&kctx->jctx.lock);
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun /* Remove from the global list */
1531*4882a593Smuzhiyun list_del(&katom_iter->dep_item[1]);
1532*4882a593Smuzhiyun /* Remove from the context's list of waiting soft jobs */
1533*4882a593Smuzhiyun kbasep_remove_waiting_soft_job(katom_iter);
1534*4882a593Smuzhiyun
1535*4882a593Smuzhiyun if (kbase_process_soft_job(katom_iter) == 0) {
1536*4882a593Smuzhiyun kbase_finish_soft_job(katom_iter);
1537*4882a593Smuzhiyun resched |= jd_done_nolock(katom_iter, NULL);
1538*4882a593Smuzhiyun } else {
1539*4882a593Smuzhiyun KBASE_DEBUG_ASSERT((katom_iter->core_req &
1540*4882a593Smuzhiyun BASE_JD_REQ_SOFT_JOB_TYPE)
1541*4882a593Smuzhiyun != BASE_JD_REQ_SOFT_REPLAY);
1542*4882a593Smuzhiyun }
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun mutex_unlock(&kctx->jctx.lock);
1545*4882a593Smuzhiyun }
1546*4882a593Smuzhiyun
1547*4882a593Smuzhiyun if (resched)
1548*4882a593Smuzhiyun kbase_js_sched_all(kbdev);
1549*4882a593Smuzhiyun }
1550