1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3 *
4 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 #include <mali_kbase.h>
23 #include <tl/mali_kbase_tracepoints.h>
24 #include <mali_kbase_ctx_sched.h>
25 #include "device/mali_kbase_device.h"
26 #include "mali_kbase_csf.h"
27 #include <linux/export.h>
28
29 #if IS_ENABLED(CONFIG_SYNC_FILE)
30 #include "mali_kbase_fence.h"
31 #include "mali_kbase_sync.h"
32
33 static DEFINE_SPINLOCK(kbase_csf_fence_lock);
34 #endif
35
36 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
37 #define FENCE_WAIT_TIMEOUT_MS 3000
38 #endif
39
40 static void kcpu_queue_process(struct kbase_kcpu_command_queue *kcpu_queue,
41 bool drain_queue);
42
43 static void kcpu_queue_process_worker(struct work_struct *data);
44
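/**
 * kbase_kcpu_map_import_prepare() - Prepare a MAP_IMPORT command
 *
 * @kcpu_queue:      The queue the command is being prepared for
 * @import_info:     Import info provided by userspace, holding the GPU VA
 *                   (handle) of the imported region
 * @current_command: The command slot being filled in
 *
 * Looks up the region enclosing the handle and, for USER_BUFFER imports,
 * pins the backing pages while still in process context. The dma mapping
 * and GPU mapping are deferred until the command is executed.
 *
 * Return: 0 on success, a negative error code otherwise.
 */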
45 static int kbase_kcpu_map_import_prepare(
46 struct kbase_kcpu_command_queue *kcpu_queue,
47 struct base_kcpu_command_import_info *import_info,
48 struct kbase_kcpu_command *current_command)
49 {
50 struct kbase_context *const kctx = kcpu_queue->kctx;
51 struct kbase_va_region *reg;
52 struct kbase_mem_phy_alloc *alloc;
53 struct page **pages;
54 struct tagged_addr *pa;
55 long i;
56 int ret = 0;
57
58 lockdep_assert_held(&kcpu_queue->lock);
59
60 /* Take the process's mmap lock */
61 down_read(kbase_mem_get_process_mmap_lock());
62 kbase_gpu_vm_lock(kctx);
63
64 reg = kbase_region_tracker_find_region_enclosing_address(kctx,
65 import_info->handle);
66
67 if (kbase_is_region_invalid_or_free(reg) ||
68 !kbase_mem_is_imported(reg->gpu_alloc->type)) {
69 ret = -EINVAL;
70 goto out;
71 }
72
73 if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
74 /* Pin the physical pages backing the user buffer while
75 * we are in the process context and holding the mmap lock.
76 * The dma mapping & GPU mapping of the pages would be done
77 * when the MAP_IMPORT operation is executed.
78 *
79 * Though the pages would be pinned, no reference is taken
80 * on the physical pages tracking object. When the last
81 * reference to the tracking object is dropped the pages
82 * would be unpinned if they weren't unpinned before.
83 *
84 * Region should be CPU cached: abort if it isn't.
85 */
86 if (WARN_ON(!(reg->flags & KBASE_REG_CPU_CACHED))) {
87 ret = -EINVAL;
88 goto out;
89 }
90
91 ret = kbase_jd_user_buf_pin_pages(kctx, reg);
92 if (ret)
93 goto out;
94
95 alloc = reg->gpu_alloc;
96 pa = kbase_get_gpu_phy_pages(reg);
97 pages = alloc->imported.user_buf.pages;
98
99 for (i = 0; i < alloc->nents; i++)
100 pa[i] = as_tagged(page_to_phys(pages[i]));
101 }
102
103 current_command->type = BASE_KCPU_COMMAND_TYPE_MAP_IMPORT;
104 current_command->info.import.gpu_va = import_info->handle;
105
106 out:
107 kbase_gpu_vm_unlock(kctx);
108 /* Release the process's mmap lock */
109 up_read(kbase_mem_get_process_mmap_lock());
110
111 return ret;
112 }
113
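/**
 * kbase_kcpu_unmap_import_prepare_internal() - Common helper to prepare an
 *                                              UNMAP_IMPORT(_FORCE) command
 *
 * @kcpu_queue:      The queue the command is being prepared for
 * @import_info:     Import info holding the GPU VA of the imported region
 * @current_command: The command slot being filled in
 * @type:            Either BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT or
 *                   BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE
 *
 * Validates that the handle refers to an imported region and, for user
 * buffers, that its pages were pinned by a preceding MAP_IMPORT.
 *
 * Return: 0 on success, -EINVAL otherwise.
 */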
114 static int kbase_kcpu_unmap_import_prepare_internal(
115 struct kbase_kcpu_command_queue *kcpu_queue,
116 struct base_kcpu_command_import_info *import_info,
117 struct kbase_kcpu_command *current_command,
118 enum base_kcpu_command_type type)
119 {
120 struct kbase_context *const kctx = kcpu_queue->kctx;
121 struct kbase_va_region *reg;
122 int ret = 0;
123
124 lockdep_assert_held(&kcpu_queue->lock);
125
126 kbase_gpu_vm_lock(kctx);
127
128 reg = kbase_region_tracker_find_region_enclosing_address(kctx,
129 import_info->handle);
130
131 if (kbase_is_region_invalid_or_free(reg) ||
132 !kbase_mem_is_imported(reg->gpu_alloc->type)) {
133 ret = -EINVAL;
134 goto out;
135 }
136
137 if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
138 /* The pages should have been pinned when MAP_IMPORT
139 * was enqueued previously.
140 */
141 if (reg->gpu_alloc->nents !=
142 reg->gpu_alloc->imported.user_buf.nr_pages) {
143 ret = -EINVAL;
144 goto out;
145 }
146 }
147
148 current_command->type = type;
149 current_command->info.import.gpu_va = import_info->handle;
150
151 out:
152 kbase_gpu_vm_unlock(kctx);
153
154 return ret;
155 }
156
157 static int kbase_kcpu_unmap_import_prepare(
158 struct kbase_kcpu_command_queue *kcpu_queue,
159 struct base_kcpu_command_import_info *import_info,
160 struct kbase_kcpu_command *current_command)
161 {
162 return kbase_kcpu_unmap_import_prepare_internal(kcpu_queue,
163 import_info, current_command,
164 BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT);
165 }
166
167 static int kbase_kcpu_unmap_import_force_prepare(
168 struct kbase_kcpu_command_queue *kcpu_queue,
169 struct base_kcpu_command_import_info *import_info,
170 struct kbase_kcpu_command *current_command)
171 {
172 return kbase_kcpu_unmap_import_prepare_internal(kcpu_queue,
173 import_info, current_command,
174 BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE);
175 }
176
177 /**
178 * kbase_jit_add_to_pending_alloc_list() - Pend JIT allocation
179 *
180 * @queue: The queue containing this JIT allocation
181 * @cmd: The JIT allocation that is blocking this queue
182 */
183 static void kbase_jit_add_to_pending_alloc_list(
184 struct kbase_kcpu_command_queue *queue,
185 struct kbase_kcpu_command *cmd)
186 {
187 struct kbase_context *const kctx = queue->kctx;
188 struct list_head *target_list_head =
189 &kctx->csf.kcpu_queues.jit_blocked_queues;
190 struct kbase_kcpu_command_queue *blocked_queue;
191
192 lockdep_assert_held(&queue->lock);
193 lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
194
195 list_for_each_entry(blocked_queue,
196 &kctx->csf.kcpu_queues.jit_blocked_queues,
197 jit_blocked) {
198 struct kbase_kcpu_command const *const jit_alloc_cmd =
199 &blocked_queue->commands[blocked_queue->start_offset];
200
201 WARN_ON(jit_alloc_cmd->type != BASE_KCPU_COMMAND_TYPE_JIT_ALLOC);
202 if (cmd->enqueue_ts < jit_alloc_cmd->enqueue_ts) {
203 target_list_head = &blocked_queue->jit_blocked;
204 break;
205 }
206 }
207
208 list_add_tail(&queue->jit_blocked, target_list_head);
209 }
210
211 /**
212 * kbase_kcpu_jit_allocate_process() - Process JIT allocation
213 *
214 * @queue: The queue containing this JIT allocation
215 * @cmd: The JIT allocation command
216 *
217 * Return:
218 * * 0 - allocation OK
219 * * -EINVAL - missing info or JIT ID still in use
220 * * -EAGAIN - Retry
221 * -ENOMEM - out of memory, unable to allocate
222 */
223 static int kbase_kcpu_jit_allocate_process(
224 struct kbase_kcpu_command_queue *queue,
225 struct kbase_kcpu_command *cmd)
226 {
227 struct kbase_context *const kctx = queue->kctx;
228 struct kbase_kcpu_command_jit_alloc_info *alloc_info =
229 &cmd->info.jit_alloc;
230 struct base_jit_alloc_info *info = alloc_info->info;
231 struct kbase_vmap_struct mapping;
232 struct kbase_va_region *reg;
233 u32 count = alloc_info->count;
234 u64 *ptr, new_addr;
235 u32 i;
236 int ret;
237
238 lockdep_assert_held(&queue->lock);
239
240 if (WARN_ON(!info))
241 return -EINVAL;
242
243 mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
244
245 /* Check that none of the JIT IDs are already in use */
246 for (i = 0; i < count; i++, info++) {
247 /* The JIT ID is still in use so fail the allocation */
248 if (kctx->jit_alloc[info->id]) {
249 dev_dbg(kctx->kbdev->dev, "JIT ID still in use");
250 ret = -EINVAL;
251 goto fail;
252 }
253 }
254
255 if (alloc_info->blocked) {
256 list_del(&queue->jit_blocked);
257 alloc_info->blocked = false;
258 }
259
260 /* Now start the allocation loop */
261 for (i = 0, info = alloc_info->info; i < count; i++, info++) {
262 /* Create a JIT allocation */
263 reg = kbase_jit_allocate(kctx, info, true);
264 if (!reg) {
265 bool can_block = false;
266 struct kbase_kcpu_command const *jit_cmd;
267
268 list_for_each_entry(jit_cmd, &kctx->csf.kcpu_queues.jit_cmds_head, info.jit_alloc.node) {
269 if (jit_cmd == cmd)
270 break;
271
272 if (jit_cmd->type == BASE_KCPU_COMMAND_TYPE_JIT_FREE) {
273 u8 const *const free_ids = jit_cmd->info.jit_free.ids;
274
275 if (free_ids && *free_ids && kctx->jit_alloc[*free_ids]) {
276 /*
277 * A JIT free which is active
278 * and submitted before this
279 * command.
280 */
281 can_block = true;
282 break;
283 }
284 }
285 }
286
287 if (!can_block) {
288 /*
289 * No prior JIT_FREE command is active. Roll
290 * back previous allocations and fail.
291 */
292 dev_warn_ratelimited(kctx->kbdev->dev, "JIT alloc command failed: %pK\n", cmd);
293 ret = -ENOMEM;
294 goto fail_rollback;
295 }
296
297 /* There are pending frees for an active allocation
298 * so we should wait to see whether they free the
299 * memory. Add this queue to the list of queues for
300 * which JIT allocation is pending.
301 */
302 kbase_jit_add_to_pending_alloc_list(queue, cmd);
303 alloc_info->blocked = true;
304
305 /* Rollback, the whole set will be re-attempted */
306 while (i-- > 0) {
307 info--;
308 kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
309 kctx->jit_alloc[info->id] = NULL;
310 }
311
312 ret = -EAGAIN;
313 goto fail;
314 }
315
316 /* Bind it to the user provided ID. */
317 kctx->jit_alloc[info->id] = reg;
318 }
319
320 for (i = 0, info = alloc_info->info; i < count; i++, info++) {
321 /*
322 * Write the address of the JIT allocation to the user provided
323 * GPU allocation.
324 */
325 ptr = kbase_vmap_prot(kctx, info->gpu_alloc_addr, sizeof(*ptr),
326 KBASE_REG_CPU_WR, &mapping);
327 if (!ptr) {
328 ret = -ENOMEM;
329 goto fail_rollback;
330 }
331
332 reg = kctx->jit_alloc[info->id];
333 new_addr = reg->start_pfn << PAGE_SHIFT;
334 *ptr = new_addr;
335 kbase_vunmap(kctx, &mapping);
336 }
337
338 mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
339
340 return 0;
341
342 fail_rollback:
343 /* Roll back completely */
344 for (i = 0, info = alloc_info->info; i < count; i++, info++) {
345 /* Free the allocations that were successful.
346 * Mark all the allocations including the failed one and the
347 * other un-attempted allocations in the set, so we know they
348 * are in use.
349 */
350 if (kctx->jit_alloc[info->id])
351 kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
352
353 kctx->jit_alloc[info->id] = KBASE_RESERVED_REG_JIT_ALLOC;
354 }
355 fail:
356 mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
357
358 return ret;
359 }
360
361 static int kbase_kcpu_jit_allocate_prepare(
362 struct kbase_kcpu_command_queue *kcpu_queue,
363 struct base_kcpu_command_jit_alloc_info *alloc_info,
364 struct kbase_kcpu_command *current_command)
365 {
366 struct kbase_context *const kctx = kcpu_queue->kctx;
367 void __user *data = u64_to_user_ptr(alloc_info->info);
368 struct base_jit_alloc_info *info = NULL;
369 u32 count = alloc_info->count;
370 int ret = 0;
371 u32 i;
372
373 lockdep_assert_held(&kcpu_queue->lock);
374
375 if ((count == 0) || (count > ARRAY_SIZE(kctx->jit_alloc)) ||
376 (count > kcpu_queue->kctx->jit_max_allocations) || (!data) ||
377 !kbase_mem_allow_alloc(kctx)) {
378 ret = -EINVAL;
379 goto out;
380 }
381
382 info = kmalloc_array(count, sizeof(*info), GFP_KERNEL);
383 if (!info) {
384 ret = -ENOMEM;
385 goto out;
386 }
387
388 if (copy_from_user(info, data, sizeof(*info) * count) != 0) {
389 ret = -EINVAL;
390 goto out_free;
391 }
392
393 for (i = 0; i < count; i++) {
394 ret = kbasep_jit_alloc_validate(kctx, &info[i]);
395 if (ret)
396 goto out_free;
397 }
398
399 /* Search for duplicate JIT ids */
400 for (i = 0; i < (count - 1); i++) {
401 u32 j;
402
403 for (j = (i + 1); j < count; j++) {
404 if (info[i].id == info[j].id) {
405 ret = -EINVAL;
406 goto out_free;
407 }
408 }
409 }
410
411 current_command->type = BASE_KCPU_COMMAND_TYPE_JIT_ALLOC;
412 current_command->info.jit_alloc.info = info;
413 current_command->info.jit_alloc.count = count;
414 current_command->info.jit_alloc.blocked = false;
415 mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
416 list_add_tail(&current_command->info.jit_alloc.node,
417 &kctx->csf.kcpu_queues.jit_cmds_head);
418 mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
419
420 return 0;
421 out_free:
422 kfree(info);
423 out:
424 return ret;
425 }
426
427 /**
428 * kbase_kcpu_jit_allocate_finish() - Finish handling the JIT_ALLOC command
429 *
430 * @queue: The queue containing this JIT allocation
431 * @cmd: The JIT allocation command
432 */
433 static void kbase_kcpu_jit_allocate_finish(
434 struct kbase_kcpu_command_queue *queue,
435 struct kbase_kcpu_command *cmd)
436 {
437 lockdep_assert_held(&queue->lock);
438
439 mutex_lock(&queue->kctx->csf.kcpu_queues.jit_lock);
440
441 /* Remove this command from the jit_cmds_head list */
442 list_del(&cmd->info.jit_alloc.node);
443
444 /*
445 * If we get to this point we must have already cleared the blocked
446 * flag, otherwise it'd be a bug.
447 */
448 if (WARN_ON(cmd->info.jit_alloc.blocked)) {
449 list_del(&queue->jit_blocked);
450 cmd->info.jit_alloc.blocked = false;
451 }
452
453 mutex_unlock(&queue->kctx->csf.kcpu_queues.jit_lock);
454
455 kfree(cmd->info.jit_alloc.info);
456 }
457
458 /**
459 * kbase_kcpu_jit_retry_pending_allocs() - Retry blocked JIT_ALLOC commands
460 *
461 * @kctx: The context containing the blocked JIT_ALLOC commands
462 */
463 static void kbase_kcpu_jit_retry_pending_allocs(struct kbase_context *kctx)
464 {
465 struct kbase_kcpu_command_queue *blocked_queue;
466
467 lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
468
469 /*
470 * Reschedule all queues blocked by JIT_ALLOC commands.
471 * NOTE: This code traverses the list of blocked queues directly. It
472 * only works as long as the queued works are not executed at the same
473 * time. This precondition is true since we're holding the
474 * kbase_csf_kcpu_queue_context.jit_lock.
475 */
476 list_for_each_entry(blocked_queue, &kctx->csf.kcpu_queues.jit_blocked_queues, jit_blocked)
477 queue_work(blocked_queue->wq, &blocked_queue->work);
478 }
479
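/**
 * kbase_kcpu_jit_free_process() - Process a JIT_FREE command
 *
 * @queue: The queue containing this JIT free command
 * @cmd:   The JIT free command
 *
 * Frees every JIT allocation referenced by the command's ID list, emits the
 * corresponding tracepoints, then retries any queues blocked on pending JIT
 * allocations.
 *
 * Return: 0 on success, -EINVAL if an ID does not map to a live allocation.
 */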
480 static int kbase_kcpu_jit_free_process(struct kbase_kcpu_command_queue *queue,
481 struct kbase_kcpu_command *const cmd)
482 {
483 struct kbase_kcpu_command_jit_free_info const *const free_info =
484 &cmd->info.jit_free;
485 u8 const *const ids = free_info->ids;
486 u32 const count = free_info->count;
487 u32 i;
488 int rc = 0;
489 struct kbase_context *kctx = queue->kctx;
490
491 if (WARN_ON(!ids))
492 return -EINVAL;
493
494 lockdep_assert_held(&queue->lock);
495 mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
496
497 KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END(queue->kctx->kbdev,
498 queue);
499
500 for (i = 0; i < count; i++) {
501 u64 pages_used = 0;
502 int item_err = 0;
503
504 if (!kctx->jit_alloc[ids[i]]) {
505 dev_dbg(kctx->kbdev->dev, "invalid JIT free ID");
506 rc = -EINVAL;
507 item_err = rc;
508 } else {
509 struct kbase_va_region *const reg = kctx->jit_alloc[ids[i]];
510
511 /*
512 * If the ID is valid but the allocation request failed, still
513 * succeed this command but don't try to free the allocation.
514 */
515 if (reg != KBASE_RESERVED_REG_JIT_ALLOC) {
516 pages_used = reg->gpu_alloc->nents;
517 kbase_jit_free(kctx, reg);
518 }
519
520 kctx->jit_alloc[ids[i]] = NULL;
521 }
522
523 KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END(
524 queue->kctx->kbdev, queue, item_err, pages_used);
525 }
526
527 /*
528 * Remove this command from the jit_cmds_head list and retry pending
529 * allocations.
530 */
531 list_del(&cmd->info.jit_free.node);
532 kbase_kcpu_jit_retry_pending_allocs(kctx);
533
534 mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
535
536 /* Free the list of ids */
537 kfree(ids);
538
539 return rc;
540 }
541
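/**
 * kbase_kcpu_jit_free_prepare() - Prepare a JIT_FREE command
 *
 * @kcpu_queue:      The queue the command is being prepared for
 * @free_info:       Userspace description of the JIT IDs to free
 * @current_command: The command slot being filled in
 *
 * Copies and validates the ID list (non-zero IDs, no duplicates) and links
 * the command into the context's jit_cmds_head list.
 *
 * Return: 0 on success, -EINVAL on invalid input or -ENOMEM on allocation
 * failure.
 */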
542 static int kbase_kcpu_jit_free_prepare(
543 struct kbase_kcpu_command_queue *kcpu_queue,
544 struct base_kcpu_command_jit_free_info *free_info,
545 struct kbase_kcpu_command *current_command)
546 {
547 struct kbase_context *const kctx = kcpu_queue->kctx;
548 void __user *data = u64_to_user_ptr(free_info->ids);
549 u8 *ids;
550 u32 count = free_info->count;
551 int ret;
552 u32 i;
553
554 lockdep_assert_held(&kcpu_queue->lock);
555
556 /* Sanity checks */
557 if (!count || count > ARRAY_SIZE(kctx->jit_alloc)) {
558 ret = -EINVAL;
559 goto out;
560 }
561
562 /* Copy the information for safe access and future storage */
563 ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
564 if (!ids) {
565 ret = -ENOMEM;
566 goto out;
567 }
568
569 if (!data) {
570 ret = -EINVAL;
571 goto out_free;
572 }
573
574 if (copy_from_user(ids, data, sizeof(*ids) * count)) {
575 ret = -EINVAL;
576 goto out_free;
577 }
578
579 for (i = 0; i < count; i++) {
580 /* Fail the command if ID sent is zero */
581 if (!ids[i]) {
582 ret = -EINVAL;
583 goto out_free;
584 }
585 }
586
587 /* Search for duplicate JIT ids */
588 for (i = 0; i < (count - 1); i++) {
589 u32 j;
590
591 for (j = (i + 1); j < count; j++) {
592 if (ids[i] == ids[j]) {
593 ret = -EINVAL;
594 goto out_free;
595 }
596 }
597 }
598
599 current_command->type = BASE_KCPU_COMMAND_TYPE_JIT_FREE;
600 current_command->info.jit_free.ids = ids;
601 current_command->info.jit_free.count = count;
602 mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
603 list_add_tail(&current_command->info.jit_free.node,
604 &kctx->csf.kcpu_queues.jit_cmds_head);
605 mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
606
607 return 0;
608 out_free:
609 kfree(ids);
610 out:
611 return ret;
612 }
613
614 #if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
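/**
 * kbase_csf_queue_group_suspend_prepare() - Prepare a GROUP_SUSPEND command
 *
 * @kcpu_queue:      The queue the command is being prepared for
 * @suspend_buf:     Userspace description of the buffer that will receive
 *                   the group's suspend state
 * @current_command: The command slot being filled in
 *
 * The buffer address may be either a CPU VA, whose pages are pinned with
 * get_user_pages_fast(), or a GPU VA from the SAME_VA zone backed by
 * native, CPU-writable memory.
 *
 * Return: 0 on success, a negative error code otherwise.
 */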
615 static int kbase_csf_queue_group_suspend_prepare(
616 struct kbase_kcpu_command_queue *kcpu_queue,
617 struct base_kcpu_command_group_suspend_info *suspend_buf,
618 struct kbase_kcpu_command *current_command)
619 {
620 struct kbase_context *const kctx = kcpu_queue->kctx;
621 struct kbase_suspend_copy_buffer *sus_buf = NULL;
622 const u32 csg_suspend_buf_size =
623 kctx->kbdev->csf.global_iface.groups[0].suspend_size;
624 u64 addr = suspend_buf->buffer;
625 u64 page_addr = addr & PAGE_MASK;
626 u64 end_addr = addr + csg_suspend_buf_size - 1;
627 u64 last_page_addr = end_addr & PAGE_MASK;
628 int nr_pages = (last_page_addr - page_addr) / PAGE_SIZE + 1;
629 int pinned_pages = 0, ret = 0;
630 struct kbase_va_region *reg;
631
632 lockdep_assert_held(&kcpu_queue->lock);
633
634 if (suspend_buf->size < csg_suspend_buf_size)
635 return -EINVAL;
636
637 ret = kbase_csf_queue_group_handle_is_valid(kctx,
638 suspend_buf->group_handle);
639 if (ret)
640 return ret;
641
642 sus_buf = kzalloc(sizeof(*sus_buf), GFP_KERNEL);
643 if (!sus_buf)
644 return -ENOMEM;
645
646 sus_buf->size = csg_suspend_buf_size;
647 sus_buf->nr_pages = nr_pages;
648 sus_buf->offset = addr & ~PAGE_MASK;
649
650 sus_buf->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
651 if (!sus_buf->pages) {
652 ret = -ENOMEM;
653 goto out_clean_sus_buf;
654 }
655
656 /* Check if page_addr is a valid GPU VA from the SAME_VA zone;
657 * otherwise treat it as a CPU VA corresponding to the host
658 * memory allocated by userspace.
659 */
660 kbase_gpu_vm_lock(kctx);
661 reg = kbase_region_tracker_find_region_enclosing_address(kctx,
662 page_addr);
663
664 if (kbase_is_region_invalid_or_free(reg)) {
665 kbase_gpu_vm_unlock(kctx);
666 pinned_pages = get_user_pages_fast(page_addr, nr_pages, 1,
667 sus_buf->pages);
668 kbase_gpu_vm_lock(kctx);
669
670 if (pinned_pages < 0) {
671 ret = pinned_pages;
672 goto out_clean_pages;
673 }
674 if (pinned_pages != nr_pages) {
675 ret = -EINVAL;
676 goto out_clean_pages;
677 }
678 } else {
679 struct tagged_addr *page_array;
680 u64 start, end, i;
681
682 if (((reg->flags & KBASE_REG_ZONE_MASK) != KBASE_REG_ZONE_SAME_VA) ||
683 (kbase_reg_current_backed_size(reg) < nr_pages) ||
684 !(reg->flags & KBASE_REG_CPU_WR) ||
685 (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) ||
686 (kbase_is_region_shrinkable(reg)) || (kbase_va_region_is_no_user_free(reg))) {
687 ret = -EINVAL;
688 goto out_clean_pages;
689 }
690
691 start = PFN_DOWN(page_addr) - reg->start_pfn;
692 end = start + nr_pages;
693
694 if (end > reg->nr_pages) {
695 ret = -EINVAL;
696 goto out_clean_pages;
697 }
698
699 sus_buf->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
700 kbase_mem_phy_alloc_kernel_mapped(reg->cpu_alloc);
701 page_array = kbase_get_cpu_phy_pages(reg);
702 page_array += start;
703
704 for (i = 0; i < nr_pages; i++, page_array++)
705 sus_buf->pages[i] = as_page(*page_array);
706 }
707
708 kbase_gpu_vm_unlock(kctx);
709 current_command->type = BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND;
710 current_command->info.suspend_buf_copy.sus_buf = sus_buf;
711 current_command->info.suspend_buf_copy.group_handle =
712 suspend_buf->group_handle;
713 return ret;
714
715 out_clean_pages:
716 kbase_gpu_vm_unlock(kctx);
717 kfree(sus_buf->pages);
718 out_clean_sus_buf:
719 kfree(sus_buf);
720
721 return ret;
722 }
723
724 static int kbase_csf_queue_group_suspend_process(struct kbase_context *kctx,
725 struct kbase_suspend_copy_buffer *sus_buf,
726 u8 group_handle)
727 {
728 return kbase_csf_queue_group_suspend(kctx, sus_buf, group_handle);
729 }
730 #endif
731
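/**
 * event_cqs_callback() - CSF event callback for CQS wait commands
 *
 * @param: The KCPU command queue registered with the callback
 *
 * Re-queues the queue's worker so pending CQS wait commands are
 * re-evaluated whenever a CQS object may have been signalled.
 *
 * Return: KBASE_CSF_EVENT_CALLBACK_KEEP so the callback stays registered.
 */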
732 static enum kbase_csf_event_callback_action event_cqs_callback(void *param)
733 {
734 struct kbase_kcpu_command_queue *kcpu_queue =
735 (struct kbase_kcpu_command_queue *)param;
736
737 queue_work(kcpu_queue->wq, &kcpu_queue->work);
738
739 return KBASE_CSF_EVENT_CALLBACK_KEEP;
740 }
741
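/**
 * cleanup_cqs_wait() - Release the resources of a CQS_WAIT command
 *
 * @queue:    The queue the command belongs to
 * @cqs_wait: The CQS wait command being cleaned up
 *
 * Drops the queue's CQS wait count, removing the event callback when the
 * last waiter goes away, and frees the objs and signaled arrays.
 */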
742 static void cleanup_cqs_wait(struct kbase_kcpu_command_queue *queue,
743 struct kbase_kcpu_command_cqs_wait_info *cqs_wait)
744 {
745 WARN_ON(!cqs_wait->nr_objs);
746 WARN_ON(!cqs_wait->objs);
747 WARN_ON(!cqs_wait->signaled);
748 WARN_ON(!queue->cqs_wait_count);
749
750 if (--queue->cqs_wait_count == 0) {
751 kbase_csf_event_wait_remove(queue->kctx,
752 event_cqs_callback, queue);
753 }
754
755 kfree(cqs_wait->signaled);
756 kfree(cqs_wait->objs);
757 cqs_wait->signaled = NULL;
758 cqs_wait->objs = NULL;
759 }
760
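/**
 * kbase_kcpu_cqs_wait_process() - Evaluate a CQS_WAIT command
 *
 * @kbdev:    Device the queue belongs to
 * @queue:    The queue containing the command
 * @cqs_wait: The CQS wait command
 *
 * Walks the not-yet-signalled CQS objects in order, marking each one whose
 * 32-bit value has exceeded the wait value and propagating errors when
 * inherit_err_flags requests it. Processing stops at the first object that
 * is still unsignalled.
 *
 * Return: non-zero when every object is signalled (the queue can progress),
 * 0 if the command is still blocked, or a negative error code.
 */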
761 static int kbase_kcpu_cqs_wait_process(struct kbase_device *kbdev,
762 struct kbase_kcpu_command_queue *queue,
763 struct kbase_kcpu_command_cqs_wait_info *cqs_wait)
764 {
765 u32 i;
766
767 lockdep_assert_held(&queue->lock);
768
769 if (WARN_ON(!cqs_wait->objs))
770 return -EINVAL;
771
772 /* Skip the CQS waits that have already been signaled when processing */
773 for (i = find_first_zero_bit(cqs_wait->signaled, cqs_wait->nr_objs); i < cqs_wait->nr_objs; i++) {
774 if (!test_bit(i, cqs_wait->signaled)) {
775 struct kbase_vmap_struct *mapping;
776 bool sig_set;
777 u32 *evt = (u32 *)kbase_phy_alloc_mapping_get(queue->kctx,
778 cqs_wait->objs[i].addr, &mapping);
779
780 if (!queue->command_started) {
781 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START(kbdev,
782 queue);
783 queue->command_started = true;
784 KBASE_KTRACE_ADD_CSF_KCPU(kbdev, KCPU_CQS_WAIT_START,
785 queue, cqs_wait->nr_objs, 0);
786 }
787
788 if (!evt) {
789 dev_warn(kbdev->dev,
790 "Sync memory %llx already freed", cqs_wait->objs[i].addr);
791 queue->has_error = true;
792 return -EINVAL;
793 }
794
795 sig_set =
796 evt[BASEP_EVENT32_VAL_OFFSET / sizeof(u32)] > cqs_wait->objs[i].val;
797 if (sig_set) {
798 bool error = false;
799
800 bitmap_set(cqs_wait->signaled, i, 1);
801 if ((cqs_wait->inherit_err_flags & (1U << i)) &&
802 evt[BASEP_EVENT32_ERR_OFFSET / sizeof(u32)] > 0) {
803 queue->has_error = true;
804 error = true;
805 }
806
807 KBASE_KTRACE_ADD_CSF_KCPU(kbdev, KCPU_CQS_WAIT_END,
808 queue, cqs_wait->objs[i].addr,
809 error);
810
811 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END(
812 kbdev, queue, evt[BASEP_EVENT32_ERR_OFFSET / sizeof(u32)]);
813 queue->command_started = false;
814 }
815
816 kbase_phy_alloc_mapping_put(queue->kctx, mapping);
817
818 if (!sig_set)
819 break;
820 }
821 }
822
823 /* For the queue to progress further, all cqs objects should get
824 * signaled.
825 */
826 return bitmap_full(cqs_wait->signaled, cqs_wait->nr_objs);
827 }
828
829 static inline bool kbase_kcpu_cqs_is_data_type_valid(u8 data_type)
830 {
831 return data_type == BASEP_CQS_DATA_TYPE_U32 || data_type == BASEP_CQS_DATA_TYPE_U64;
832 }
833
834 static inline bool kbase_kcpu_cqs_is_aligned(u64 addr, u8 data_type)
835 {
836 BUILD_BUG_ON(BASEP_EVENT32_ALIGN_BYTES != BASEP_EVENT32_SIZE_BYTES);
837 BUILD_BUG_ON(BASEP_EVENT64_ALIGN_BYTES != BASEP_EVENT64_SIZE_BYTES);
838 WARN_ON(!kbase_kcpu_cqs_is_data_type_valid(data_type));
839
840 switch (data_type) {
841 default:
842 return false;
843 case BASEP_CQS_DATA_TYPE_U32:
844 return (addr & (BASEP_EVENT32_ALIGN_BYTES - 1)) == 0;
845 case BASEP_CQS_DATA_TYPE_U64:
846 return (addr & (BASEP_EVENT64_ALIGN_BYTES - 1)) == 0;
847 }
848 }
849
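/**
 * kbase_kcpu_cqs_wait_prepare() - Prepare a CQS_WAIT command
 *
 * @queue:           The queue the command is being prepared for
 * @cqs_wait_info:   Userspace description of the CQS objects to wait on
 * @current_command: The command slot being filled in
 *
 * Copies the CQS object descriptors from userspace, checks their alignment,
 * registers the CSF event callback for the first waiter on this queue and
 * allocates the bitmap used to track which objects have signalled.
 *
 * Return: 0 on success, a negative error code otherwise.
 */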
850 static int kbase_kcpu_cqs_wait_prepare(struct kbase_kcpu_command_queue *queue,
851 struct base_kcpu_command_cqs_wait_info *cqs_wait_info,
852 struct kbase_kcpu_command *current_command)
853 {
854 struct base_cqs_wait_info *objs;
855 unsigned int nr_objs = cqs_wait_info->nr_objs;
856 unsigned int i;
857
858 lockdep_assert_held(&queue->lock);
859
860 if (nr_objs > BASEP_KCPU_CQS_MAX_NUM_OBJS)
861 return -EINVAL;
862
863 if (!nr_objs)
864 return -EINVAL;
865
866 objs = kcalloc(nr_objs, sizeof(*objs), GFP_KERNEL);
867 if (!objs)
868 return -ENOMEM;
869
870 if (copy_from_user(objs, u64_to_user_ptr(cqs_wait_info->objs),
871 nr_objs * sizeof(*objs))) {
872 kfree(objs);
873 return -ENOMEM;
874 }
875
876 /* Check the CQS objects as early as possible. By checking their alignment
877 * (required alignment equals the size for Sync32 and Sync64 objects), we can
878 * prevent overrunning the supplied event page.
879 */
880 for (i = 0; i < nr_objs; i++) {
881 if (!kbase_kcpu_cqs_is_aligned(objs[i].addr, BASEP_CQS_DATA_TYPE_U32)) {
882 kfree(objs);
883 return -EINVAL;
884 }
885 }
886
887 if (++queue->cqs_wait_count == 1) {
888 if (kbase_csf_event_wait_add(queue->kctx,
889 event_cqs_callback, queue)) {
890 kfree(objs);
891 queue->cqs_wait_count--;
892 return -ENOMEM;
893 }
894 }
895
896 current_command->type = BASE_KCPU_COMMAND_TYPE_CQS_WAIT;
897 current_command->info.cqs_wait.nr_objs = nr_objs;
898 current_command->info.cqs_wait.objs = objs;
899 current_command->info.cqs_wait.inherit_err_flags =
900 cqs_wait_info->inherit_err_flags;
901
902 current_command->info.cqs_wait.signaled = kcalloc(BITS_TO_LONGS(nr_objs),
903 sizeof(*current_command->info.cqs_wait.signaled), GFP_KERNEL);
904 if (!current_command->info.cqs_wait.signaled) {
905 if (--queue->cqs_wait_count == 0) {
906 kbase_csf_event_wait_remove(queue->kctx,
907 event_cqs_callback, queue);
908 }
909
910 kfree(objs);
911 return -ENOMEM;
912 }
913
914 return 0;
915 }
916
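/**
 * kbase_kcpu_cqs_set_process() - Execute a CQS_SET command
 *
 * @kbdev:   Device the queue belongs to
 * @queue:   The queue containing the command
 * @cqs_set: The CQS set command
 *
 * Increments the 32-bit value of each CQS object to signal it, writes the
 * queue's error state to the object's error field, and notifies the GPU so
 * that waiters can re-evaluate.
 */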
917 static void kbase_kcpu_cqs_set_process(struct kbase_device *kbdev,
918 struct kbase_kcpu_command_queue *queue,
919 struct kbase_kcpu_command_cqs_set_info *cqs_set)
920 {
921 unsigned int i;
922
923 lockdep_assert_held(&queue->lock);
924
925 if (WARN_ON(!cqs_set->objs))
926 return;
927
928 for (i = 0; i < cqs_set->nr_objs; i++) {
929 struct kbase_vmap_struct *mapping;
930 u32 *evt;
931
932 evt = (u32 *)kbase_phy_alloc_mapping_get(
933 queue->kctx, cqs_set->objs[i].addr, &mapping);
934
935 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET(kbdev, queue, evt ? 0 : 1);
936
937 if (!evt) {
938 dev_warn(kbdev->dev,
939 "Sync memory %llx already freed", cqs_set->objs[i].addr);
940 queue->has_error = true;
941 } else {
942 evt[BASEP_EVENT32_ERR_OFFSET / sizeof(u32)] = queue->has_error;
943 /* Set to signaled */
944 evt[BASEP_EVENT32_VAL_OFFSET / sizeof(u32)]++;
945 kbase_phy_alloc_mapping_put(queue->kctx, mapping);
946
947 KBASE_KTRACE_ADD_CSF_KCPU(kbdev, KCPU_CQS_SET, queue, cqs_set->objs[i].addr,
948 evt[BASEP_EVENT32_ERR_OFFSET / sizeof(u32)]);
949 }
950 }
951
952 kbase_csf_event_signal_notify_gpu(queue->kctx);
953
954 kfree(cqs_set->objs);
955 cqs_set->objs = NULL;
956 }
957
958 static int kbase_kcpu_cqs_set_prepare(
959 struct kbase_kcpu_command_queue *kcpu_queue,
960 struct base_kcpu_command_cqs_set_info *cqs_set_info,
961 struct kbase_kcpu_command *current_command)
962 {
963 struct base_cqs_set *objs;
964 unsigned int nr_objs = cqs_set_info->nr_objs;
965 unsigned int i;
966
967 lockdep_assert_held(&kcpu_queue->lock);
968
969 if (nr_objs > BASEP_KCPU_CQS_MAX_NUM_OBJS)
970 return -EINVAL;
971
972 if (!nr_objs)
973 return -EINVAL;
974
975 objs = kcalloc(nr_objs, sizeof(*objs), GFP_KERNEL);
976 if (!objs)
977 return -ENOMEM;
978
979 if (copy_from_user(objs, u64_to_user_ptr(cqs_set_info->objs),
980 nr_objs * sizeof(*objs))) {
981 kfree(objs);
982 return -ENOMEM;
983 }
984
985 /* Check the CQS objects as early as possible. By checking their alignment
986 * (required alignment equals the size for Sync32 and Sync64 objects), we can
987 * prevent overrunning the supplied event page.
988 */
989 for (i = 0; i < nr_objs; i++) {
990 if (!kbase_kcpu_cqs_is_aligned(objs[i].addr, BASEP_CQS_DATA_TYPE_U32)) {
991 kfree(objs);
992 return -EINVAL;
993 }
994 }
995
996 current_command->type = BASE_KCPU_COMMAND_TYPE_CQS_SET;
997 current_command->info.cqs_set.nr_objs = nr_objs;
998 current_command->info.cqs_set.objs = objs;
999
1000 return 0;
1001 }
1002
1003 static void cleanup_cqs_wait_operation(struct kbase_kcpu_command_queue *queue,
1004 struct kbase_kcpu_command_cqs_wait_operation_info *cqs_wait_operation)
1005 {
1006 WARN_ON(!cqs_wait_operation->nr_objs);
1007 WARN_ON(!cqs_wait_operation->objs);
1008 WARN_ON(!cqs_wait_operation->signaled);
1009 WARN_ON(!queue->cqs_wait_count);
1010
1011 if (--queue->cqs_wait_count == 0) {
1012 kbase_csf_event_wait_remove(queue->kctx,
1013 event_cqs_callback, queue);
1014 }
1015
1016 kfree(cqs_wait_operation->signaled);
1017 kfree(cqs_wait_operation->objs);
1018 cqs_wait_operation->signaled = NULL;
1019 cqs_wait_operation->objs = NULL;
1020 }
1021
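/**
 * kbase_kcpu_cqs_wait_operation_process() - Evaluate a CQS_WAIT_OPERATION
 *                                           command
 *
 * @kbdev:              Device the queue belongs to
 * @queue:              The queue containing the command
 * @cqs_wait_operation: The CQS wait operation command
 *
 * Like kbase_kcpu_cqs_wait_process(), but each object carries its own data
 * type (u32 or u64) and comparison (LE or GT) against the supplied value.
 *
 * Return: non-zero when every object is signalled, 0 if still blocked, or a
 * negative error code.
 */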
1022 static int kbase_kcpu_cqs_wait_operation_process(struct kbase_device *kbdev,
1023 struct kbase_kcpu_command_queue *queue,
1024 struct kbase_kcpu_command_cqs_wait_operation_info *cqs_wait_operation)
1025 {
1026 u32 i;
1027
1028 lockdep_assert_held(&queue->lock);
1029
1030 if (WARN_ON(!cqs_wait_operation->objs))
1031 return -EINVAL;
1032
1033 /* Skip the CQS waits that have already been signaled when processing */
1034 for (i = find_first_zero_bit(cqs_wait_operation->signaled, cqs_wait_operation->nr_objs); i < cqs_wait_operation->nr_objs; i++) {
1035 if (!test_bit(i, cqs_wait_operation->signaled)) {
1036 struct kbase_vmap_struct *mapping;
1037 bool sig_set;
1038 uintptr_t evt = (uintptr_t)kbase_phy_alloc_mapping_get(
1039 queue->kctx, cqs_wait_operation->objs[i].addr, &mapping);
1040 u64 val = 0;
1041
1042 if (!queue->command_started) {
1043 queue->command_started = true;
1044 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START(
1045 kbdev, queue);
1046 }
1047
1048
1049 if (!evt) {
1050 dev_warn(kbdev->dev,
1051 "Sync memory %llx already freed", cqs_wait_operation->objs[i].addr);
1052 queue->has_error = true;
1053 return -EINVAL;
1054 }
1055
1056 switch (cqs_wait_operation->objs[i].data_type) {
1057 default:
1058 WARN_ON(!kbase_kcpu_cqs_is_data_type_valid(
1059 cqs_wait_operation->objs[i].data_type));
1060 kbase_phy_alloc_mapping_put(queue->kctx, mapping);
1061 queue->has_error = true;
1062 return -EINVAL;
1063 case BASEP_CQS_DATA_TYPE_U32:
1064 val = *(u32 *)evt;
1065 evt += BASEP_EVENT32_ERR_OFFSET - BASEP_EVENT32_VAL_OFFSET;
1066 break;
1067 case BASEP_CQS_DATA_TYPE_U64:
1068 val = *(u64 *)evt;
1069 evt += BASEP_EVENT64_ERR_OFFSET - BASEP_EVENT64_VAL_OFFSET;
1070 break;
1071 }
1072
1073 switch (cqs_wait_operation->objs[i].operation) {
1074 case BASEP_CQS_WAIT_OPERATION_LE:
1075 sig_set = val <= cqs_wait_operation->objs[i].val;
1076 break;
1077 case BASEP_CQS_WAIT_OPERATION_GT:
1078 sig_set = val > cqs_wait_operation->objs[i].val;
1079 break;
1080 default:
1081 dev_dbg(kbdev->dev,
1082 "Unsupported CQS wait operation %d", cqs_wait_operation->objs[i].operation);
1083
1084 kbase_phy_alloc_mapping_put(queue->kctx, mapping);
1085 queue->has_error = true;
1086
1087 return -EINVAL;
1088 }
1089
1090 if (sig_set) {
1091 bitmap_set(cqs_wait_operation->signaled, i, 1);
1092 if ((cqs_wait_operation->inherit_err_flags & (1U << i)) &&
1093 *(u32 *)evt > 0) {
1094 queue->has_error = true;
1095 }
1096
1097 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END(
1098 kbdev, queue, *(u32 *)evt);
1099
1100 queue->command_started = false;
1101 }
1102
1103 kbase_phy_alloc_mapping_put(queue->kctx, mapping);
1104
1105 if (!sig_set)
1106 break;
1107 }
1108 }
1109
1110 /* For the queue to progress further, all cqs objects should get
1111 * signaled.
1112 */
1113 return bitmap_full(cqs_wait_operation->signaled, cqs_wait_operation->nr_objs);
1114 }
1115
1116 static int kbase_kcpu_cqs_wait_operation_prepare(struct kbase_kcpu_command_queue *queue,
1117 struct base_kcpu_command_cqs_wait_operation_info *cqs_wait_operation_info,
1118 struct kbase_kcpu_command *current_command)
1119 {
1120 struct base_cqs_wait_operation_info *objs;
1121 unsigned int nr_objs = cqs_wait_operation_info->nr_objs;
1122 unsigned int i;
1123
1124 lockdep_assert_held(&queue->lock);
1125
1126 if (nr_objs > BASEP_KCPU_CQS_MAX_NUM_OBJS)
1127 return -EINVAL;
1128
1129 if (!nr_objs)
1130 return -EINVAL;
1131
1132 objs = kcalloc(nr_objs, sizeof(*objs), GFP_KERNEL);
1133 if (!objs)
1134 return -ENOMEM;
1135
1136 if (copy_from_user(objs, u64_to_user_ptr(cqs_wait_operation_info->objs),
1137 nr_objs * sizeof(*objs))) {
1138 kfree(objs);
1139 return -ENOMEM;
1140 }
1141
1142 /* Check the CQS objects as early as possible. By checking their alignment
1143 * (required alignment equals the size for Sync32 and Sync64 objects), we can
1144 * prevent overrunning the supplied event page.
1145 */
1146 for (i = 0; i < nr_objs; i++) {
1147 if (!kbase_kcpu_cqs_is_data_type_valid(objs[i].data_type) ||
1148 !kbase_kcpu_cqs_is_aligned(objs[i].addr, objs[i].data_type)) {
1149 kfree(objs);
1150 return -EINVAL;
1151 }
1152 }
1153
1154 if (++queue->cqs_wait_count == 1) {
1155 if (kbase_csf_event_wait_add(queue->kctx,
1156 event_cqs_callback, queue)) {
1157 kfree(objs);
1158 queue->cqs_wait_count--;
1159 return -ENOMEM;
1160 }
1161 }
1162
1163 current_command->type = BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION;
1164 current_command->info.cqs_wait_operation.nr_objs = nr_objs;
1165 current_command->info.cqs_wait_operation.objs = objs;
1166 current_command->info.cqs_wait_operation.inherit_err_flags =
1167 cqs_wait_operation_info->inherit_err_flags;
1168
1169 current_command->info.cqs_wait_operation.signaled = kcalloc(BITS_TO_LONGS(nr_objs),
1170 sizeof(*current_command->info.cqs_wait_operation.signaled), GFP_KERNEL);
1171 if (!current_command->info.cqs_wait_operation.signaled) {
1172 if (--queue->cqs_wait_count == 0) {
1173 kbase_csf_event_wait_remove(queue->kctx,
1174 event_cqs_callback, queue);
1175 }
1176
1177 kfree(objs);
1178 return -ENOMEM;
1179 }
1180
1181 return 0;
1182 }
1183
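/**
 * kbasep_kcpu_cqs_do_set_operation_32() - Apply a set operation to a 32-bit
 *                                         CQS object
 *
 * @queue:     The queue the command belongs to
 * @evt:       Kernel mapping of the CQS object's 32-bit value field
 * @operation: BASEP_CQS_SET_OPERATION_ADD or BASEP_CQS_SET_OPERATION_SET
 * @val:       Operand for the operation
 */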
1184 static void kbasep_kcpu_cqs_do_set_operation_32(struct kbase_kcpu_command_queue *queue,
1185 uintptr_t evt, u8 operation, u64 val)
1186 {
1187 struct kbase_device *kbdev = queue->kctx->kbdev;
1188
1189 switch (operation) {
1190 case BASEP_CQS_SET_OPERATION_ADD:
1191 *(u32 *)evt += (u32)val;
1192 break;
1193 case BASEP_CQS_SET_OPERATION_SET:
1194 *(u32 *)evt = val;
1195 break;
1196 default:
1197 dev_dbg(kbdev->dev, "Unsupported CQS set operation %d", operation);
1198 queue->has_error = true;
1199 break;
1200 }
1201 }
1202
1203 static void kbasep_kcpu_cqs_do_set_operation_64(struct kbase_kcpu_command_queue *queue,
1204 uintptr_t evt, u8 operation, u64 val)
1205 {
1206 struct kbase_device *kbdev = queue->kctx->kbdev;
1207
1208 switch (operation) {
1209 case BASEP_CQS_SET_OPERATION_ADD:
1210 *(u64 *)evt += val;
1211 break;
1212 case BASEP_CQS_SET_OPERATION_SET:
1213 *(u64 *)evt = val;
1214 break;
1215 default:
1216 dev_dbg(kbdev->dev, "Unsupported CQS set operation %d", operation);
1217 queue->has_error = true;
1218 break;
1219 }
1220 }
1221
1222 static void kbase_kcpu_cqs_set_operation_process(
1223 struct kbase_device *kbdev,
1224 struct kbase_kcpu_command_queue *queue,
1225 struct kbase_kcpu_command_cqs_set_operation_info *cqs_set_operation)
1226 {
1227 unsigned int i;
1228
1229 lockdep_assert_held(&queue->lock);
1230
1231 if (WARN_ON(!cqs_set_operation->objs))
1232 return;
1233
1234 for (i = 0; i < cqs_set_operation->nr_objs; i++) {
1235 struct kbase_vmap_struct *mapping;
1236 uintptr_t evt;
1237
1238 evt = (uintptr_t)kbase_phy_alloc_mapping_get(
1239 queue->kctx, cqs_set_operation->objs[i].addr, &mapping);
1240
1241 if (!evt) {
1242 dev_warn(kbdev->dev,
1243 "Sync memory %llx already freed", cqs_set_operation->objs[i].addr);
1244 queue->has_error = true;
1245 } else {
1246 struct base_cqs_set_operation_info *obj = &cqs_set_operation->objs[i];
1247
1248 switch (obj->data_type) {
1249 default:
1250 WARN_ON(!kbase_kcpu_cqs_is_data_type_valid(obj->data_type));
1251 queue->has_error = true;
1252 goto skip_err_propagation;
1253 case BASEP_CQS_DATA_TYPE_U32:
1254 kbasep_kcpu_cqs_do_set_operation_32(queue, evt, obj->operation,
1255 obj->val);
1256 evt += BASEP_EVENT32_ERR_OFFSET - BASEP_EVENT32_VAL_OFFSET;
1257 break;
1258 case BASEP_CQS_DATA_TYPE_U64:
1259 kbasep_kcpu_cqs_do_set_operation_64(queue, evt, obj->operation,
1260 obj->val);
1261 evt += BASEP_EVENT64_ERR_OFFSET - BASEP_EVENT64_VAL_OFFSET;
1262 break;
1263 }
1264
1265 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION(
1266 kbdev, queue, *(u32 *)evt ? 1 : 0);
1267
1268 /* Always propagate errors */
1269 *(u32 *)evt = queue->has_error;
1270
1271 skip_err_propagation:
1272 kbase_phy_alloc_mapping_put(queue->kctx, mapping);
1273 }
1274 }
1275
1276 kbase_csf_event_signal_notify_gpu(queue->kctx);
1277
1278 kfree(cqs_set_operation->objs);
1279 cqs_set_operation->objs = NULL;
1280 }
1281
1282 static int kbase_kcpu_cqs_set_operation_prepare(
1283 struct kbase_kcpu_command_queue *kcpu_queue,
1284 struct base_kcpu_command_cqs_set_operation_info *cqs_set_operation_info,
1285 struct kbase_kcpu_command *current_command)
1286 {
1287 struct base_cqs_set_operation_info *objs;
1288 unsigned int nr_objs = cqs_set_operation_info->nr_objs;
1289 unsigned int i;
1290
1291 lockdep_assert_held(&kcpu_queue->lock);
1292
1293 if (nr_objs > BASEP_KCPU_CQS_MAX_NUM_OBJS)
1294 return -EINVAL;
1295
1296 if (!nr_objs)
1297 return -EINVAL;
1298
1299 objs = kcalloc(nr_objs, sizeof(*objs), GFP_KERNEL);
1300 if (!objs)
1301 return -ENOMEM;
1302
1303 if (copy_from_user(objs, u64_to_user_ptr(cqs_set_operation_info->objs),
1304 nr_objs * sizeof(*objs))) {
1305 kfree(objs);
1306 return -ENOMEM;
1307 }
1308
1309 /* Check the CQS objects as early as possible. By checking their alignment
1310 * (required alignment equals the size for Sync32 and Sync64 objects), we can
1311 * prevent overrunning the supplied event page.
1312 */
1313 for (i = 0; i < nr_objs; i++) {
1314 if (!kbase_kcpu_cqs_is_data_type_valid(objs[i].data_type) ||
1315 !kbase_kcpu_cqs_is_aligned(objs[i].addr, objs[i].data_type)) {
1316 kfree(objs);
1317 return -EINVAL;
1318 }
1319 }
1320
1321 current_command->type = BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION;
1322 current_command->info.cqs_set_operation.nr_objs = nr_objs;
1323 current_command->info.cqs_set_operation.objs = objs;
1324
1325 return 0;
1326 }
1327
1328 #if IS_ENABLED(CONFIG_SYNC_FILE)
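/**
 * kbase_csf_fence_wait_callback() - dma_fence callback for FENCE_WAIT
 *
 * @fence: The fence that has signalled
 * @cb:    Embedded callback struct, used to recover the command's fence info
 *
 * Stops the debug timeout timer where enabled and re-queues the KCPU queue
 * worker so the wait command can complete.
 */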
1329 #if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
1330 static void kbase_csf_fence_wait_callback(struct fence *fence,
1331 struct fence_cb *cb)
1332 #else
1333 static void kbase_csf_fence_wait_callback(struct dma_fence *fence,
1334 struct dma_fence_cb *cb)
1335 #endif
1336 {
1337 struct kbase_kcpu_command_fence_info *fence_info = container_of(cb,
1338 struct kbase_kcpu_command_fence_info, fence_cb);
1339 struct kbase_kcpu_command_queue *kcpu_queue = fence_info->kcpu_queue;
1340 struct kbase_context *const kctx = kcpu_queue->kctx;
1341
1342 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
1343 /* Fence gets signaled. Deactivate the timer for fence-wait timeout */
1344 del_timer(&kcpu_queue->fence_timeout);
1345 #endif
1346 KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev, KCPU_FENCE_WAIT_END, kcpu_queue,
1347 fence->context, fence->seqno);
1348
1349 /* Resume kcpu command queue processing. */
1350 queue_work(kcpu_queue->wq, &kcpu_queue->work);
1351 }
1352
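/**
 * kbasep_kcpu_fence_wait_cancel() - Cancel a pending fence-wait command
 *
 * @kcpu_queue: The queue containing the fence wait command
 * @fence_info: The fence the command was waiting on
 *
 * Removes the dma_fence callback if it was installed, stops the debug
 * timeout timer where enabled, and drops the queue's reference on the
 * fence.
 */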
1353 static void kbasep_kcpu_fence_wait_cancel(struct kbase_kcpu_command_queue *kcpu_queue,
1354 struct kbase_kcpu_command_fence_info *fence_info)
1355 {
1356 struct kbase_context *const kctx = kcpu_queue->kctx;
1357
1358 lockdep_assert_held(&kcpu_queue->lock);
1359
1360 if (WARN_ON(!fence_info->fence))
1361 return;
1362
1363 if (kcpu_queue->fence_wait_processed) {
1364 bool removed = dma_fence_remove_callback(fence_info->fence,
1365 &fence_info->fence_cb);
1366
1367 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
1368 /* Fence-wait cancelled or fence signaled. In the latter case
1369 * the timer would already have been deactivated inside
1370 * kbase_csf_fence_wait_callback().
1371 */
1372 del_timer_sync(&kcpu_queue->fence_timeout);
1373 #endif
1374 if (removed)
1375 KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev, KCPU_FENCE_WAIT_END,
1376 kcpu_queue, fence_info->fence->context,
1377 fence_info->fence->seqno);
1378 }
1379
1380 /* Release the reference which is kept by the kcpu_queue */
1381 kbase_fence_put(fence_info->fence);
1382 kcpu_queue->fence_wait_processed = false;
1383
1384 fence_info->fence = NULL;
1385 }
1386
1387 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
1388 /**
1389 * fence_timeout_callback() - Timeout callback function for fence-wait
1390 *
1391 * @timer: Timer struct
1392 *
1393 * Context and seqno of the timed-out fence will be displayed in dmesg.
1394 * If the fence has been signalled, a work item will be enqueued to
1395 * process the fence-wait without displaying debugging information.
1396 */
1397 static void fence_timeout_callback(struct timer_list *timer)
1398 {
1399 struct kbase_kcpu_command_queue *kcpu_queue =
1400 container_of(timer, struct kbase_kcpu_command_queue, fence_timeout);
1401 struct kbase_context *const kctx = kcpu_queue->kctx;
1402 struct kbase_kcpu_command *cmd = &kcpu_queue->commands[kcpu_queue->start_offset];
1403 struct kbase_kcpu_command_fence_info *fence_info;
1404 #if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
1405 struct fence *fence;
1406 #else
1407 struct dma_fence *fence;
1408 #endif
1409 struct kbase_sync_fence_info info;
1410
1411 if (cmd->type != BASE_KCPU_COMMAND_TYPE_FENCE_WAIT) {
1412 dev_err(kctx->kbdev->dev,
1413 "%s: Unexpected command type %d in ctx:%d_%d kcpu queue:%u", __func__,
1414 cmd->type, kctx->tgid, kctx->id, kcpu_queue->id);
1415 return;
1416 }
1417
1418 fence_info = &cmd->info.fence;
1419
1420 fence = kbase_fence_get(fence_info);
1421 if (!fence) {
1422 dev_err(kctx->kbdev->dev, "no fence found in ctx:%d_%d kcpu queue:%u", kctx->tgid,
1423 kctx->id, kcpu_queue->id);
1424 return;
1425 }
1426
1427 kbase_sync_fence_info_get(fence, &info);
1428
1429 if (info.status == 1) {
1430 queue_work(kcpu_queue->wq, &kcpu_queue->work);
1431 } else if (info.status == 0) {
1432 dev_warn(kctx->kbdev->dev, "fence has not yet signalled in %ums",
1433 FENCE_WAIT_TIMEOUT_MS);
1434 dev_warn(kctx->kbdev->dev,
1435 "ctx:%d_%d kcpu queue:%u still waiting for fence[%pK] context#seqno:%s",
1436 kctx->tgid, kctx->id, kcpu_queue->id, fence, info.name);
1437 } else {
1438 dev_warn(kctx->kbdev->dev, "fence has got error");
1439 dev_warn(kctx->kbdev->dev,
1440 "ctx:%d_%d kcpu queue:%u faulty fence[%pK] context#seqno:%s error(%d)",
1441 kctx->tgid, kctx->id, kcpu_queue->id, fence, info.name, info.status);
1442 }
1443
1444 kbase_fence_put(fence);
1445 }
1446
1447 /**
1448 * fence_timeout_start() - Start a timer to check fence-wait timeout
1449 *
1450 * @cmd: KCPU command queue
1451 *
1452 * Activate a timer to check whether a fence-wait command in the queue
1453 * gets completed within FENCE_WAIT_TIMEOUT_MS.
1454 */
1455 static void fence_timeout_start(struct kbase_kcpu_command_queue *cmd)
1456 {
1457 mod_timer(&cmd->fence_timeout, jiffies + msecs_to_jiffies(FENCE_WAIT_TIMEOUT_MS));
1458 }
1459 #endif
1460
1461 /**
1462 * kbase_kcpu_fence_wait_process() - Process the kcpu fence wait command
1463 *
1464 * @kcpu_queue: The queue containing the fence wait command
1465 * @fence_info: Reference to a fence for which the command is waiting
1466 *
1467 * Return: 0 if fence wait is blocked, 1 if it is unblocked, negative error if
1468 * an error has occurred and fence should no longer be waited on.
1469 */
1470 static int kbase_kcpu_fence_wait_process(
1471 struct kbase_kcpu_command_queue *kcpu_queue,
1472 struct kbase_kcpu_command_fence_info *fence_info)
1473 {
1474 int fence_status = 0;
1475 #if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
1476 struct fence *fence;
1477 #else
1478 struct dma_fence *fence;
1479 #endif
1480 struct kbase_context *const kctx = kcpu_queue->kctx;
1481
1482 lockdep_assert_held(&kcpu_queue->lock);
1483
1484 if (WARN_ON(!fence_info->fence))
1485 return -EINVAL;
1486
1487 fence = fence_info->fence;
1488
1489 if (kcpu_queue->fence_wait_processed) {
1490 fence_status = dma_fence_get_status(fence);
1491 } else {
1492 int cb_err = dma_fence_add_callback(fence,
1493 &fence_info->fence_cb,
1494 kbase_csf_fence_wait_callback);
1495
1496 KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev,
1497 KCPU_FENCE_WAIT_START, kcpu_queue,
1498 fence->context, fence->seqno);
1499 fence_status = cb_err;
1500 if (cb_err == 0) {
1501 kcpu_queue->fence_wait_processed = true;
1502 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
1503 fence_timeout_start(kcpu_queue);
1504 #endif
1505 } else if (cb_err == -ENOENT) {
1506 fence_status = dma_fence_get_status(fence);
1507 if (!fence_status) {
1508 struct kbase_sync_fence_info info;
1509
1510 kbase_sync_fence_info_get(fence, &info);
1511 dev_warn(kctx->kbdev->dev,
1512 "Unexpected status for fence %s of ctx:%d_%d kcpu queue:%u",
1513 info.name, kctx->tgid, kctx->id, kcpu_queue->id);
1514 }
1515 }
1516 }
1517
1518 /*
1519 * At this point fence status can contain 3 types of values:
1520 * - Value 0 to represent that fence in question is not signalled yet
1521 * - Value 1 to represent that fence in question is signalled without
1522 * errors
1523 * - Negative error code to represent that some error has occurred such
1524 * that waiting on it is no longer valid.
1525 */
1526
1527 if (fence_status)
1528 kbasep_kcpu_fence_wait_cancel(kcpu_queue, fence_info);
1529
1530 return fence_status;
1531 }
1532
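/**
 * kbase_kcpu_fence_wait_prepare() - Prepare a FENCE_WAIT command
 *
 * @kcpu_queue:      The queue the command is being prepared for
 * @fence_info:      Userspace struct holding the sync file fd to wait on
 * @current_command: The command slot being filled in
 *
 * Takes a reference on the fence behind the supplied sync file fd; the
 * reference is released when the wait completes or is cancelled.
 *
 * Return: 0 on success, -ENOMEM if the userspace struct cannot be copied or
 * -ENOENT if the fd does not refer to a valid fence.
 */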
1533 static int kbase_kcpu_fence_wait_prepare(struct kbase_kcpu_command_queue *kcpu_queue,
1534 struct base_kcpu_command_fence_info *fence_info,
1535 struct kbase_kcpu_command *current_command)
1536 {
1537 #if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
1538 struct fence *fence_in;
1539 #else
1540 struct dma_fence *fence_in;
1541 #endif
1542 struct base_fence fence;
1543
1544 lockdep_assert_held(&kcpu_queue->lock);
1545
1546 if (copy_from_user(&fence, u64_to_user_ptr(fence_info->fence), sizeof(fence)))
1547 return -ENOMEM;
1548
1549 fence_in = sync_file_get_fence(fence.basep.fd);
1550
1551 if (!fence_in)
1552 return -ENOENT;
1553
1554 current_command->type = BASE_KCPU_COMMAND_TYPE_FENCE_WAIT;
1555 current_command->info.fence.fence = fence_in;
1556 current_command->info.fence.kcpu_queue = kcpu_queue;
1557 return 0;
1558 }
1559
1560 static int kbasep_kcpu_fence_signal_process(struct kbase_kcpu_command_queue *kcpu_queue,
1561 struct kbase_kcpu_command_fence_info *fence_info)
1562 {
1563 struct kbase_context *const kctx = kcpu_queue->kctx;
1564 int ret;
1565
1566 if (WARN_ON(!fence_info->fence))
1567 return -EINVAL;
1568
1569 ret = dma_fence_signal(fence_info->fence);
1570
1571 if (unlikely(ret < 0)) {
1572 dev_warn(kctx->kbdev->dev, "dma_fence(%d) has been signalled already\n", ret);
1573 /* Treated as a success */
1574 ret = 0;
1575 }
1576
1577 KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev, KCPU_FENCE_SIGNAL, kcpu_queue,
1578 fence_info->fence->context,
1579 fence_info->fence->seqno);
1580
1581 /* dma_fence refcount needs to be decreased to release it. */
1582 kbase_fence_put(fence_info->fence);
1583 fence_info->fence = NULL;
1584
1585 return ret;
1586 }
1587
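/**
 * kbasep_kcpu_fence_signal_init() - Set up a FENCE_SIGNAL command
 *
 * @kcpu_queue:      The queue the command is being prepared for
 * @current_command: The command slot being filled in
 * @fence:           base_fence struct whose fd field receives the new fd
 * @sync_file:       Output pointer for the created sync_file
 * @fd:              Output pointer for the reserved fd (not yet installed)
 *
 * Allocates and initialises a new kcpu dma_fence on the queue's fence
 * context, wraps it in a sync_file and reserves an fd for it. Installing
 * the fd is left to the caller so it can still back out on error.
 *
 * Return: 0 on success, a negative error code otherwise.
 */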
1588 static int kbasep_kcpu_fence_signal_init(struct kbase_kcpu_command_queue *kcpu_queue,
1589 struct kbase_kcpu_command *current_command,
1590 struct base_fence *fence, struct sync_file **sync_file,
1591 int *fd)
1592 {
1593 #if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
1594 struct fence *fence_out;
1595 #else
1596 struct dma_fence *fence_out;
1597 #endif
1598 struct kbase_kcpu_dma_fence *kcpu_fence;
1599 int ret = 0;
1600
1601 lockdep_assert_held(&kcpu_queue->lock);
1602
1603 kcpu_fence = kzalloc(sizeof(*kcpu_fence), GFP_KERNEL);
1604 if (!kcpu_fence)
1605 return -ENOMEM;
1606
1607 #if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
1608 fence_out = (struct fence *)kcpu_fence;
1609 #else
1610 fence_out = (struct dma_fence *)kcpu_fence;
1611 #endif
1612
1613 dma_fence_init(fence_out,
1614 &kbase_fence_ops,
1615 &kbase_csf_fence_lock,
1616 kcpu_queue->fence_context,
1617 ++kcpu_queue->fence_seqno);
1618
1619 #if (KERNEL_VERSION(4, 9, 67) >= LINUX_VERSION_CODE)
1620 /* Take an extra reference to the fence on behalf of the sync file.
1621 * This is only needed on older kernels where sync_file_create()
1622 * does not take its own reference. This changed in v4.9.68,
1623 * after which sync_file_create() takes its own reference.
1624 */
1625 dma_fence_get(fence_out);
1626 #endif
1627
1628 /* Set reference to KCPU metadata and increment refcount */
1629 kcpu_fence->metadata = kcpu_queue->metadata;
1630 WARN_ON(!kbase_refcount_inc_not_zero(&kcpu_fence->metadata->refcount));
1631
1632 /* create a sync_file fd representing the fence */
1633 *sync_file = sync_file_create(fence_out);
1634 if (!(*sync_file)) {
1635 ret = -ENOMEM;
1636 goto file_create_fail;
1637 }
1638
1639 *fd = get_unused_fd_flags(O_CLOEXEC);
1640 if (*fd < 0) {
1641 ret = *fd;
1642 goto fd_flags_fail;
1643 }
1644
1645 fence->basep.fd = *fd;
1646
1647 current_command->type = BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL;
1648 current_command->info.fence.fence = fence_out;
1649
1650 return 0;
1651
1652 fd_flags_fail:
1653 fput((*sync_file)->file);
1654 file_create_fail:
1655 /*
1656 * Upon failure, dma_fence refcount that was increased by
1657 * dma_fence_get() or sync_file_create() needs to be decreased
1658 * to release it.
1659 */
1660 kbase_fence_put(fence_out);
1661 current_command->info.fence.fence = NULL;
1662
1663 return ret;
1664 }
1665
1666 static int kbase_kcpu_fence_signal_prepare(struct kbase_kcpu_command_queue *kcpu_queue,
1667 struct base_kcpu_command_fence_info *fence_info,
1668 struct kbase_kcpu_command *current_command)
1669 {
1670 struct base_fence fence;
1671 struct sync_file *sync_file = NULL;
1672 int fd;
1673 int ret = 0;
1674
1675 lockdep_assert_held(&kcpu_queue->lock);
1676
1677 if (copy_from_user(&fence, u64_to_user_ptr(fence_info->fence), sizeof(fence)))
1678 return -EFAULT;
1679
1680 ret = kbasep_kcpu_fence_signal_init(kcpu_queue, current_command, &fence, &sync_file, &fd);
1681 if (ret)
1682 return ret;
1683
1684 if (copy_to_user(u64_to_user_ptr(fence_info->fence), &fence,
1685 sizeof(fence))) {
1686 ret = -EFAULT;
1687 goto fail;
1688 }
1689
1690 /* The 'sync_file' pointer can't be safely dereferenced once 'fd' is
1691 * installed, so the install step must be the last thing done
1692 * before returning success.
1693 */
1694 fd_install(fd, sync_file->file);
1695 return 0;
1696
1697 fail:
1698 fput(sync_file->file);
1699 kbase_fence_put(current_command->info.fence.fence);
1700 current_command->info.fence.fence = NULL;
1701
1702 return ret;
1703 }
1704
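/**
 * kbase_kcpu_fence_signal_process() - Signal a KCPU fence (exported via
 *                                     KBASE_EXPORT_TEST_API)
 *
 * @kcpu_queue: The queue the FENCE_SIGNAL command belongs to
 * @fence_info: The fence to signal
 *
 * Thin wrapper around kbasep_kcpu_fence_signal_process() that validates its
 * arguments first.
 *
 * Return: 0 on success, -EINVAL on NULL arguments, or a negative error code
 * from the underlying signal operation.
 */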
1705 int kbase_kcpu_fence_signal_process(struct kbase_kcpu_command_queue *kcpu_queue,
1706 struct kbase_kcpu_command_fence_info *fence_info)
1707 {
1708 if (!kcpu_queue || !fence_info)
1709 return -EINVAL;
1710
1711 return kbasep_kcpu_fence_signal_process(kcpu_queue, fence_info);
1712 }
1713 KBASE_EXPORT_TEST_API(kbase_kcpu_fence_signal_process);
1714
1715 int kbase_kcpu_fence_signal_init(struct kbase_kcpu_command_queue *kcpu_queue,
1716 struct kbase_kcpu_command *current_command,
1717 struct base_fence *fence, struct sync_file **sync_file, int *fd)
1718 {
1719 if (!kcpu_queue || !current_command || !fence || !sync_file || !fd)
1720 return -EINVAL;
1721
1722 return kbasep_kcpu_fence_signal_init(kcpu_queue, current_command, fence, sync_file, fd);
1723 }
1724 KBASE_EXPORT_TEST_API(kbase_kcpu_fence_signal_init);
1725 #endif /* CONFIG_SYNC_FILE */
1726
1727 static void kcpu_queue_process_worker(struct work_struct *data)
1728 {
1729 struct kbase_kcpu_command_queue *queue = container_of(data,
1730 struct kbase_kcpu_command_queue, work);
1731
1732 mutex_lock(&queue->lock);
1733 kcpu_queue_process(queue, false);
1734 mutex_unlock(&queue->lock);
1735 }
1736
1737 static int delete_queue(struct kbase_context *kctx, u32 id)
1738 {
1739 int err = 0;
1740
1741 mutex_lock(&kctx->csf.kcpu_queues.lock);
1742
1743 if ((id < KBASEP_MAX_KCPU_QUEUES) && kctx->csf.kcpu_queues.array[id]) {
1744 struct kbase_kcpu_command_queue *queue =
1745 kctx->csf.kcpu_queues.array[id];
1746
1747 KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev, KCPU_QUEUE_DELETE,
1748 queue, queue->num_pending_cmds, queue->cqs_wait_count);
1749
1750 /* Disassociate the queue from the system to prevent further
1751 * submissions. The pending commands can then be drained safely
1752 * even if a new queue is created using the same ID.
1753 */
1754 kctx->csf.kcpu_queues.array[id] = NULL;
1755 bitmap_clear(kctx->csf.kcpu_queues.in_use, id, 1);
1756
1757 mutex_unlock(&kctx->csf.kcpu_queues.lock);
1758
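/* The kcpu_queues lock can be dropped before taking the queue lock:
 * the queue has already been unlinked above, so no new submission can
 * look it up, and any enqueue that already holds the queue lock will
 * complete before we proceed.
 */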
1759 mutex_lock(&queue->lock);
1760
1761 /* Metadata struct may outlive KCPU queue. */
1762 kbase_kcpu_dma_fence_meta_put(queue->metadata);
1763
1764 /* Drain the remaining work for this queue first and go past
1765 * all the waits.
1766 */
1767 kcpu_queue_process(queue, true);
1768
1769 /* All commands should have been processed */
1770 WARN_ON(queue->num_pending_cmds);
1771
1772 /* All CQS wait commands should have been cleaned up */
1773 WARN_ON(queue->cqs_wait_count);
1774
1775 /* Fire the tracepoint with the mutex held to enforce correct
1776 * ordering with the summary stream.
1777 */
1778 KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE(kctx->kbdev, queue);
1779
1780 mutex_unlock(&queue->lock);
1781
1782 cancel_work_sync(&queue->work);
1783 destroy_workqueue(queue->wq);
1784
1785 mutex_destroy(&queue->lock);
1786
1787 kfree(queue);
1788 } else {
1789 dev_dbg(kctx->kbdev->dev,
1790 "Attempt to delete a non-existent KCPU queue");
1791 mutex_unlock(&kctx->csf.kcpu_queues.lock);
1792 err = -EINVAL;
1793 }
1794 return err;
1795 }
1796
1797 static void KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_INFO(
1798 struct kbase_device *kbdev,
1799 const struct kbase_kcpu_command_queue *queue,
1800 const struct kbase_kcpu_command_jit_alloc_info *jit_alloc,
1801 int alloc_status)
1802 {
1803 u8 i;
1804
1805 KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(kbdev, queue);
1806 for (i = 0; i < jit_alloc->count; i++) {
1807 const u8 id = jit_alloc->info[i].id;
1808 const struct kbase_va_region *reg = queue->kctx->jit_alloc[id];
1809 u64 gpu_alloc_addr = 0;
1810 u64 mmu_flags = 0;
1811
1812 if ((alloc_status == 0) && !WARN_ON(!reg) &&
1813 !WARN_ON(reg == KBASE_RESERVED_REG_JIT_ALLOC)) {
1814 #ifdef CONFIG_MALI_VECTOR_DUMP
1815 struct tagged_addr phy = {0};
1816 #endif /* CONFIG_MALI_VECTOR_DUMP */
1817
1818 gpu_alloc_addr = reg->start_pfn << PAGE_SHIFT;
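/* MMU flags are only reported when CONFIG_MALI_VECTOR_DUMP is enabled;
 * a zeroed physical address is used here since, presumably, only the
 * flag bits of the generated ATE are of interest to the dump.
 */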
1819 #ifdef CONFIG_MALI_VECTOR_DUMP
1820 mmu_flags = kbase_mmu_create_ate(kbdev,
1821 phy, reg->flags,
1822 MIDGARD_MMU_BOTTOMLEVEL,
1823 queue->kctx->jit_group_id);
1824 #endif /* CONFIG_MALI_VECTOR_DUMP */
1825 }
1826 KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(
1827 kbdev, queue, alloc_status, gpu_alloc_addr, mmu_flags);
1828 }
1829 }
1830
1831 static void KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(
1832 struct kbase_device *kbdev,
1833 const struct kbase_kcpu_command_queue *queue)
1834 {
1835 KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(kbdev, queue);
1836 }
1837
1838 static void KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_END(
1839 struct kbase_device *kbdev,
1840 const struct kbase_kcpu_command_queue *queue)
1841 {
1842 KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END(kbdev, queue);
1843 }
1844
1845 static void kcpu_queue_process(struct kbase_kcpu_command_queue *queue,
1846 bool drain_queue)
1847 {
1848 struct kbase_device *kbdev = queue->kctx->kbdev;
1849 bool process_next = true;
1850 size_t i;
1851
1852 lockdep_assert_held(&queue->lock);
1853
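/* Commands live in a 256-entry ring buffer: casting the index to u8
 * below makes it wrap naturally (see the BUILD_BUG_ON on
 * KBASEP_KCPU_QUEUE_SIZE in kbase_csf_kcpu_queue_enqueue()).
 */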
1854 for (i = 0; i != queue->num_pending_cmds; ++i) {
1855 struct kbase_kcpu_command *cmd =
1856 &queue->commands[(u8)(queue->start_offset + i)];
1857 int status;
1858
1859 switch (cmd->type) {
1860 case BASE_KCPU_COMMAND_TYPE_FENCE_WAIT:
1861 if (!queue->command_started) {
1862 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START(kbdev,
1863 queue);
1864 queue->command_started = true;
1865 }
1866
1867 status = 0;
1868 #if IS_ENABLED(CONFIG_SYNC_FILE)
1869 if (drain_queue) {
1870 kbasep_kcpu_fence_wait_cancel(queue, &cmd->info.fence);
1871 } else {
1872 status = kbase_kcpu_fence_wait_process(queue,
1873 &cmd->info.fence);
1874
1875 if (status == 0)
1876 process_next = false;
1877 else if (status < 0)
1878 queue->has_error = true;
1879 }
1880 #else
1881 dev_warn(kbdev->dev,
1882 "unexpected fence wait command found\n");
1883
1884 status = -EINVAL;
1885 queue->has_error = true;
1886 #endif
1887
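/* When the fence is not yet signalled (status == 0), process_next stays
 * false: the loop below stops at this command and processing resumes
 * when the work item is queued again, presumably from the fence wait
 * callback.
 */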
1888 if (process_next) {
1889 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END(
1890 kbdev, queue, status < 0 ? status : 0);
1891 queue->command_started = false;
1892 }
1893 break;
1894 case BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL:
1895 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START(kbdev, queue);
1896
1897 status = 0;
1898
1899 #if IS_ENABLED(CONFIG_SYNC_FILE)
1900 status = kbasep_kcpu_fence_signal_process(queue, &cmd->info.fence);
1901
1902 if (status < 0)
1903 queue->has_error = true;
1904 #else
1905 dev_warn(kbdev->dev,
1906 "unexpected fence signal command found\n");
1907
1908 status = -EINVAL;
1909 queue->has_error = true;
1910 #endif
1911
1912 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END(kbdev, queue,
1913 status);
1914 break;
1915 case BASE_KCPU_COMMAND_TYPE_CQS_WAIT:
1916 status = kbase_kcpu_cqs_wait_process(kbdev, queue,
1917 &cmd->info.cqs_wait);
1918
1919 if (!status && !drain_queue) {
1920 process_next = false;
1921 } else {
1922 /* Either all CQS objects were signaled or
1923 * there was an error or the queue itself is
1924 * being deleted.
1925 * In all cases we can move to the next command.
1926 * TBD: handle the error
1927 */
1928 cleanup_cqs_wait(queue, &cmd->info.cqs_wait);
1929 }
1930
1931 break;
1932 case BASE_KCPU_COMMAND_TYPE_CQS_SET:
1933 kbase_kcpu_cqs_set_process(kbdev, queue,
1934 &cmd->info.cqs_set);
1935
1936 break;
1937 case BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION:
1938 status = kbase_kcpu_cqs_wait_operation_process(kbdev, queue,
1939 &cmd->info.cqs_wait_operation);
1940
1941 if (!status && !drain_queue) {
1942 process_next = false;
1943 } else {
1944 /* Either all CQS objects were signaled or
1945 * there was an error or the queue itself is
1946 * being deleted.
1947 * In all cases we can move to the next command.
1948 * TBD: handle the error
1949 */
1950 cleanup_cqs_wait_operation(queue, &cmd->info.cqs_wait_operation);
1951 }
1952
1953 break;
1954 case BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION:
1955 kbase_kcpu_cqs_set_operation_process(kbdev, queue,
1956 &cmd->info.cqs_set_operation);
1957
1958 break;
1959 case BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER:
1960 /* Clear the queue's error state */
1961 queue->has_error = false;
1962
1963 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER(kbdev, queue);
1964 break;
1965 case BASE_KCPU_COMMAND_TYPE_MAP_IMPORT: {
1966 struct kbase_ctx_ext_res_meta *meta = NULL;
1967
1968 if (!drain_queue) {
1969 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START(kbdev,
1970 queue);
1971
1972 kbase_gpu_vm_lock(queue->kctx);
1973 meta = kbase_sticky_resource_acquire(
1974 queue->kctx, cmd->info.import.gpu_va);
1975 kbase_gpu_vm_unlock(queue->kctx);
1976
1977 if (meta == NULL) {
1978 queue->has_error = true;
1979 dev_dbg(
1980 kbdev->dev,
1981 "failed to map an external resource");
1982 }
1983
1984 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END(
1985 kbdev, queue, meta ? 0 : 1);
1986 }
1987 break;
1988 }
1989 case BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT: {
1990 bool ret;
1991
1992 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START(kbdev, queue);
1993
1994 kbase_gpu_vm_lock(queue->kctx);
1995 ret = kbase_sticky_resource_release(
1996 queue->kctx, NULL, cmd->info.import.gpu_va);
1997 kbase_gpu_vm_unlock(queue->kctx);
1998
1999 if (!ret) {
2000 queue->has_error = true;
2001 dev_dbg(kbdev->dev,
2002 "failed to release the reference. resource not found");
2003 }
2004
2005 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END(kbdev, queue,
2006 ret ? 0 : 1);
2007 break;
2008 }
2009 case BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: {
2010 bool ret;
2011
2012 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START(kbdev,
2013 queue);
2014
2015 kbase_gpu_vm_lock(queue->kctx);
2016 ret = kbase_sticky_resource_release_force(
2017 queue->kctx, NULL, cmd->info.import.gpu_va);
2018 kbase_gpu_vm_unlock(queue->kctx);
2019
2020 if (!ret) {
2021 queue->has_error = true;
2022 dev_dbg(kbdev->dev,
2023 "failed to release the reference. resource not found");
2024 }
2025
2026 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END(
2027 kbdev, queue, ret ? 0 : 1);
2028 break;
2029 }
2030 case BASE_KCPU_COMMAND_TYPE_JIT_ALLOC:
2031 {
2032 if (drain_queue) {
2033 /* We still need to call this function to clean the JIT alloc info up */
2034 kbase_kcpu_jit_allocate_finish(queue, cmd);
2035 } else {
2036 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START(kbdev,
2037 queue);
2038
2039 status = kbase_kcpu_jit_allocate_process(queue,
2040 cmd);
2041 if (status == -EAGAIN) {
2042 process_next = false;
2043 } else {
2044 if (status != 0)
2045 queue->has_error = true;
2046
2047 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_INFO(
2048 kbdev, queue,
2049 &cmd->info.jit_alloc, status);
2050
2051 kbase_kcpu_jit_allocate_finish(queue,
2052 cmd);
2053 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(
2054 kbdev, queue);
2055 }
2056 }
2057
2058 break;
2059 }
2060 case BASE_KCPU_COMMAND_TYPE_JIT_FREE: {
2061 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START(kbdev, queue);
2062
2063 status = kbase_kcpu_jit_free_process(queue, cmd);
2064 if (status)
2065 queue->has_error = true;
2066
2067 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_END(
2068 kbdev, queue);
2069 break;
2070 }
2071 #if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
2072 case BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: {
2073 struct kbase_suspend_copy_buffer *sus_buf =
2074 cmd->info.suspend_buf_copy.sus_buf;
2075
2076 if (!drain_queue) {
2077 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START(
2078 kbdev, queue);
2079
2080 status = kbase_csf_queue_group_suspend_process(
2081 queue->kctx, sus_buf,
2082 cmd->info.suspend_buf_copy.group_handle);
2083 if (status)
2084 queue->has_error = true;
2085
2086 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END(
2087 kbdev, queue, status);
2088 }
2089
2090 if (!sus_buf->cpu_alloc) {
2091 int i;
2092
2093 for (i = 0; i < sus_buf->nr_pages; i++)
2094 put_page(sus_buf->pages[i]);
2095 } else {
2096 kbase_mem_phy_alloc_kernel_unmapped(
2097 sus_buf->cpu_alloc);
2098 kbase_mem_phy_alloc_put(
2099 sus_buf->cpu_alloc);
2100 }
2101
2102 kfree(sus_buf->pages);
2103 kfree(sus_buf);
2104 break;
2105 }
2106 #endif
2107 default:
2108 dev_dbg(kbdev->dev,
2109 "Unrecognized command type");
2110 break;
2111 } /* switch */
2112
2113 /*TBD: error handling */
2114
2115 if (!process_next)
2116 break;
2117 }
2118
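/* 'i' commands completed in this pass; anything left behind an
 * unsignalled fence or CQS wait stays pending and is handled on a
 * subsequent run of the worker.
 */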
2119 if (i > 0) {
2120 queue->start_offset += i;
2121 queue->num_pending_cmds -= i;
2122
2123 /* If an attempt to enqueue commands failed then we must raise
2124 * an event in case the client wants to retry now that there is
2125 * free space in the buffer.
2126 */
2127 if (queue->enqueue_failed) {
2128 queue->enqueue_failed = false;
2129 kbase_csf_event_signal_cpu_only(queue->kctx);
2130 }
2131 }
2132 }
2133
2134 static size_t kcpu_queue_get_space(struct kbase_kcpu_command_queue *queue)
2135 {
2136 return KBASEP_KCPU_QUEUE_SIZE - queue->num_pending_cmds;
2137 }
2138
2139 static void KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_COMMAND(
2140 const struct kbase_kcpu_command_queue *queue,
2141 const struct kbase_kcpu_command *cmd)
2142 {
2143 struct kbase_device *kbdev = queue->kctx->kbdev;
2144
2145 switch (cmd->type) {
2146 case BASE_KCPU_COMMAND_TYPE_FENCE_WAIT:
2147 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT(kbdev, queue,
2148 cmd->info.fence.fence);
2149 break;
2150 case BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL:
2151 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL(kbdev, queue,
2152 cmd->info.fence.fence);
2153 break;
2154 case BASE_KCPU_COMMAND_TYPE_CQS_WAIT:
2155 {
2156 const struct base_cqs_wait_info *waits =
2157 cmd->info.cqs_wait.objs;
2158 u32 inherit_err_flags = cmd->info.cqs_wait.inherit_err_flags;
2159 unsigned int i;
2160
2161 for (i = 0; i < cmd->info.cqs_wait.nr_objs; i++) {
2162 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT(
2163 kbdev, queue, waits[i].addr, waits[i].val,
2164 (inherit_err_flags & ((u32)1 << i)) ? 1 : 0);
2165 }
2166 break;
2167 }
2168 case BASE_KCPU_COMMAND_TYPE_CQS_SET:
2169 {
2170 const struct base_cqs_set *sets = cmd->info.cqs_set.objs;
2171 unsigned int i;
2172
2173 for (i = 0; i < cmd->info.cqs_set.nr_objs; i++) {
2174 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET(kbdev, queue,
2175 sets[i].addr);
2176 }
2177 break;
2178 }
2179 case BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION:
2180 {
2181 const struct base_cqs_wait_operation_info *waits =
2182 cmd->info.cqs_wait_operation.objs;
2183 u32 inherit_err_flags = cmd->info.cqs_wait_operation.inherit_err_flags;
2184 unsigned int i;
2185
2186 for (i = 0; i < cmd->info.cqs_wait_operation.nr_objs; i++) {
2187 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION(
2188 kbdev, queue, waits[i].addr, waits[i].val,
2189 waits[i].operation, waits[i].data_type,
2190 (inherit_err_flags & ((uint32_t)1 << i)) ? 1 : 0);
2191 }
2192 break;
2193 }
2194 case BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION:
2195 {
2196 const struct base_cqs_set_operation_info *sets = cmd->info.cqs_set_operation.objs;
2197 unsigned int i;
2198
2199 for (i = 0; i < cmd->info.cqs_set_operation.nr_objs; i++) {
2200 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION(
2201 kbdev, queue, sets[i].addr, sets[i].val,
2202 sets[i].operation, sets[i].data_type);
2203 }
2204 break;
2205 }
2206 case BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER:
2207 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER(kbdev, queue);
2208 break;
2209 case BASE_KCPU_COMMAND_TYPE_MAP_IMPORT:
2210 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT(kbdev, queue,
2211 cmd->info.import.gpu_va);
2212 break;
2213 case BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT:
2214 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT(kbdev, queue,
2215 cmd->info.import.gpu_va);
2216 break;
2217 case BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE:
2218 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE(
2219 kbdev, queue, cmd->info.import.gpu_va);
2220 break;
2221 case BASE_KCPU_COMMAND_TYPE_JIT_ALLOC:
2222 {
2223 u8 i;
2224
2225 KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
2226 for (i = 0; i < cmd->info.jit_alloc.count; i++) {
2227 const struct base_jit_alloc_info *info =
2228 &cmd->info.jit_alloc.info[i];
2229
2230 KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC(
2231 kbdev, queue, info->gpu_alloc_addr, info->va_pages,
2232 info->commit_pages, info->extension, info->id, info->bin_id,
2233 info->max_allocations, info->flags, info->usage_id);
2234 }
2235 KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
2236 break;
2237 }
2238 case BASE_KCPU_COMMAND_TYPE_JIT_FREE:
2239 {
2240 u8 i;
2241
2242 KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE(kbdev, queue);
2243 for (i = 0; i < cmd->info.jit_free.count; i++) {
2244 KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE(
2245 kbdev, queue, cmd->info.jit_free.ids[i]);
2246 }
2247 KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE(kbdev, queue);
2248 break;
2249 }
2250 #if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
2251 case BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND:
2252 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND(
2253 kbdev, queue, cmd->info.suspend_buf_copy.sus_buf,
2254 cmd->info.suspend_buf_copy.group_handle);
2255 break;
2256 #endif
2257 default:
2258 dev_dbg(kbdev->dev, "Unknown command type %u", cmd->type);
2259 break;
2260 }
2261 }
2262
2263 int kbase_csf_kcpu_queue_enqueue(struct kbase_context *kctx,
2264 struct kbase_ioctl_kcpu_queue_enqueue *enq)
2265 {
2266 struct kbase_kcpu_command_queue *queue = NULL;
2267 void __user *user_cmds = u64_to_user_ptr(enq->addr);
2268 int ret = 0;
2269 u32 i;
2270
2271 /* The offset to the first command that is being processed or yet to
2272 * be processed is of u8 type, so the number of commands inside the
2273 * queue cannot be more than 256. The current implementation expects
2274 * exactly 256; any other size would require the addition of
2275 * wrapping logic.
2276 */
2277 BUILD_BUG_ON(KBASEP_KCPU_QUEUE_SIZE != 256);
2278
2279 /* Whilst the backend interface allows enqueueing multiple commands in
2280 * a single operation, the Base interface does not expose any mechanism
2281 * to do so. Moreover, the handling is currently missing for the case
2282 * where multiple commands are submitted and the enqueue of one of the
2283 * command in the set fails after successfully enqueuing other commands
2284 * in the set.
2285 */
2286 if (enq->nr_commands != 1) {
2287 dev_dbg(kctx->kbdev->dev,
2288 "More than one commands enqueued");
2289 return -EINVAL;
2290 }
2291
2292 /* There might be a race between one thread trying to enqueue commands to the queue
2293 * and another thread trying to delete the same queue.
2294 * This race could lead to a use-after-free in the enqueuing thread if
2295 * the resources for the queue have already been freed by the deleting thread.
2296 *
2297 * To prevent the issue, two mutexes are acquired/released asymmetrically as follows.
2298 *
2299 * Lock A (kctx mutex)
2300 * Lock B (queue mutex)
2301 * Unlock A
2302 * Unlock B
2303 *
2304 * With the kctx mutex held, the enqueuing thread will check the queue
2305 * and will return an error code if the queue has already been deleted.
2306 */
2307 mutex_lock(&kctx->csf.kcpu_queues.lock);
2308 queue = kctx->csf.kcpu_queues.array[enq->id];
2309 if (queue == NULL) {
2310 dev_dbg(kctx->kbdev->dev, "Invalid KCPU queue (id:%u)", enq->id);
2311 mutex_unlock(&kctx->csf.kcpu_queues.lock);
2312 return -EINVAL;
2313 }
2314 mutex_lock(&queue->lock);
2315 mutex_unlock(&kctx->csf.kcpu_queues.lock);
2316
2317 if (kcpu_queue_get_space(queue) < enq->nr_commands) {
2318 ret = -EBUSY;
2319 queue->enqueue_failed = true;
2320 goto out;
2321 }
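/* Setting enqueue_failed above pairs with the check in
 * kcpu_queue_process(): once commands complete and space frees up, an
 * event is raised so the client can retry the enqueue.
 */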
2322
2323 /* Copy all command's info to the command buffer.
2324 * Note: it would be more efficient to process all commands in-line
2325 * until we encounter an unresolved CQS_ / FENCE_WAIT; however, the
2326 * interface allows multiple commands to be enqueued, so we must
2327 * account for the possibility of having to roll back.
2328 */
2329
2330 for (i = 0; (i != enq->nr_commands) && !ret; ++i) {
2331 struct kbase_kcpu_command *kcpu_cmd =
2332 &queue->commands[(u8)(queue->start_offset + queue->num_pending_cmds + i)];
2333 struct base_kcpu_command command;
2334 unsigned int j;
2335
2336 if (copy_from_user(&command, user_cmds, sizeof(command))) {
2337 ret = -EFAULT;
2338 goto out;
2339 }
2340
2341 user_cmds = (void __user *)((uintptr_t)user_cmds +
2342 sizeof(struct base_kcpu_command));
2343
2344 for (j = 0; j < sizeof(command.padding); j++) {
2345 if (command.padding[j] != 0) {
2346 dev_dbg(kctx->kbdev->dev,
2347 "base_kcpu_command padding not 0\n");
2348 ret = -EINVAL;
2349 goto out;
2350 }
2351 }
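/* Non-zero padding is rejected above, presumably so the reserved bytes
 * of the UAPI struct can later be reused without ambiguity about how
 * older kernels treated them.
 */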
2352
2353 kcpu_cmd->enqueue_ts = atomic64_inc_return(&kctx->csf.kcpu_queues.cmd_seq_num);
2354 switch (command.type) {
2355 case BASE_KCPU_COMMAND_TYPE_FENCE_WAIT:
2356 #if IS_ENABLED(CONFIG_SYNC_FILE)
2357 ret = kbase_kcpu_fence_wait_prepare(queue,
2358 &command.info.fence, kcpu_cmd);
2359 #else
2360 ret = -EINVAL;
2361 dev_warn(kctx->kbdev->dev, "fence wait command unsupported\n");
2362 #endif
2363 break;
2364 case BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL:
2365 #if IS_ENABLED(CONFIG_SYNC_FILE)
2366 ret = kbase_kcpu_fence_signal_prepare(queue,
2367 &command.info.fence, kcpu_cmd);
2368 #else
2369 ret = -EINVAL;
2370 dev_warn(kctx->kbdev->dev, "fence signal command unsupported\n");
2371 #endif
2372 break;
2373 case BASE_KCPU_COMMAND_TYPE_CQS_WAIT:
2374 ret = kbase_kcpu_cqs_wait_prepare(queue,
2375 &command.info.cqs_wait, kcpu_cmd);
2376 break;
2377 case BASE_KCPU_COMMAND_TYPE_CQS_SET:
2378 ret = kbase_kcpu_cqs_set_prepare(queue,
2379 &command.info.cqs_set, kcpu_cmd);
2380 break;
2381 case BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION:
2382 ret = kbase_kcpu_cqs_wait_operation_prepare(queue,
2383 &command.info.cqs_wait_operation, kcpu_cmd);
2384 break;
2385 case BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION:
2386 ret = kbase_kcpu_cqs_set_operation_prepare(queue,
2387 &command.info.cqs_set_operation, kcpu_cmd);
2388 break;
2389 case BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER:
2390 kcpu_cmd->type = BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER;
2391 ret = 0;
2392 break;
2393 case BASE_KCPU_COMMAND_TYPE_MAP_IMPORT:
2394 ret = kbase_kcpu_map_import_prepare(queue,
2395 &command.info.import, kcpu_cmd);
2396 break;
2397 case BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT:
2398 ret = kbase_kcpu_unmap_import_prepare(queue,
2399 &command.info.import, kcpu_cmd);
2400 break;
2401 case BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE:
2402 ret = kbase_kcpu_unmap_import_force_prepare(queue,
2403 &command.info.import, kcpu_cmd);
2404 break;
2405 case BASE_KCPU_COMMAND_TYPE_JIT_ALLOC:
2406 ret = kbase_kcpu_jit_allocate_prepare(queue,
2407 &command.info.jit_alloc, kcpu_cmd);
2408 break;
2409 case BASE_KCPU_COMMAND_TYPE_JIT_FREE:
2410 ret = kbase_kcpu_jit_free_prepare(queue,
2411 &command.info.jit_free, kcpu_cmd);
2412 break;
2413 #if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
2414 case BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND:
2415 ret = kbase_csf_queue_group_suspend_prepare(queue,
2416 &command.info.suspend_buf_copy,
2417 kcpu_cmd);
2418 break;
2419 #endif
2420 default:
2421 dev_dbg(queue->kctx->kbdev->dev,
2422 "Unknown command type %u", command.type);
2423 ret = -EINVAL;
2424 break;
2425 }
2426 }
2427
2428 if (!ret) {
2429 /* We only instrument the enqueues after all commands have been
2430 * successfully enqueued, because if we did them during the enqueue
2431 * and an error occurred, we would not be able to roll them back
2432 * as is done for the command enqueues themselves.
2433 */
2434 for (i = 0; i != enq->nr_commands; ++i) {
2435 u8 cmd_idx = (u8)(queue->start_offset + queue->num_pending_cmds + i);
2436
2437 KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_COMMAND(
2438 queue, &queue->commands[cmd_idx]);
2439 }
2440
2441 queue->num_pending_cmds += enq->nr_commands;
2442 kcpu_queue_process(queue, false);
2443 }
2444
2445 out:
2446 mutex_unlock(&queue->lock);
2447
2448 return ret;
2449 }
2450
2451 int kbase_csf_kcpu_queue_context_init(struct kbase_context *kctx)
2452 {
2453 int idx;
2454
2455 bitmap_zero(kctx->csf.kcpu_queues.in_use, KBASEP_MAX_KCPU_QUEUES);
2456
2457 for (idx = 0; idx < KBASEP_MAX_KCPU_QUEUES; ++idx)
2458 kctx->csf.kcpu_queues.array[idx] = NULL;
2459
2460 mutex_init(&kctx->csf.kcpu_queues.lock);
2461
2462 atomic64_set(&kctx->csf.kcpu_queues.cmd_seq_num, 0);
2463
2464 return 0;
2465 }
2466
2467 void kbase_csf_kcpu_queue_context_term(struct kbase_context *kctx)
2468 {
2469 while (!bitmap_empty(kctx->csf.kcpu_queues.in_use,
2470 KBASEP_MAX_KCPU_QUEUES)) {
2471 int id = find_first_bit(kctx->csf.kcpu_queues.in_use,
2472 KBASEP_MAX_KCPU_QUEUES);
2473
2474 if (WARN_ON(!kctx->csf.kcpu_queues.array[id]))
2475 clear_bit(id, kctx->csf.kcpu_queues.in_use);
2476 else
2477 (void)delete_queue(kctx, id);
2478 }
2479
2480 mutex_destroy(&kctx->csf.kcpu_queues.lock);
2481 }
2482 KBASE_EXPORT_TEST_API(kbase_csf_kcpu_queue_context_term);
2483
2484 int kbase_csf_kcpu_queue_delete(struct kbase_context *kctx,
2485 struct kbase_ioctl_kcpu_queue_delete *del)
2486 {
2487 return delete_queue(kctx, (u32)del->id);
2488 }
2489
2490 int kbase_csf_kcpu_queue_new(struct kbase_context *kctx,
2491 struct kbase_ioctl_kcpu_queue_new *newq)
2492 {
2493 struct kbase_kcpu_command_queue *queue;
2494 int idx;
2495 int n;
2496 int ret = 0;
2497 #if IS_ENABLED(CONFIG_SYNC_FILE)
2498 struct kbase_kcpu_dma_fence_meta *metadata;
2499 #endif
2500 /* The queue id is of u8 type and we use the index of the kcpu_queues
2501 * array as an id, so the number of elements in the array can't be
2502 * more than 256.
2503 */
2504 BUILD_BUG_ON(KBASEP_MAX_KCPU_QUEUES > 256);
2505
2506 mutex_lock(&kctx->csf.kcpu_queues.lock);
2507
2508 idx = find_first_zero_bit(kctx->csf.kcpu_queues.in_use,
2509 KBASEP_MAX_KCPU_QUEUES);
2510 if (idx >= (int)KBASEP_MAX_KCPU_QUEUES) {
2511 ret = -ENOMEM;
2512 goto out;
2513 }
2514
2515 if (WARN_ON(kctx->csf.kcpu_queues.array[idx])) {
2516 ret = -EINVAL;
2517 goto out;
2518 }
2519
2520 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
2521
2522 if (!queue) {
2523 ret = -ENOMEM;
2524 goto out;
2525 }
2526
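/* WQ_UNBOUND allows the work to run on any CPU and WQ_HIGHPRI places it
 * on the high-priority worker pool, presumably so KCPU command
 * processing is not delayed behind regular per-CPU work.
 */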
2527 queue->wq = alloc_workqueue("mali_kbase_csf_kcpu_wq_%i", WQ_UNBOUND | WQ_HIGHPRI, 0, idx);
2528 if (queue->wq == NULL) {
2529 kfree(queue);
2530 ret = -ENOMEM;
2531
2532 goto out;
2533 }
2534
2535 bitmap_set(kctx->csf.kcpu_queues.in_use, idx, 1);
2536 kctx->csf.kcpu_queues.array[idx] = queue;
2537 mutex_init(&queue->lock);
2538 queue->kctx = kctx;
2539 queue->start_offset = 0;
2540 queue->num_pending_cmds = 0;
2541 #if IS_ENABLED(CONFIG_SYNC_FILE)
2542 queue->fence_context = dma_fence_context_alloc(1);
2543 queue->fence_seqno = 0;
2544 queue->fence_wait_processed = false;
2545
2546 metadata = kzalloc(sizeof(*metadata), GFP_KERNEL);
2547 if (!metadata) {
2548 destroy_workqueue(queue->wq);
2549 kfree(queue);
2550 ret = -ENOMEM;
2551 goto out;
2552 }
2553
2554 metadata->kbdev = kctx->kbdev;
2555 metadata->kctx_id = kctx->id;
2556 n = snprintf(metadata->timeline_name, MAX_TIMELINE_NAME, "%d-%d_%d-%lld-kcpu",
2557 kctx->kbdev->id, kctx->tgid, kctx->id, queue->fence_context);
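/* The timeline name encodes the device id, process tgid, context id and
 * dma-fence context, which should make fences from this queue easy to
 * attribute in sync_file/fence debug output.
 */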
2558 if (WARN_ON(n >= MAX_TIMELINE_NAME)) {
2559 destroy_workqueue(queue->wq);
2560 kfree(queue);
2561 kfree(metadata);
2562 ret = -EINVAL;
2563 goto out;
2564 }
2565
2566 kbase_refcount_set(&metadata->refcount, 1);
2567 queue->metadata = metadata;
2568 atomic_inc(&kctx->kbdev->live_fence_metadata);
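/* live_fence_metadata tracks metadata objects that may outlive their
 * queue; it is presumably decremented once the final reference to the
 * metadata is dropped via kbase_kcpu_dma_fence_meta_put().
 */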
2569 #endif /* CONFIG_SYNC_FILE */
2570 queue->enqueue_failed = false;
2571 queue->command_started = false;
2572 INIT_LIST_HEAD(&queue->jit_blocked);
2573 queue->has_error = false;
2574 INIT_WORK(&queue->work, kcpu_queue_process_worker);
2575 queue->id = idx;
2576
2577 newq->id = idx;
2578
2579 /* Fire the tracepoint with the mutex held to enforce correct ordering
2580 * with the summary stream.
2581 */
2582 KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE(kctx->kbdev, queue, queue->id, kctx->id,
2583 queue->num_pending_cmds);
2584
2585 KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev, KCPU_QUEUE_CREATE, queue,
2586 queue->fence_context, 0);
2587 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
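/* Initialise the debug timer used by fence waits; the callback is
 * presumably used to report fences that remain unsignalled beyond
 * FENCE_WAIT_TIMEOUT_MS.
 */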
2588 kbase_timer_setup(&queue->fence_timeout, fence_timeout_callback);
2589 #endif
2590 out:
2591 mutex_unlock(&kctx->csf.kcpu_queues.lock);
2592
2593 return ret;
2594 }
2595 KBASE_EXPORT_TEST_API(kbase_csf_kcpu_queue_new);
2596