1 /*
2 *
3 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
13 *
14 */
15
16 #define ENABLE_DEBUG_LOG
17 #include "platform/rk/custom_log.h"
18
19 #include <mali_kbase.h>
20 #include <mali_kbase_config_defaults.h>
21 #include <mali_kbase_uku.h>
22 #include <mali_midg_regmap.h>
23 #include <mali_kbase_gator.h>
24 #include <mali_kbase_mem_linux.h>
25 #ifdef CONFIG_MALI_DEVFREQ
26 #include <linux/devfreq.h>
27 #include <backend/gpu/mali_kbase_devfreq.h>
28 #ifdef CONFIG_DEVFREQ_THERMAL
29 #include <ipa/mali_kbase_ipa_debugfs.h>
30 #endif /* CONFIG_DEVFREQ_THERMAL */
31 #endif /* CONFIG_MALI_DEVFREQ */
32 #ifdef CONFIG_MALI_NO_MALI
33 #include "mali_kbase_model_linux.h"
34 #endif /* CONFIG_MALI_NO_MALI */
35 #include "mali_kbase_mem_profile_debugfs_buf_size.h"
36 #include "mali_kbase_debug_mem_view.h"
37 #include "mali_kbase_mem.h"
38 #include "mali_kbase_mem_pool_debugfs.h"
39 #if !MALI_CUSTOMER_RELEASE
40 #include "mali_kbase_regs_dump_debugfs.h"
41 #endif /* !MALI_CUSTOMER_RELEASE */
42 #include "mali_kbase_regs_history_debugfs.h"
43 #include <mali_kbase_hwaccess_backend.h>
44 #include <mali_kbase_hwaccess_jm.h>
45 #include <mali_kbase_ctx_sched.h>
46 #include <backend/gpu/mali_kbase_device_internal.h>
47 #include "mali_kbase_ioctl.h"
48
49 #ifdef CONFIG_KDS
50 #include <linux/kds.h>
51 #include <linux/anon_inodes.h>
52 #include <linux/syscalls.h>
53 #endif /* CONFIG_KDS */
54
55 #include <linux/pm_runtime.h>
56 #include <linux/module.h>
57 #include <linux/init.h>
58 #include <linux/poll.h>
59 #include <linux/kernel.h>
60 #include <linux/errno.h>
61 #include <linux/of.h>
62 #include <linux/platform_device.h>
63 #include <linux/of_platform.h>
64 #include <linux/miscdevice.h>
65 #include <linux/list.h>
66 #include <linux/semaphore.h>
67 #include <linux/fs.h>
68 #include <linux/uaccess.h>
69 #include <linux/interrupt.h>
70 #include <linux/mm.h>
71 #include <linux/compat.h> /* is_compat_task */
72 #include <linux/mman.h>
73 #include <linux/version.h>
74 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
75 #include <linux/pm_runtime.h>
76 #endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
77 #include <mali_kbase_hw.h>
78 #include <platform/mali_kbase_platform_common.h>
79 #ifdef CONFIG_MALI_PLATFORM_FAKE
80 #include <platform/mali_kbase_platform_fake.h>
81 #endif /* CONFIG_MALI_PLATFORM_FAKE */
82 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
83 #include <mali_kbase_sync.h>
84 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
85 #include <linux/clk.h>
86 #include <linux/delay.h>
87
88 #include <mali_kbase_config.h>
89
90
91 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
92 #include <linux/pm_opp.h>
93 #else
94 #include <linux/opp.h>
95 #endif
96
97 #include <mali_kbase_tlstream.h>
98
99 #include <mali_kbase_as_fault_debugfs.h>
100
101 /* GPU IRQ Tags */
102 #define JOB_IRQ_TAG 0
103 #define MMU_IRQ_TAG 1
104 #define GPU_IRQ_TAG 2
105
106 #if MALI_UNIT_TEST
107 static struct kbase_exported_test_data shared_kernel_test_data;
108 EXPORT_SYMBOL(shared_kernel_test_data);
109 #endif /* MALI_UNIT_TEST */
110
111 /** rk_ext: version of the rk_ext interface in this mali_ko, a.k.a. rk_ko_ver. */
112 #define ROCKCHIP_VERSION (13)
113
114 static int kbase_dev_nr;
115
116 static DEFINE_MUTEX(kbase_dev_list_lock);
117 static LIST_HEAD(kbase_dev_list);
118
119 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
120 static inline void __compile_time_asserts(void)
121 {
122 CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
123 }
124
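/*
 * kbase_api_handshake - negotiate the UK interface version with userspace.
 *
 * For majors the driver is backwards compatible with, the matching legacy
 * version is reported back; otherwise the driver's own major/minor is
 * returned and userspace may bail out if it cannot handle it. The agreed
 * version is recorded in kctx->api_version for later checks.
 */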
125 static int kbase_api_handshake(struct kbase_context *kctx,
126 struct kbase_ioctl_version_check *version)
127 {
128 switch (version->major) {
129 #ifdef BASE_LEGACY_UK6_SUPPORT
130 case 6:
131 /* We are backwards compatible with version 6,
132 * so pretend to be the old version */
133 version->major = 6;
134 version->minor = 1;
135 break;
136 #endif /* BASE_LEGACY_UK6_SUPPORT */
137 #ifdef BASE_LEGACY_UK7_SUPPORT
138 case 7:
139 /* We are backwards compatible with version 7,
140 * so pretend to be the old version */
141 version->major = 7;
142 version->minor = 1;
143 break;
144 #endif /* BASE_LEGACY_UK7_SUPPORT */
145 #ifdef BASE_LEGACY_UK8_SUPPORT
146 case 8:
147 /* We are backwards compatible with version 8,
148 * so pretend to be the old version */
149 version->major = 8;
150 version->minor = 4;
151 break;
152 #endif /* BASE_LEGACY_UK8_SUPPORT */
153 #ifdef BASE_LEGACY_UK9_SUPPORT
154 case 9:
155 /* We are backwards compatible with version 9,
156 * so pretend to be the old version */
157 version->major = 9;
158 version->minor = 0;
159 break;
160 #endif /* BASE_LEGACY_UK9_SUPPORT */
161 case BASE_UK_VERSION_MAJOR:
162 /* set minor to be the lowest common */
163 version->minor = min_t(int, BASE_UK_VERSION_MINOR,
164 (int)version->minor);
165 break;
166 default:
167 /* We return our actual version regardless if it
168 * matches the version returned by userspace -
169 * userspace can bail if it can't handle this
170 * version */
171 version->major = BASE_UK_VERSION_MAJOR;
172 version->minor = BASE_UK_VERSION_MINOR;
173 break;
174 }
175
176 /* save the proposed version number for later use */
177 kctx->api_version = KBASE_API_VERSION(version->major, version->minor);
178
179 return 0;
180 }
181
182 /**
183 * enum mali_error - Mali error codes shared with userspace
184 *
185 * This is a subset of the common Mali errors that can be returned to userspace.
186 * Values of matching user and kernel space enumerators MUST be the same.
187 * MALI_ERROR_NONE is guaranteed to be 0.
188 *
189 * @MALI_ERROR_NONE: Success
190 * @MALI_ERROR_OUT_OF_GPU_MEMORY: Not used in the kernel driver
191 * @MALI_ERROR_OUT_OF_MEMORY: Memory allocation failure
192 * @MALI_ERROR_FUNCTION_FAILED: Generic error code
193 */
194 enum mali_error {
195 MALI_ERROR_NONE = 0,
196 MALI_ERROR_OUT_OF_GPU_MEMORY,
197 MALI_ERROR_OUT_OF_MEMORY,
198 MALI_ERROR_FUNCTION_FAILED,
199 };
200
201 enum {
202 inited_mem = (1u << 0),
203 inited_js = (1u << 1),
204 inited_pm_runtime_init = (1u << 2),
205 #ifdef CONFIG_MALI_DEVFREQ
206 inited_devfreq = (1u << 3),
207 #endif /* CONFIG_MALI_DEVFREQ */
208 inited_tlstream = (1u << 4),
209 inited_backend_early = (1u << 5),
210 inited_backend_late = (1u << 6),
211 inited_device = (1u << 7),
212 inited_vinstr = (1u << 8),
213
214 inited_job_fault = (1u << 10),
215 inited_sysfs_group = (1u << 11),
216 inited_misc_register = (1u << 12),
217 inited_get_device = (1u << 13),
218 inited_dev_list = (1u << 14),
219 inited_debugfs = (1u << 15),
220 inited_gpu_device = (1u << 16),
221 inited_registers_map = (1u << 17),
222 inited_io_history = (1u << 18),
223 inited_power_control = (1u << 19),
224 inited_buslogger = (1u << 20),
225 inited_protected = (1u << 21),
226 inited_ctx_sched = (1u << 22)
227 };
228
229
230 #ifdef CONFIG_MALI_DEBUG
231 #define INACTIVE_WAIT_MS (5000)
232
233 void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
234 {
235 kbdev->driver_inactive = inactive;
236 wake_up(&kbdev->driver_inactive_wait);
237
238 /* Wait for any running IOCTLs to complete */
239 if (inactive)
240 msleep(INACTIVE_WAIT_MS);
241 }
242 KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
243 #endif /* CONFIG_MALI_DEBUG */
244
245 /**
246 * kbase_legacy_dispatch - UKK dispatch function
247 *
248 * This is the dispatch function for the legacy UKK ioctl interface. No new
249 * ioctls should be added to this function; see kbase_ioctl() instead.
250 *
251 * @kctx: The kernel context structure
252 * @args: Pointer to the data structure passed from/to user space
253 * @args_size: Size of the data structure
254 */
255 static int kbase_legacy_dispatch(struct kbase_context *kctx,
256 void * const args, u32 args_size)
257 {
258 struct kbase_device *kbdev;
259 union uk_header *ukh = args;
260 u32 id;
261 int ret = 0;
262
263 KBASE_DEBUG_ASSERT(ukh != NULL);
264
265 kbdev = kctx->kbdev;
266 id = ukh->id;
267 ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
268
269 #ifdef CONFIG_MALI_DEBUG
270 wait_event(kbdev->driver_inactive_wait,
271 kbdev->driver_inactive == false);
272 #endif /* CONFIG_MALI_DEBUG */
273
274 if (UKP_FUNC_ID_CHECK_VERSION == id) {
275 struct uku_version_check_args *version_check;
276 struct kbase_ioctl_version_check version;
277
278 if (args_size != sizeof(struct uku_version_check_args)) {
279 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
280 return 0;
281 }
282 version_check = (struct uku_version_check_args *)args;
283 version.minor = version_check->minor;
284 version.major = version_check->major;
285
286 kbase_api_handshake(kctx, &version);
287
288 version_check->minor = version.minor;
289 version_check->major = version.major;
290 ukh->ret = MALI_ERROR_NONE;
291 return 0;
292 }
293
294 /* block calls until version handshake */
295 if (kctx->api_version == 0)
296 return -EINVAL;
297
298 if (!atomic_read(&kctx->setup_complete)) {
299 struct kbase_uk_set_flags *kbase_set_flags;
300
301 /* setup pending, try to signal that we'll do the setup,
302 * if setup was already in progress, fail this call
303 */
304 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
305 return -EINVAL;
306
307 /* if unexpected call, will stay stuck in setup mode
308 * (is it the only call we accept?)
309 */
310 if (id != KBASE_FUNC_SET_FLAGS)
311 return -EINVAL;
312
313 kbase_set_flags = (struct kbase_uk_set_flags *)args;
314
315 /* if not matching the expected call, stay in setup mode */
316 if (sizeof(*kbase_set_flags) != args_size)
317 goto bad_size;
318
319 /* if bad flags, will stay stuck in setup mode */
320 if (kbase_context_set_create_flags(kctx,
321 kbase_set_flags->create_flags) != 0)
322 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
323
324 atomic_set(&kctx->setup_complete, 1);
325 return 0;
326 }
327
328 /* setup complete, perform normal operation */
329 switch (id) {
330 case KBASE_FUNC_MEM_JIT_INIT:
331 {
332 struct kbase_uk_mem_jit_init *jit_init = args;
333
334 if (sizeof(*jit_init) != args_size)
335 goto bad_size;
336
337 if (kbase_region_tracker_init_jit(kctx,
338 jit_init->va_pages))
339 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
340 break;
341 }
342 case KBASE_FUNC_MEM_ALLOC:
343 {
344 struct kbase_uk_mem_alloc *mem = args;
345 struct kbase_va_region *reg;
346
347 if (sizeof(*mem) != args_size)
348 goto bad_size;
349
350 #if defined(CONFIG_64BIT)
351 if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
352 /* force SAME_VA if a 64-bit client */
353 mem->flags |= BASE_MEM_SAME_VA;
354 }
355 #endif
356
357 reg = kbase_mem_alloc(kctx, mem->va_pages,
358 mem->commit_pages, mem->extent,
359 &mem->flags, &mem->gpu_va);
360 mem->va_alignment = 0;
361
362 if (!reg)
363 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
364 break;
365 }
366 case KBASE_FUNC_MEM_IMPORT: {
367 struct kbase_uk_mem_import *mem_import = args;
368 void __user *phandle;
369
370 if (sizeof(*mem_import) != args_size)
371 goto bad_size;
372 #ifdef CONFIG_COMPAT
373 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
374 phandle = compat_ptr(mem_import->phandle.compat_value);
375 else
376 #endif
377 phandle = mem_import->phandle.value;
378
379 if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
380 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
381 break;
382 }
383
384 if (kbase_mem_import(kctx,
385 (enum base_mem_import_type)
386 mem_import->type,
387 phandle,
388 0,
389 &mem_import->gpu_va,
390 &mem_import->va_pages,
391 &mem_import->flags)) {
392 mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
393 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
394 }
395 break;
396 }
397 case KBASE_FUNC_MEM_ALIAS: {
398 struct kbase_uk_mem_alias *alias = args;
399 struct base_mem_aliasing_info __user *user_ai;
400 struct base_mem_aliasing_info *ai;
401
402 if (sizeof(*alias) != args_size)
403 goto bad_size;
404
405 if (alias->nents > 2048) {
406 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
407 break;
408 }
409 if (!alias->nents) {
410 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
411 break;
412 }
413
414 #ifdef CONFIG_COMPAT
415 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
416 user_ai = compat_ptr(alias->ai.compat_value);
417 else
418 #endif
419 user_ai = alias->ai.value;
420
421 ai = vmalloc(sizeof(*ai) * alias->nents);
422
423 if (!ai) {
424 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
425 break;
426 }
427
428 if (copy_from_user(ai, user_ai,
429 sizeof(*ai) * alias->nents)) {
430 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
431 goto copy_failed;
432 }
433
434 alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
435 alias->stride,
436 alias->nents, ai,
437 &alias->va_pages);
438 if (!alias->gpu_va) {
439 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
440 goto no_alias;
441 }
442 no_alias:
443 copy_failed:
444 vfree(ai);
445 break;
446 }
447 case KBASE_FUNC_MEM_COMMIT:
448 {
449 struct kbase_uk_mem_commit *commit = args;
450 int ret;
451
452 if (sizeof(*commit) != args_size)
453 goto bad_size;
454
455 ret = kbase_mem_commit(kctx, commit->gpu_addr,
456 commit->pages);
457
458 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
459 commit->result_subcode =
460 BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
461
462 if (ret == 0) {
463 ukh->ret = MALI_ERROR_NONE;
464 commit->result_subcode =
465 BASE_BACKING_THRESHOLD_OK;
466 } else if (ret == -ENOMEM) {
467 commit->result_subcode =
468 BASE_BACKING_THRESHOLD_ERROR_OOM;
469 }
470
471 break;
472 }
473
474 case KBASE_FUNC_MEM_QUERY:
475 {
476 struct kbase_uk_mem_query *query = args;
477
478 if (sizeof(*query) != args_size)
479 goto bad_size;
480
481 if (kbase_mem_query(kctx, query->gpu_addr,
482 query->query, &query->value) != 0)
483 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
484 else
485 ukh->ret = MALI_ERROR_NONE;
486 break;
487 }
488 break;
489
490 case KBASE_FUNC_MEM_FLAGS_CHANGE:
491 {
492 struct kbase_uk_mem_flags_change *fc = args;
493
494 if (sizeof(*fc) != args_size)
495 goto bad_size;
496
497 if (kbase_mem_flags_change(kctx, fc->gpu_va,
498 fc->flags, fc->mask) != 0)
499 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
500
501 break;
502 }
503 case KBASE_FUNC_MEM_FREE:
504 {
505 struct kbase_uk_mem_free *mem = args;
506
507 if (sizeof(*mem) != args_size)
508 goto bad_size;
509
510 if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
511 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
512 break;
513 }
514
515 case KBASE_FUNC_JOB_SUBMIT:
516 {
517 struct kbase_uk_job_submit *job = args;
518 void __user *user_addr = NULL;
519
520 if (sizeof(*job) != args_size)
521 goto bad_size;
522
523 #ifdef CONFIG_COMPAT
524 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
525 user_addr = compat_ptr(job->addr.compat_value);
526 else
527 #endif
528 user_addr = job->addr.value;
529
530 if (kbase_jd_submit(kctx, user_addr, job->nr_atoms,
531 job->stride, false) != 0)
532 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
533 break;
534 }
535
536 #ifdef BASE_LEGACY_UK6_SUPPORT
537 case KBASE_FUNC_JOB_SUBMIT_UK6:
538 {
539 struct kbase_uk_job_submit *job = args;
540 void __user *user_addr = NULL;
541
542 if (sizeof(*job) != args_size)
543 goto bad_size;
544
545 #ifdef CONFIG_COMPAT
546 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
547 user_addr = compat_ptr(job->addr.compat_value);
548 else
549 #endif
550 user_addr = job->addr.value;
551
552 if (kbase_jd_submit(kctx, user_addr, job->nr_atoms,
553 job->stride, true) != 0)
554 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
555 break;
556 }
557 #endif
558
559 case KBASE_FUNC_SYNC:
560 {
561 struct kbase_uk_sync_now *sn = args;
562
563 if (sizeof(*sn) != args_size)
564 goto bad_size;
565
566 #ifndef CONFIG_MALI_COH_USER
567 if (kbase_sync_now(kctx, &sn->sset.basep_sset) != 0)
568 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
569 #endif
570 break;
571 }
572
573 case KBASE_FUNC_DISJOINT_QUERY:
574 {
575 struct kbase_uk_disjoint_query *dquery = args;
576
577 if (sizeof(*dquery) != args_size)
578 goto bad_size;
579
580 /* Get the disjointness counter value. */
581 dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
582 break;
583 }
584
585 case KBASE_FUNC_POST_TERM:
586 {
587 kbase_event_close(kctx);
588 break;
589 }
590
591 case KBASE_FUNC_HWCNT_SETUP:
592 {
593 struct kbase_uk_hwcnt_setup *setup = args;
594
595 if (sizeof(*setup) != args_size)
596 goto bad_size;
597
598 mutex_lock(&kctx->vinstr_cli_lock);
599 if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
600 &kctx->vinstr_cli, setup) != 0)
601 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
602 mutex_unlock(&kctx->vinstr_cli_lock);
603 break;
604 }
605
606 case KBASE_FUNC_HWCNT_DUMP:
607 {
608 /* args ignored */
609 mutex_lock(&kctx->vinstr_cli_lock);
610 if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
611 BASE_HWCNT_READER_EVENT_MANUAL) != 0)
612 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
613 mutex_unlock(&kctx->vinstr_cli_lock);
614 break;
615 }
616
617 case KBASE_FUNC_HWCNT_CLEAR:
618 {
619 /* args ignored */
620 mutex_lock(&kctx->vinstr_cli_lock);
621 if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
622 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
623 mutex_unlock(&kctx->vinstr_cli_lock);
624 break;
625 }
626
627 case KBASE_FUNC_HWCNT_READER_SETUP:
628 {
629 struct kbase_uk_hwcnt_reader_setup *setup = args;
630
631 if (sizeof(*setup) != args_size)
632 goto bad_size;
633
634 mutex_lock(&kctx->vinstr_cli_lock);
635 if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
636 setup) != 0)
637 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
638 mutex_unlock(&kctx->vinstr_cli_lock);
639 break;
640 }
641
642 case KBASE_FUNC_GPU_PROPS_REG_DUMP:
643 {
644 struct kbase_uk_gpuprops *setup = args;
645
646 if (sizeof(*setup) != args_size)
647 goto bad_size;
648
649 if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
650 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
651 break;
652 }
653 case KBASE_FUNC_FIND_CPU_OFFSET:
654 {
655 struct kbase_uk_find_cpu_offset *find = args;
656
657 if (sizeof(*find) != args_size)
658 goto bad_size;
659
660 if (find->gpu_addr & ~PAGE_MASK) {
661 dev_warn(kbdev->dev,
662 "kbase_legacy_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
663 goto out_bad;
664 }
665
666 if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
667 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
668 } else {
669 int err;
670
671 err = kbasep_find_enclosing_cpu_mapping_offset(
672 kctx,
673 find->cpu_addr,
674 find->size,
675 &find->offset);
676
677 if (err)
678 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
679 }
680 break;
681 }
682 case KBASE_FUNC_GET_VERSION:
683 {
684 struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
685
686 if (sizeof(*get_version) != args_size)
687 goto bad_size;
688
689 /* version buffer size check is done by a compile-time assert */
690 memcpy(get_version->version_buffer,
691 KERNEL_SIDE_DDK_VERSION_STRING,
692 sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
693 get_version->version_string_size =
694 sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
695 get_version->rk_version = ROCKCHIP_VERSION;
696 break;
697 }
698
699 case KBASE_FUNC_STREAM_CREATE:
700 {
701 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
702 struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
703
704 if (sizeof(*screate) != args_size)
705 goto bad_size;
706
707 if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
708 /* not NULL terminated */
709 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
710 break;
711 }
712
713 if (kbase_sync_fence_stream_create(screate->name,
714 &screate->fd) != 0)
715 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
716 else
717 ukh->ret = MALI_ERROR_NONE;
718 #else /* CONFIG_SYNC || CONFIG_SYNC_FILE */
719 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
720 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
721 break;
722 }
723 case KBASE_FUNC_FENCE_VALIDATE:
724 {
725 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
726 struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
727
728 if (sizeof(*fence_validate) != args_size)
729 goto bad_size;
730
731 if (kbase_sync_fence_validate(fence_validate->fd) != 0)
732 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
733 else
734 ukh->ret = MALI_ERROR_NONE;
735 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
736 break;
737 }
738
739 case KBASE_FUNC_SET_TEST_DATA:
740 {
741 #if MALI_UNIT_TEST
742 struct kbase_uk_set_test_data *set_data = args;
743
744 shared_kernel_test_data = set_data->test_data;
745 shared_kernel_test_data.kctx.value = (void __user *)kctx;
746 shared_kernel_test_data.mm.value = (void __user *)current->mm;
747 ukh->ret = MALI_ERROR_NONE;
748 #endif /* MALI_UNIT_TEST */
749 break;
750 }
751
752 case KBASE_FUNC_INJECT_ERROR:
753 {
754 #ifdef CONFIG_MALI_ERROR_INJECT
755 unsigned long flags;
756 struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
757
758 /* take reg_op_lock */
759 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
760 if (job_atom_inject_error(&params) != 0)
761 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
762 else
763 ukh->ret = MALI_ERROR_NONE;
764 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
765 /* release reg_op_lock */
766 #endif /* CONFIG_MALI_ERROR_INJECT */
767 break;
768 }
769
770 case KBASE_FUNC_MODEL_CONTROL:
771 {
772 #ifdef CONFIG_MALI_NO_MALI
773 unsigned long flags;
774 struct kbase_model_control_params params =
775 ((struct kbase_uk_model_control_params *)args)->params;
776
777 /* take reg_op_lock */
778 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
779 if (gpu_model_control(kbdev->model, &params) != 0)
780 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
781 else
782 ukh->ret = MALI_ERROR_NONE;
783 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
784 /* release reg_op_lock */
785 #endif /* CONFIG_MALI_NO_MALI */
786 break;
787 }
788
789 #ifdef BASE_LEGACY_UK8_SUPPORT
790 case KBASE_FUNC_KEEP_GPU_POWERED:
791 {
792 dev_warn(kbdev->dev, "kbase_legacy_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
793 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
794 break;
795 }
796 #endif /* BASE_LEGACY_UK8_SUPPORT */
797
798 case KBASE_FUNC_GET_PROFILING_CONTROLS:
799 {
800 struct kbase_uk_profiling_controls *controls =
801 (struct kbase_uk_profiling_controls *)args;
802 u32 i;
803
804 if (sizeof(*controls) != args_size)
805 goto bad_size;
806
807 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
808 controls->profiling_controls[i] =
809 kbdev->kbase_profiling_controls[i];
810
811 break;
812 }
813
814 /* used only for testing purposes; these controls are to be set by gator through gator API */
815 case KBASE_FUNC_SET_PROFILING_CONTROLS:
816 {
817 struct kbase_uk_profiling_controls *controls =
818 (struct kbase_uk_profiling_controls *)args;
819 u32 i;
820
821 if (sizeof(*controls) != args_size)
822 goto bad_size;
823
824 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
825 _mali_profiling_control(i, controls->profiling_controls[i]);
826
827 break;
828 }
829
830 case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
831 {
832 struct kbase_uk_debugfs_mem_profile_add *add_data =
833 (struct kbase_uk_debugfs_mem_profile_add *)args;
834 char *buf;
835 char __user *user_buf;
836
837 if (sizeof(*add_data) != args_size)
838 goto bad_size;
839
840 if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
841 dev_err(kbdev->dev, "buffer too big\n");
842 goto out_bad;
843 }
844
845 #ifdef CONFIG_COMPAT
846 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
847 user_buf =
848 compat_ptr(add_data->buf.compat_value);
849 else
850 #endif
851 user_buf = add_data->buf.value;
852
853 buf = kmalloc(add_data->len, GFP_KERNEL);
854 if (ZERO_OR_NULL_PTR(buf))
855 goto out_bad;
856
857 if (0 != copy_from_user(buf, user_buf, add_data->len)) {
858 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
859 kfree(buf);
860 goto out_bad;
861 }
862
863 if (kbasep_mem_profile_debugfs_insert(kctx, buf,
864 add_data->len)) {
865 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
866 goto out_bad;
867 }
868
869 break;
870 }
871
872 #ifdef CONFIG_MALI_NO_MALI
873 case KBASE_FUNC_SET_PRFCNT_VALUES:
874 {
875
876 struct kbase_uk_prfcnt_values *params =
877 ((struct kbase_uk_prfcnt_values *)args);
878 gpu_model_set_dummy_prfcnt_sample(params->data,
879 params->size);
880
881 break;
882 }
883 #endif /* CONFIG_MALI_NO_MALI */
884 #ifdef BASE_LEGACY_UK10_4_SUPPORT
885 case KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4:
886 {
887 struct kbase_uk_tlstream_acquire_v10_4 *tlstream_acquire
888 = args;
889 int ret;
890
891 if (sizeof(*tlstream_acquire) != args_size)
892 goto bad_size;
893
894 ret = kbase_tlstream_acquire(
895 kctx, 0);
896 if (ret < 0)
897 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
898 else
899 tlstream_acquire->fd = ret;
900 break;
901 }
902 #endif /* BASE_LEGACY_UK10_4_SUPPORT */
903 case KBASE_FUNC_TLSTREAM_ACQUIRE:
904 {
905 struct kbase_uk_tlstream_acquire *tlstream_acquire =
906 args;
907 int ret;
908
909 if (sizeof(*tlstream_acquire) != args_size)
910 goto bad_size;
911
912 if (tlstream_acquire->flags & ~BASE_TLSTREAM_FLAGS_MASK)
913 goto out_bad;
914
915 ret = kbase_tlstream_acquire(
916 kctx, tlstream_acquire->flags);
917 if (ret < 0)
918 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
919 else
920 tlstream_acquire->fd = ret;
921 break;
922 }
923 case KBASE_FUNC_TLSTREAM_FLUSH:
924 {
925 struct kbase_uk_tlstream_flush *tlstream_flush =
926 args;
927
928 if (sizeof(*tlstream_flush) != args_size)
929 goto bad_size;
930
931 kbase_tlstream_flush_streams();
932 break;
933 }
934 #if MALI_UNIT_TEST
935 case KBASE_FUNC_TLSTREAM_TEST:
936 {
937 struct kbase_uk_tlstream_test *tlstream_test = args;
938
939 if (sizeof(*tlstream_test) != args_size)
940 goto bad_size;
941
942 kbase_tlstream_test(
943 tlstream_test->tpw_count,
944 tlstream_test->msg_delay,
945 tlstream_test->msg_count,
946 tlstream_test->aux_msg);
947 break;
948 }
949 case KBASE_FUNC_TLSTREAM_STATS:
950 {
951 struct kbase_uk_tlstream_stats *tlstream_stats = args;
952
953 if (sizeof(*tlstream_stats) != args_size)
954 goto bad_size;
955
956 kbase_tlstream_stats(
957 &tlstream_stats->bytes_collected,
958 &tlstream_stats->bytes_generated);
959 break;
960 }
961 #endif /* MALI_UNIT_TEST */
962
963 case KBASE_FUNC_GET_CONTEXT_ID:
964 {
965 struct kbase_uk_context_id *info = args;
966
967 info->id = kctx->id;
968 break;
969 }
970
971 case KBASE_FUNC_SOFT_EVENT_UPDATE:
972 {
973 struct kbase_uk_soft_event_update *update = args;
974
975 if (sizeof(*update) != args_size)
976 goto bad_size;
977
978 if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
979 (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
980 (update->flags != 0))
981 goto out_bad;
982
983 if (kbase_soft_event_update(kctx, update->evt,
984 update->new_status))
985 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
986
987 break;
988 }
989
990 default:
991 dev_err(kbdev->dev, "unknown ioctl %u\n", id);
992 goto out_bad;
993 }
994
995 return ret;
996
997 bad_size:
998 dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
999 out_bad:
1000 return -EINVAL;
1001 }
1002
1003 static struct kbase_device *to_kbase_device(struct device *dev)
1004 {
1005 return dev_get_drvdata(dev);
1006 }
1007
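/*
 * assign_irqs - map the three platform IRQ resources to the JOB, MMU and
 * GPU tags in kbdev->irqs[]. With device tree (CONFIG_OF) the resources are
 * matched by name; otherwise they are taken in index order.
 */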
1008 static int assign_irqs(struct platform_device *pdev)
1009 {
1010 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
1011 int i;
1012
1013 if (!kbdev)
1014 return -ENODEV;
1015
1016 /* 3 IRQ resources */
1017 for (i = 0; i < 3; i++) {
1018 struct resource *irq_res;
1019 int irqtag;
1020
1021 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1022 if (!irq_res) {
1023 dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
1024 return -ENOENT;
1025 }
1026
1027 #ifdef CONFIG_OF
1028 if (!strncasecmp(irq_res->name, "JOB", 3)) {
1029 irqtag = JOB_IRQ_TAG;
1030 } else if (!strncasecmp(irq_res->name, "MMU", 3)) {
1031 irqtag = MMU_IRQ_TAG;
1032 } else if (!strncasecmp(irq_res->name, "GPU", 3)) {
1033 irqtag = GPU_IRQ_TAG;
1034 } else {
1035 dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
1036 irq_res->name);
1037 return -EINVAL;
1038 }
1039 #else
1040 irqtag = i;
1041 #endif /* CONFIG_OF */
1042 kbdev->irqs[irqtag].irq = irq_res->start;
1043 kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
1044 }
1045
1046 return 0;
1047 }
1048
1049 /*
1050 * API to acquire device list mutex and
1051 * return pointer to the device list head
1052 */
1053 const struct list_head *kbase_dev_list_get(void)
1054 {
1055 mutex_lock(&kbase_dev_list_lock);
1056 return &kbase_dev_list;
1057 }
1058 KBASE_EXPORT_TEST_API(kbase_dev_list_get);
1059
1060 /* API to release the device list mutex */
1061 void kbase_dev_list_put(const struct list_head *dev_list)
1062 {
1063 mutex_unlock(&kbase_dev_list_lock);
1064 }
1065 KBASE_EXPORT_TEST_API(kbase_dev_list_put);
1066
1067 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
1068 struct kbase_device *kbase_find_device(int minor)
1069 {
1070 struct kbase_device *kbdev = NULL;
1071 struct list_head *entry;
1072 const struct list_head *dev_list = kbase_dev_list_get();
1073
1074 list_for_each(entry, dev_list) {
1075 struct kbase_device *tmp;
1076
1077 tmp = list_entry(entry, struct kbase_device, entry);
1078 if (tmp->mdev.minor == minor || minor == -1) {
1079 kbdev = tmp;
1080 get_device(kbdev->dev);
1081 break;
1082 }
1083 }
1084 kbase_dev_list_put(dev_list);
1085
1086 return kbdev;
1087 }
1088 EXPORT_SYMBOL(kbase_find_device);
1089
1090 void kbase_release_device(struct kbase_device *kbdev)
1091 {
1092 put_device(kbdev->dev);
1093 }
1094 EXPORT_SYMBOL(kbase_release_device);
1095
1096 #if KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE
1097 /*
1098 * Kernel versions before v4.6 don't have kstrtobool_from_user(), except
1099 * the 4.4.y longterm branch, which gained it in 4.4.28.
1100 */
1101 static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
1102 {
1103 char buf[32];
1104
1105 count = min(sizeof(buf) - 1, count);
1106
1107 if (copy_from_user(buf, s, count))
1108 return -EFAULT;
1109 buf[count] = '\0';
1110
1111 return strtobool(buf, res);
1112 }
1113 #endif
1114
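/*
 * debugfs handlers backing the per-context "infinite_cache" file: writing a
 * boolean toggles the KCTX_INFINITE_CACHE flag, reading reports it as Y/N.
 */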
1115 static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
1116 {
1117 struct kbase_context *kctx = f->private_data;
1118 int err;
1119 bool value;
1120
1121 err = kstrtobool_from_user(ubuf, size, &value);
1122 if (err)
1123 return err;
1124
1125 if (value)
1126 kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
1127 else
1128 kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
1129
1130 return size;
1131 }
1132
1133 static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
1134 {
1135 struct kbase_context *kctx = f->private_data;
1136 char buf[32];
1137 int count;
1138 bool value;
1139
1140 value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
1141
1142 count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
1143
1144 return simple_read_from_buffer(ubuf, size, off, buf, count);
1145 }
1146
1147 static const struct file_operations kbase_infinite_cache_fops = {
1148 .open = simple_open,
1149 .write = write_ctx_infinite_cache,
1150 .read = read_ctx_infinite_cache,
1151 };
1152
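/*
 * kbase_open - create a kbase context for the opening process, attach it to
 * the file, create its debugfs directory and entries, and add it to the
 * device's context list.
 */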
1153 static int kbase_open(struct inode *inode, struct file *filp)
1154 {
1155 struct kbase_device *kbdev = NULL;
1156 struct kbase_context *kctx;
1157 int ret = 0;
1158 #ifdef CONFIG_DEBUG_FS
1159 char kctx_name[64];
1160 #endif
1161
1162 kbdev = kbase_find_device(iminor(inode));
1163
1164 if (!kbdev)
1165 return -ENODEV;
1166
1167 kctx = kbase_create_context(kbdev, is_compat_task());
1168 if (!kctx) {
1169 ret = -ENOMEM;
1170 goto out;
1171 }
1172
1173 init_waitqueue_head(&kctx->event_queue);
1174 filp->f_mode |= FMODE_UNSIGNED_OFFSET;
1175 filp->private_data = kctx;
1176 kctx->filp = filp;
1177
1178 if (kbdev->infinite_cache_active_default)
1179 kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
1180
1181 #ifdef CONFIG_DEBUG_FS
1182 snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
1183
1184 kctx->kctx_dentry = debugfs_create_dir(kctx_name,
1185 kbdev->debugfs_ctx_directory);
1186
1187 if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
1188 ret = -ENOMEM;
1189 goto out;
1190 }
1191
1192 #ifdef CONFIG_MALI_COH_USER
1193 /* if cache is completely coherent at hardware level, then remove the
1194 * infinite cache control support from debugfs.
1195 */
1196 #else
1197 debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
1198 kctx, &kbase_infinite_cache_fops);
1199 #endif /* CONFIG_MALI_COH_USER */
1200
1201 mutex_init(&kctx->mem_profile_lock);
1202
1203 kbasep_jd_debugfs_ctx_init(kctx);
1204 kbase_debug_mem_view_init(filp);
1205
1206 kbase_debug_job_fault_context_init(kctx);
1207
1208 kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool);
1209
1210 kbase_jit_debugfs_init(kctx);
1211 #endif /* CONFIG_DEBUG_FS */
1212
1213 dev_dbg(kbdev->dev, "created base context\n");
1214
1215 {
1216 struct kbasep_kctx_list_element *element;
1217
1218 element = kzalloc(sizeof(*element), GFP_KERNEL);
1219 if (element) {
1220 mutex_lock(&kbdev->kctx_list_lock);
1221 element->kctx = kctx;
1222 list_add(&element->link, &kbdev->kctx_list);
1223 KBASE_TLSTREAM_TL_NEW_CTX(
1224 element->kctx,
1225 (u32)(element->kctx->id),
1226 (u32)(element->kctx->tgid));
1227 mutex_unlock(&kbdev->kctx_list_lock);
1228 } else {
1229 /* we don't treat this as a fail - just warn about it */
1230 dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
1231 }
1232 }
1233 return 0;
1234
1235 out:
1236 kbase_release_device(kbdev);
1237 return ret;
1238 }
1239
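/*
 * kbase_release - tear down the context created in kbase_open: remove it
 * from the device's context list, detach any vinstr (hwcnt) client it left
 * behind, destroy the context and drop the device reference.
 */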
1240 static int kbase_release(struct inode *inode, struct file *filp)
1241 {
1242 struct kbase_context *kctx = filp->private_data;
1243 struct kbase_device *kbdev = kctx->kbdev;
1244 struct kbasep_kctx_list_element *element, *tmp;
1245 bool found_element = false;
1246
1247 KBASE_TLSTREAM_TL_DEL_CTX(kctx);
1248
1249 #ifdef CONFIG_DEBUG_FS
1250 kbasep_mem_profile_debugfs_remove(kctx);
1251 kbase_debug_job_fault_context_term(kctx);
1252 #endif
1253
1254 mutex_lock(&kbdev->kctx_list_lock);
1255 list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
1256 if (element->kctx == kctx) {
1257 list_del(&element->link);
1258 kfree(element);
1259 found_element = true;
1260 }
1261 }
1262 mutex_unlock(&kbdev->kctx_list_lock);
1263 if (!found_element)
1264 dev_warn(kbdev->dev, "kctx not in kctx_list\n");
1265
1266 filp->private_data = NULL;
1267
1268 mutex_lock(&kctx->vinstr_cli_lock);
1269 /* If this client was performing hwcnt dumping and did not explicitly
1270 * detach itself, remove it from the vinstr core now */
1271 if (kctx->vinstr_cli) {
1272 struct kbase_uk_hwcnt_setup setup;
1273
1274 setup.dump_buffer = 0llu;
1275 kbase_vinstr_legacy_hwc_setup(
1276 kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
1277 }
1278 mutex_unlock(&kctx->vinstr_cli_lock);
1279
1280 kbase_destroy_context(kctx);
1281
1282 dev_dbg(kbdev->dev, "deleted base context\n");
1283 kbase_release_device(kbdev);
1284 return 0;
1285 }
1286
1287 #define CALL_MAX_SIZE 536
1288
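/*
 * kbase_legacy_ioctl - entry point for the legacy UK interface: copy the
 * argument block into a local buffer, run kbase_legacy_dispatch() on it and
 * copy the results back to userspace.
 */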
1289 static long kbase_legacy_ioctl(struct file *filp, unsigned int cmd,
1290 unsigned long arg)
1291 {
1292 u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
1293 u32 size = _IOC_SIZE(cmd);
1294 struct kbase_context *kctx = filp->private_data;
1295
1296 if (size > CALL_MAX_SIZE)
1297 return -ENOTTY;
1298
1299 if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1300 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1301 return -EFAULT;
1302 }
1303
1304 if (kbase_legacy_dispatch(kctx, &msg, size) != 0)
1305 return -EFAULT;
1306
1307 if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1308 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1309 return -EFAULT;
1310 }
1311 return 0;
1312 }
1313
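/*
 * The kbase_api_* functions below are thin wrappers that translate the new
 * kbase_ioctl_* argument structures into the existing core calls; each
 * returns 0 or a negative errno, or a positive payload size/fd where the
 * ioctl reports one.
 */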
1314 static int kbase_api_set_flags(struct kbase_context *kctx,
1315 struct kbase_ioctl_set_flags *flags)
1316 {
1317 int err;
1318
1319 /* setup pending, try to signal that we'll do the setup,
1320 * if setup was already in progress, fail this call
1321 */
1322 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
1323 return -EINVAL;
1324
1325 err = kbase_context_set_create_flags(kctx, flags->create_flags);
1326 /* if bad flags, will stay stuck in setup mode */
1327 if (err)
1328 return err;
1329
1330 atomic_set(&kctx->setup_complete, 1);
1331 return 0;
1332 }
1333
1334 static int kbase_api_job_submit(struct kbase_context *kctx,
1335 struct kbase_ioctl_job_submit *submit)
1336 {
1337 void __user *user_addr = NULL;
1338
1339 #ifdef CONFIG_COMPAT
1340 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
1341 user_addr = compat_ptr(submit->addr.compat_value);
1342 else
1343 #endif
1344 user_addr = submit->addr.value;
1345
1346 return kbase_jd_submit(kctx, user_addr, submit->nr_atoms,
1347 submit->stride, false);
1348 }
1349
1350 static int kbase_api_get_gpuprops(struct kbase_context *kctx,
1351 struct kbase_ioctl_get_gpuprops *get_props)
1352 {
1353 struct kbase_gpu_props *kprops = &kctx->kbdev->gpu_props;
1354 int err;
1355
1356 if (get_props->flags != 0) {
1357 dev_err(kctx->kbdev->dev, "Unsupported flags to get_gpuprops");
1358 return -EINVAL;
1359 }
1360
1361 if (get_props->size == 0)
1362 return kprops->prop_buffer_size;
1363 if (get_props->size < kprops->prop_buffer_size)
1364 return -EINVAL;
1365
1366 err = copy_to_user(get_props->buffer.value, kprops->prop_buffer,
1367 kprops->prop_buffer_size);
1368 if (err)
1369 return err;
1370 return kprops->prop_buffer_size;
1371 }
1372
1373 static int kbase_api_post_term(struct kbase_context *kctx)
1374 {
1375 kbase_event_close(kctx);
1376 return 0;
1377 }
1378
1379 static int kbase_api_mem_alloc(struct kbase_context *kctx,
1380 union kbase_ioctl_mem_alloc *alloc)
1381 {
1382 struct kbase_va_region *reg;
1383 u64 flags = alloc->in.flags;
1384 u64 gpu_va;
1385
1386 #if defined(CONFIG_64BIT)
1387 if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
1388 /* force SAME_VA if a 64-bit client */
1389 flags |= BASE_MEM_SAME_VA;
1390 }
1391 #endif
1392
1393 reg = kbase_mem_alloc(kctx, alloc->in.va_pages,
1394 alloc->in.commit_pages,
1395 alloc->in.extent,
1396 &flags, &gpu_va);
1397
1398 if (!reg)
1399 return -ENOMEM;
1400
1401 alloc->out.flags = flags;
1402 alloc->out.gpu_va = gpu_va;
1403
1404 return 0;
1405 }
1406
1407 static int kbase_api_mem_query(struct kbase_context *kctx,
1408 union kbase_ioctl_mem_query *query)
1409 {
1410 return kbase_mem_query(kctx, query->in.gpu_addr,
1411 query->in.query, &query->out.value);
1412 }
1413
1414 static int kbase_api_mem_free(struct kbase_context *kctx,
1415 struct kbase_ioctl_mem_free *free)
1416 {
1417 return kbase_mem_free(kctx, free->gpu_addr);
1418 }
1419
1420 static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
1421 struct kbase_ioctl_hwcnt_reader_setup *setup)
1422 {
1423 int ret;
1424 struct kbase_uk_hwcnt_reader_setup args = {
1425 .buffer_count = setup->buffer_count,
1426 .jm_bm = setup->jm_bm,
1427 .shader_bm = setup->shader_bm,
1428 .tiler_bm = setup->tiler_bm,
1429 .mmu_l2_bm = setup->mmu_l2_bm
1430 };
1431
1432 mutex_lock(&kctx->vinstr_cli_lock);
1433 ret = kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, &args);
1434 mutex_unlock(&kctx->vinstr_cli_lock);
1435
1436 if (ret)
1437 return ret;
1438 return args.fd;
1439 }
1440
1441 static int kbase_api_hwcnt_enable(struct kbase_context *kctx,
1442 struct kbase_ioctl_hwcnt_enable *enable)
1443 {
1444 int ret;
1445 struct kbase_uk_hwcnt_setup args = {
1446 .dump_buffer = enable->dump_buffer,
1447 .jm_bm = enable->jm_bm,
1448 .shader_bm = enable->shader_bm,
1449 .tiler_bm = enable->tiler_bm,
1450 .mmu_l2_bm = enable->mmu_l2_bm
1451 };
1452
1453 mutex_lock(&kctx->vinstr_cli_lock);
1454 ret = kbase_vinstr_legacy_hwc_setup(kctx->kbdev->vinstr_ctx,
1455 &kctx->vinstr_cli, &args);
1456 mutex_unlock(&kctx->vinstr_cli_lock);
1457
1458 return ret;
1459 }
1460
1461 static int kbase_api_hwcnt_dump(struct kbase_context *kctx)
1462 {
1463 int ret;
1464
1465 mutex_lock(&kctx->vinstr_cli_lock);
1466 ret = kbase_vinstr_hwc_dump(kctx->vinstr_cli,
1467 BASE_HWCNT_READER_EVENT_MANUAL);
1468 mutex_unlock(&kctx->vinstr_cli_lock);
1469
1470 return ret;
1471 }
1472
1473 static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
1474 {
1475 int ret;
1476
1477 mutex_lock(&kctx->vinstr_cli_lock);
1478 ret = kbase_vinstr_hwc_clear(kctx->vinstr_cli);
1479 mutex_unlock(&kctx->vinstr_cli_lock);
1480
1481 return ret;
1482 }
1483
1484 static int kbase_api_disjoint_query(struct kbase_context *kctx,
1485 struct kbase_ioctl_disjoint_query *query)
1486 {
1487 query->counter = kbase_disjoint_event_get(kctx->kbdev);
1488
1489 return 0;
1490 }
1491
1492 static int kbase_api_get_ddk_version(struct kbase_context *kctx,
1493 struct kbase_ioctl_get_ddk_version *version)
1494 {
1495 int ret;
1496 int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
1497
1498 if (version->version_buffer.value == NULL)
1499 return len;
1500
1501 if (version->size < len)
1502 return -EOVERFLOW;
1503
1504 ret = copy_to_user(version->version_buffer.value,
1505 KERNEL_SIDE_DDK_VERSION_STRING,
1506 sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
1507
1508 if (ret)
1509 return ret;
1510
1511 return len;
1512 }
1513
1514 static int kbase_api_mem_jit_init(struct kbase_context *kctx,
1515 struct kbase_ioctl_mem_jit_init *jit_init)
1516 {
1517 return kbase_region_tracker_init_jit(kctx, jit_init->va_pages);
1518 }
1519
1520 static int kbase_api_mem_sync(struct kbase_context *kctx,
1521 struct kbase_ioctl_mem_sync *sync)
1522 {
1523 #ifdef CONFIG_MALI_COH_USER
1524 return 0;
1525 #endif
1526 struct basep_syncset sset = {
1527 .mem_handle.basep.handle = sync->handle,
1528 .user_addr = sync->user_addr,
1529 .size = sync->size,
1530 .type = sync->type
1531 };
1532
1533 return kbase_sync_now(kctx, &sset);
1534 }
1535
1536 static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
1537 union kbase_ioctl_mem_find_cpu_offset *find)
1538 {
1539 return kbasep_find_enclosing_cpu_mapping_offset(
1540 kctx,
1541 find->in.cpu_addr,
1542 find->in.size,
1543 &find->out.offset);
1544 }
1545
1546 static int kbase_api_get_context_id(struct kbase_context *kctx,
1547 struct kbase_ioctl_get_context_id *info)
1548 {
1549 info->id = kctx->id;
1550
1551 return 0;
1552 }
1553
1554 static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
1555 struct kbase_ioctl_tlstream_acquire *acquire)
1556 {
1557 return kbase_tlstream_acquire(kctx, acquire->flags);
1558 }
1559
1560 static int kbase_api_tlstream_flush(struct kbase_context *kctx)
1561 {
1562 kbase_tlstream_flush_streams();
1563
1564 return 0;
1565 }
1566
1567 static int kbase_api_mem_commit(struct kbase_context *kctx,
1568 struct kbase_ioctl_mem_commit *commit)
1569 {
1570 return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
1571 }
1572
1573 static int kbase_api_mem_alias(struct kbase_context *kctx,
1574 union kbase_ioctl_mem_alias *alias)
1575 {
1576 struct base_mem_aliasing_info *ai;
1577 void __user *user_addr = NULL;
1578 u64 flags;
1579 int err;
1580
1581 if (alias->in.nents == 0 || alias->in.nents > 2048)
1582 return -EINVAL;
1583
1584 ai = vmalloc(sizeof(*ai) * alias->in.nents);
1585 if (!ai)
1586 return -ENOMEM;
1587
1588 #ifdef CONFIG_COMPAT
1589 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
1590 user_addr =
1591 compat_ptr(alias->in.aliasing_info.compat_value);
1592 else
1593 #endif
1594 user_addr = alias->in.aliasing_info.value;
1595
1596 err = copy_from_user(ai, user_addr, sizeof(*ai) * alias->in.nents);
1597 if (err) {
1598 vfree(ai);
1599 return err;
1600 }
1601
1602 flags = alias->in.flags;
1603
1604 alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
1605 alias->in.stride, alias->in.nents,
1606 ai, &alias->out.va_pages);
1607
1608 alias->out.flags = flags;
1609
1610 vfree(ai);
1611
1612 if (alias->out.gpu_va == 0)
1613 return -ENOMEM;
1614
1615 return 0;
1616 }
1617
1618 static int kbase_api_mem_import(struct kbase_context *kctx,
1619 union kbase_ioctl_mem_import *import)
1620 {
1621 int ret;
1622 u64 flags = import->in.flags;
1623 void __user *phandle;
1624
1625 #ifdef CONFIG_COMPAT
1626 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
1627 phandle = compat_ptr(import->in.phandle.compat_value);
1628 else
1629 #endif
1630 phandle = import->in.phandle.value;
1631
1632 ret = kbase_mem_import(kctx,
1633 import->in.type,
1634 phandle,
1635 import->in.padding,
1636 &import->out.gpu_va,
1637 &import->out.va_pages,
1638 &flags);
1639
1640 import->out.flags = flags;
1641
1642 return ret;
1643 }
1644
1645 static int kbase_api_mem_flags_change(struct kbase_context *kctx,
1646 struct kbase_ioctl_mem_flags_change *change)
1647 {
1648 return kbase_mem_flags_change(kctx, change->gpu_va,
1649 change->flags, change->mask);
1650 }
1651
1652 static int kbase_api_stream_create(struct kbase_context *kctx,
1653 struct kbase_ioctl_stream_create *stream)
1654 {
1655 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1656 int fd, ret;
1657
1658 /* Name must be NULL-terminated and padded with NULLs, so check last
1659 * character is NULL
1660 */
1661 if (stream->name[sizeof(stream->name)-1] != 0)
1662 return -EINVAL;
1663
1664 ret = kbase_sync_fence_stream_create(stream->name, &fd);
1665
1666 if (ret)
1667 return ret;
1668 return fd;
1669 #else
1670 return -ENOENT;
1671 #endif
1672 }
1673
1674 static int kbase_api_fence_validate(struct kbase_context *kctx,
1675 struct kbase_ioctl_fence_validate *validate)
1676 {
1677 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1678 return kbase_sync_fence_validate(validate->fd);
1679 #else
1680 return -ENOENT;
1681 #endif
1682 }
1683
1684 static int kbase_api_get_profiling_controls(struct kbase_context *kctx,
1685 struct kbase_ioctl_get_profiling_controls *controls)
1686 {
1687 if (controls->count > FBDUMP_CONTROL_MAX)
1688 return -EINVAL;
1689
1690 return copy_to_user(controls->buffer.value,
1691 &kctx->kbdev->kbase_profiling_controls[
1692 FBDUMP_CONTROL_MIN],
1693 controls->count * sizeof(u32));
1694 }
1695
1696 static int kbase_api_mem_profile_add(struct kbase_context *kctx,
1697 struct kbase_ioctl_mem_profile_add *data)
1698 {
1699 char __user *user_buf;
1700 char *buf;
1701 int err;
1702
1703 if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
1704 dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big\n");
1705 return -EINVAL;
1706 }
1707
1708 buf = kmalloc(data->len, GFP_KERNEL);
1709 if (ZERO_OR_NULL_PTR(buf))
1710 return -ENOMEM;
1711
1712 #ifdef CONFIG_COMPAT
1713 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
1714 user_buf = compat_ptr(data->buffer.compat_value);
1715 else
1716 #endif
1717 user_buf = data->buffer.value;
1718
1719 err = copy_from_user(buf, user_buf, data->len);
1720 if (err) {
1721 kfree(buf);
1722 return err;
1723 }
1724
1725 return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
1726 }
1727
1728 static int kbase_api_soft_event_update(struct kbase_context *kctx,
1729 struct kbase_ioctl_soft_event_update *update)
1730 {
1731 if (update->flags != 0)
1732 return -EINVAL;
1733
1734 return kbase_soft_event_update(kctx, update->event, update->new_status);
1735 }
1736
1737 #if MALI_UNIT_TEST
1738 static int kbase_api_tlstream_test(struct kbase_context *kctx,
1739 struct kbase_ioctl_tlstream_test *test)
1740 {
1741 kbase_tlstream_test(
1742 test->tpw_count,
1743 test->msg_delay,
1744 test->msg_count,
1745 test->aux_msg);
1746
1747 return 0;
1748 }
1749
1750 static int kbase_api_tlstream_stats(struct kbase_context *kctx,
1751 struct kbase_ioctl_tlstream_stats *stats)
1752 {
1753 kbase_tlstream_stats(
1754 &stats->bytes_collected,
1755 &stats->bytes_generated);
1756
1757 return 0;
1758 }
1759 #endif /* MALI_UNIT_TEST */
1760
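/*
 * Dispatch helpers for the new ioctl interface. Each macro expands to a
 * switch case that checks the ioctl direction and size at build time,
 * copies the argument in and/or out as required, and calls the handler:
 * KBASE_HANDLE_IOCTL for _IOC_NONE, _IN for write-only, _OUT for read-only
 * and _INOUT for read/write commands.
 */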
1761 #define KBASE_HANDLE_IOCTL(cmd, function) \
1762 case cmd: \
1763 do { \
1764 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
1765 return function(kctx); \
1766 } while (0)
1767
1768 #define KBASE_HANDLE_IOCTL_IN(cmd, function, type) \
1769 case cmd: \
1770 do { \
1771 type param; \
1772 int err; \
1773 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
1774 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1775 err = copy_from_user(&param, uarg, sizeof(param)); \
1776 if (err) \
1777 return -EFAULT; \
1778 return function(kctx, &param); \
1779 } while (0)
1780
1781 #define KBASE_HANDLE_IOCTL_OUT(cmd, function, type) \
1782 case cmd: \
1783 do { \
1784 type param; \
1785 int ret, err; \
1786 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
1787 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1788 ret = function(kctx, &param); \
1789 err = copy_to_user(uarg, &param, sizeof(param)); \
1790 if (err) \
1791 return -EFAULT; \
1792 return ret; \
1793 } while (0)
1794
1795 #define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type) \
1796 case cmd: \
1797 do { \
1798 type param; \
1799 int ret, err; \
1800 BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)); \
1801 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1802 err = copy_from_user(&param, uarg, sizeof(param)); \
1803 if (err) \
1804 return -EFAULT; \
1805 ret = function(kctx, &param); \
1806 err = copy_to_user(uarg, &param, sizeof(param)); \
1807 if (err) \
1808 return -EFAULT; \
1809 return ret; \
1810 } while (0)
1811
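/*
 * kbase_ioctl - dispatcher for the new ioctl interface. Legacy UK calls are
 * forwarded to kbase_legacy_ioctl(); only VERSION_CHECK and SET_FLAGS are
 * accepted until the version handshake and context setup have completed.
 */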
1812 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1813 {
1814 struct kbase_context *kctx = filp->private_data;
1815 struct kbase_device *kbdev = kctx->kbdev;
1816 void __user *uarg = (void __user *)arg;
1817
1818 /* The UK ioctl values overflow the cmd field causing the type to be
1819 * incremented
1820 */
1821 if (_IOC_TYPE(cmd) == LINUX_UK_BASE_MAGIC+2)
1822 return kbase_legacy_ioctl(filp, cmd, arg);
1823
1824 /* The UK version check IOCTL doesn't overflow the cmd field, so is
1825 * handled separately here
1826 */
1827 if (cmd == _IOC(_IOC_READ|_IOC_WRITE, LINUX_UK_BASE_MAGIC,
1828 UKP_FUNC_ID_CHECK_VERSION,
1829 sizeof(struct uku_version_check_args)))
1830 return kbase_legacy_ioctl(filp, cmd, arg);
1831
1832 /* Only these ioctls are available until setup is complete */
1833 switch (cmd) {
1834 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
1835 kbase_api_handshake,
1836 struct kbase_ioctl_version_check);
1837 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
1838 kbase_api_set_flags,
1839 struct kbase_ioctl_set_flags);
1840 }
1841
1842 /* Block call until version handshake and setup is complete */
1843 if (kctx->api_version == 0 || !atomic_read(&kctx->setup_complete))
1844 return -EINVAL;
1845
1846 /* Normal ioctls */
1847 switch (cmd) {
1848 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
1849 kbase_api_job_submit,
1850 struct kbase_ioctl_job_submit);
1851 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
1852 kbase_api_get_gpuprops,
1853 struct kbase_ioctl_get_gpuprops);
1854 KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
1855 kbase_api_post_term);
1856 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
1857 kbase_api_mem_alloc,
1858 union kbase_ioctl_mem_alloc);
1859 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
1860 kbase_api_mem_query,
1861 union kbase_ioctl_mem_query);
1862 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
1863 kbase_api_mem_free,
1864 struct kbase_ioctl_mem_free);
1865 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
1866 kbase_api_hwcnt_reader_setup,
1867 struct kbase_ioctl_hwcnt_reader_setup);
1868 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
1869 kbase_api_hwcnt_enable,
1870 struct kbase_ioctl_hwcnt_enable);
1871 KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
1872 kbase_api_hwcnt_dump);
1873 KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
1874 kbase_api_hwcnt_clear);
1875 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
1876 kbase_api_disjoint_query,
1877 struct kbase_ioctl_disjoint_query);
1878 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
1879 kbase_api_get_ddk_version,
1880 struct kbase_ioctl_get_ddk_version);
1881 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
1882 kbase_api_mem_jit_init,
1883 struct kbase_ioctl_mem_jit_init);
1884 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
1885 kbase_api_mem_sync,
1886 struct kbase_ioctl_mem_sync);
1887 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
1888 kbase_api_mem_find_cpu_offset,
1889 union kbase_ioctl_mem_find_cpu_offset);
1890 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
1891 kbase_api_get_context_id,
1892 struct kbase_ioctl_get_context_id);
1893 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
1894 kbase_api_tlstream_acquire,
1895 struct kbase_ioctl_tlstream_acquire);
1896 KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
1897 kbase_api_tlstream_flush);
1898 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
1899 kbase_api_mem_commit,
1900 struct kbase_ioctl_mem_commit);
1901 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
1902 kbase_api_mem_alias,
1903 union kbase_ioctl_mem_alias);
1904 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
1905 kbase_api_mem_import,
1906 union kbase_ioctl_mem_import);
1907 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
1908 kbase_api_mem_flags_change,
1909 struct kbase_ioctl_mem_flags_change);
1910 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
1911 kbase_api_stream_create,
1912 struct kbase_ioctl_stream_create);
1913 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
1914 kbase_api_fence_validate,
1915 struct kbase_ioctl_fence_validate);
1916 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_PROFILING_CONTROLS,
1917 kbase_api_get_profiling_controls,
1918 struct kbase_ioctl_get_profiling_controls);
1919 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
1920 kbase_api_mem_profile_add,
1921 struct kbase_ioctl_mem_profile_add);
1922 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
1923 kbase_api_soft_event_update,
1924 struct kbase_ioctl_soft_event_update);
1925
1926 #if MALI_UNIT_TEST
1927 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
1928 kbase_api_tlstream_test,
1929 struct kbase_ioctl_tlstream_test);
1930 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
1931 kbase_api_tlstream_stats,
1932 struct kbase_ioctl_tlstream_stats);
1933 #endif
1934 }
1935
1936 dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d", cmd, _IOC_NR(cmd));
1937
1938 return -ENOIOCTLCMD;
1939 }
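
/*
 * Illustrative sketch (not part of the driver): a user-space client must
 * perform the version handshake and then set its context flags before any
 * other ioctl is accepted by the dispatcher above. The device node path and
 * the create_flags field name are assumptions based on a typical integration
 * and mali_kbase_ioctl.h.
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *   #include "mali_kbase_ioctl.h"
 *
 *   int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
 *   struct kbase_ioctl_version_check vc = { 0 };
 *   ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc);   // negotiate API version
 *   struct kbase_ioctl_set_flags sf = { .create_flags = 0 };
 *   ioctl(fd, KBASE_IOCTL_SET_FLAGS, &sf);       // completes context setup
 *   // only now are MEM_ALLOC, JOB_SUBMIT, etc. accepted
 */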
1940
1941 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1942 {
1943 struct kbase_context *kctx = filp->private_data;
1944 struct base_jd_event_v2 uevent;
1945 int out_count = 0;
1946
1947 if (count < sizeof(uevent))
1948 return -ENOBUFS;
1949
1950 do {
1951 while (kbase_event_dequeue(kctx, &uevent)) {
1952 if (out_count > 0)
1953 goto out;
1954
1955 if (filp->f_flags & O_NONBLOCK)
1956 return -EAGAIN;
1957
1958 if (wait_event_interruptible(kctx->event_queue,
1959 kbase_event_pending(kctx)) != 0)
1960 return -ERESTARTSYS;
1961 }
1962 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1963 if (out_count == 0)
1964 return -EPIPE;
1965 goto out;
1966 }
1967
1968 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1969 return -EFAULT;
1970
1971 buf += sizeof(uevent);
1972 out_count++;
1973 count -= sizeof(uevent);
1974 } while (count >= sizeof(uevent));
1975
1976 out:
1977 return out_count * sizeof(uevent);
1978 }
1979
1980 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1981 {
1982 struct kbase_context *kctx = filp->private_data;
1983
1984 poll_wait(filp, &kctx->event_queue, wait);
1985 if (kbase_event_pending(kctx))
1986 return POLLIN | POLLRDNORM;
1987
1988 return 0;
1989 }
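
/*
 * Illustrative sketch (not part of the driver): draining job events from user
 * space through the read()/poll() interface implemented above. mali_fd is an
 * already-open descriptor for the device node; struct base_jd_event_v2 comes
 * from the base kernel UAPI headers.
 *
 *   #include <poll.h>
 *   #include <unistd.h>
 *
 *   struct pollfd pfd = { .fd = mali_fd, .events = POLLIN };
 *   struct base_jd_event_v2 ev;
 *
 *   while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *           if (read(mali_fd, &ev, sizeof(ev)) != sizeof(ev))
 *                   break;   // error, or -EPIPE after DRV_TERMINATED
 *           // handle ev.event_code / ev.atom_number here
 *   }
 */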
1990
1991 void kbase_event_wakeup(struct kbase_context *kctx)
1992 {
1993 KBASE_DEBUG_ASSERT(kctx);
1994
1995 wake_up_interruptible(&kctx->event_queue);
1996 }
1997
1998 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1999
2000 static int kbase_check_flags(int flags)
2001 {
2002 /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
2003 * closes the file descriptor in a child process.
2004 */
2005 if (0 == (flags & O_CLOEXEC))
2006 return -EINVAL;
2007
2008 return 0;
2009 }
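
/*
 * Illustrative note (not normative): ->check_flags is invoked by the VFS from
 * fcntl(F_SETFL), so a flags word that drops O_CLOEXEC is rejected with
 * -EINVAL while one that keeps it is accepted, e.g.:
 *
 *   fcntl(mali_fd, F_SETFL, O_NONBLOCK | O_CLOEXEC);   // accepted
 *   fcntl(mali_fd, F_SETFL, O_NONBLOCK);               // rejected, -EINVAL
 */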
2010
2011
2012 /**
2013 * align_and_check - Align the specified pointer to the provided alignment and
2014 * check that it is still in range.
2015 * @gap_end: Highest possible start address for allocation (end of gap in
2016 * address space)
2017 * @gap_start: Start address of current memory area / gap in address space
2018 * @info: vm_unmapped_area_info structure passed by the caller, containing
2019 * alignment, length and limits for the allocation
2020 * @is_shader_code: True if the allocation is for shader code (which has
2021 * additional alignment requirements)
2022 *
2023 * Return: true if gap_end is now aligned correctly and is still in range,
2024 * false otherwise
2025 */
2026 static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
2027 struct vm_unmapped_area_info *info, bool is_shader_code)
2028 {
2029 /* Compute highest gap address at the desired alignment */
2030 (*gap_end) -= info->length;
2031 (*gap_end) -= (*gap_end - info->align_offset) & info->align_mask;
2032
2033 if (is_shader_code) {
2034 /* Check for 4GB boundary */
2035 if (0 == (*gap_end & BASE_MEM_MASK_4GB))
2036 (*gap_end) -= (info->align_offset ? info->align_offset :
2037 info->length);
2038 if (0 == ((*gap_end + info->length) & BASE_MEM_MASK_4GB))
2039 (*gap_end) -= (info->align_offset ? info->align_offset :
2040 info->length);
2041
2042 if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end +
2043 info->length) & BASE_MEM_MASK_4GB))
2044 return false;
2045 }
2046
2047
2048 if ((*gap_end < info->low_limit) || (*gap_end < gap_start))
2049 return false;
2050
2051
2052 return true;
2053 }
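
/*
 * Worked example (illustrative): with info->length = 2MB, info->align_offset =
 * 2MB and info->align_mask = 2MB - 1 (the SAME_VA 2MB case set up in
 * kbase_get_unmapped_area() below), a gap ending at 0x7f81234000 is first
 * reduced by the length to 0x7f81034000 and then rounded down by
 * (*gap_end - align_offset) & align_mask = 0x34000, giving the 2MB-aligned
 * candidate 0x7f81000000. For shader code the candidate is additionally
 * nudged down so that neither the start nor the end of the mapping lands
 * exactly on a 4GB boundary.
 */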
2054
2055 /* The following function is taken from the kernel and just
2056 * renamed. As it's not exported to modules we must copy-paste it here.
2057 */
2058
2059 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
2060 *info, bool is_shader_code)
2061 {
2062 struct mm_struct *mm = current->mm;
2063 struct vm_area_struct *vma;
2064 unsigned long length, low_limit, high_limit, gap_start, gap_end;
2065
2066 /* Adjust search length to account for worst case alignment overhead */
2067 length = info->length + info->align_mask;
2068 if (length < info->length)
2069 return -ENOMEM;
2070
2071 /*
2072 * Adjust search limits by the desired length.
2073 * See implementation comment at top of unmapped_area().
2074 */
2075 gap_end = info->high_limit;
2076 if (gap_end < length)
2077 return -ENOMEM;
2078 high_limit = gap_end - length;
2079
2080 if (info->low_limit > high_limit)
2081 return -ENOMEM;
2082 low_limit = info->low_limit + length;
2083
2084 /* Check highest gap, which does not precede any rbtree node */
2085 gap_start = mm->highest_vm_end;
2086 if (gap_start <= high_limit) {
2087 if (align_and_check(&gap_end, gap_start, info, is_shader_code))
2088 return gap_end;
2089 }
2090
2091 /* Check if rbtree root looks promising */
2092 if (RB_EMPTY_ROOT(&mm->mm_rb))
2093 return -ENOMEM;
2094 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
2095 if (vma->rb_subtree_gap < length)
2096 return -ENOMEM;
2097
2098 while (true) {
2099 /* Visit right subtree if it looks promising */
2100 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
2101 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
2102 struct vm_area_struct *right =
2103 rb_entry(vma->vm_rb.rb_right,
2104 struct vm_area_struct, vm_rb);
2105 if (right->rb_subtree_gap >= length) {
2106 vma = right;
2107 continue;
2108 }
2109 }
2110
2111 check_current:
2112 /* Check if current node has a suitable gap */
2113 gap_end = vma->vm_start;
2114 if (gap_end < low_limit)
2115 return -ENOMEM;
2116 if (gap_start <= high_limit && gap_end - gap_start >= length) {
2117 /* We found a suitable gap. Clip it with the original
2118 * high_limit. */
2119 if (gap_end > info->high_limit)
2120 gap_end = info->high_limit;
2121
2122 if (align_and_check(&gap_end, gap_start, info,
2123 is_shader_code))
2124 return gap_end;
2125 }
2126
2127 /* Visit left subtree if it looks promising */
2128 if (vma->vm_rb.rb_left) {
2129 struct vm_area_struct *left =
2130 rb_entry(vma->vm_rb.rb_left,
2131 struct vm_area_struct, vm_rb);
2132 if (left->rb_subtree_gap >= length) {
2133 vma = left;
2134 continue;
2135 }
2136 }
2137
2138 /* Go back up the rbtree to find next candidate node */
2139 while (true) {
2140 struct rb_node *prev = &vma->vm_rb;
2141 if (!rb_parent(prev))
2142 return -ENOMEM;
2143 vma = rb_entry(rb_parent(prev),
2144 struct vm_area_struct, vm_rb);
2145 if (prev == vma->vm_rb.rb_right) {
2146 gap_start = vma->vm_prev ?
2147 vma->vm_prev->vm_end : 0;
2148 goto check_current;
2149 }
2150 }
2151 }
2152
2153 return -ENOMEM;
2154 }
2155
2156 static unsigned long kbase_get_unmapped_area(struct file *filp,
2157 const unsigned long addr, const unsigned long len,
2158 const unsigned long pgoff, const unsigned long flags)
2159 {
2160 /* based on get_unmapped_area, but simplified slightly because some
2161 * values are known in advance */
2162 struct kbase_context *kctx = filp->private_data;
2163 struct mm_struct *mm = current->mm;
2164 struct vm_unmapped_area_info info;
2165 unsigned long align_offset = 0;
2166 unsigned long align_mask = 0;
2167 unsigned long high_limit = mm->mmap_base;
2168 unsigned long low_limit = PAGE_SIZE;
2169 int cpu_va_bits = BITS_PER_LONG;
2170 int gpu_pc_bits =
2171 kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
2172 bool is_shader_code = false;
2173 unsigned long ret;
2174
2175 /* err on fixed address */
2176 if ((flags & MAP_FIXED) || addr)
2177 return -EINVAL;
2178
2179 #ifdef CONFIG_64BIT
2180 /* too big? */
2181 if (len > TASK_SIZE - SZ_2M)
2182 return -ENOMEM;
2183
2184 if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
2185
2186 if (kbase_hw_has_feature(kctx->kbdev,
2187 BASE_HW_FEATURE_33BIT_VA)) {
2188 high_limit = kctx->same_va_end << PAGE_SHIFT;
2189 } else {
2190 high_limit = min_t(unsigned long, mm->mmap_base,
2191 (kctx->same_va_end << PAGE_SHIFT));
2192 if (len >= SZ_2M) {
2193 align_offset = SZ_2M;
2194 align_mask = SZ_2M - 1;
2195 }
2196 }
2197
2198 low_limit = SZ_2M;
2199 } else {
2200 cpu_va_bits = 32;
2201 }
2202 #endif /* CONFIG_64BIT */
2203 if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
2204 (PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
2205 int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
2206
2207 if (!kctx->pending_regions[cookie])
2208 return -EINVAL;
2209
2210 if (!(kctx->pending_regions[cookie]->flags &
2211 KBASE_REG_GPU_NX)) {
2212 if (cpu_va_bits > gpu_pc_bits) {
2213 align_offset = 1ULL << gpu_pc_bits;
2214 align_mask = align_offset - 1;
2215 is_shader_code = true;
2216 }
2217 }
2218 #ifndef CONFIG_64BIT
2219 } else {
2220 return current->mm->get_unmapped_area(filp, addr, len, pgoff,
2221 flags);
2222 #endif
2223 }
2224
2225 info.flags = 0;
2226 info.length = len;
2227 info.low_limit = low_limit;
2228 info.high_limit = high_limit;
2229 info.align_offset = align_offset;
2230 info.align_mask = align_mask;
2231
2232 ret = kbase_unmapped_area_topdown(&info, is_shader_code);
2233
2234 if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
2235 high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
2236 /* Retry above mmap_base */
2237 info.low_limit = mm->mmap_base;
2238 info.high_limit = min_t(u64, TASK_SIZE,
2239 (kctx->same_va_end << PAGE_SHIFT));
2240
2241 ret = kbase_unmapped_area_topdown(&info, is_shader_code);
2242 }
2243
2244 return ret;
2245 }
2246
2247 static const struct file_operations kbase_fops = {
2248 .owner = THIS_MODULE,
2249 .open = kbase_open,
2250 .release = kbase_release,
2251 .read = kbase_read,
2252 .poll = kbase_poll,
2253 .unlocked_ioctl = kbase_ioctl,
2254 .compat_ioctl = kbase_ioctl,
2255 .mmap = kbase_mmap,
2256 .check_flags = kbase_check_flags,
2257 .get_unmapped_area = kbase_get_unmapped_area,
2258 };
2259
2260 #ifndef CONFIG_MALI_NO_MALI
2261 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
2262 {
2263 writel(value, kbdev->reg + offset);
2264 }
2265
2266 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
2267 {
2268 return readl(kbdev->reg + offset);
2269 }
2270 #endif /* !CONFIG_MALI_NO_MALI */
2271
2272 /**
2273 * show_policy - Show callback for the power_policy sysfs file.
2274 *
2275 * This function is called to get the contents of the power_policy sysfs
2276 * file. This is a list of the available policies with the currently active one
2277 * surrounded by square brackets.
2278 *
2279 * @dev: The device this sysfs file is for
2280 * @attr: The attributes of the sysfs file
2281 * @buf: The output buffer for the sysfs file contents
2282 *
2283 * Return: The number of bytes output to @buf.
2284 */
2285 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
2286 {
2287 struct kbase_device *kbdev;
2288 const struct kbase_pm_policy *current_policy;
2289 const struct kbase_pm_policy *const *policy_list;
2290 int policy_count;
2291 int i;
2292 ssize_t ret = 0;
2293
2294 kbdev = to_kbase_device(dev);
2295
2296 if (!kbdev)
2297 return -ENODEV;
2298
2299 current_policy = kbase_pm_get_policy(kbdev);
2300
2301 policy_count = kbase_pm_list_policies(&policy_list);
2302
2303 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
2304 if (policy_list[i] == current_policy)
2305 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
2306 else
2307 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
2308 }
2309
2310 if (ret < PAGE_SIZE - 1) {
2311 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2312 } else {
2313 buf[PAGE_SIZE - 2] = '\n';
2314 buf[PAGE_SIZE - 1] = '\0';
2315 ret = PAGE_SIZE - 1;
2316 }
2317
2318 return ret;
2319 }
2320
2321 /**
2322 * set_policy - Store callback for the power_policy sysfs file.
2323 *
2324 * This function is called when the power_policy sysfs file is written to.
2325 * It matches the requested policy against the available policies and if a
2326 * matching policy is found calls kbase_pm_set_policy() to change the
2327 * policy.
2328 *
2329 * @dev: The device this sysfs file is for
2330 * @attr: The attributes of the sysfs file
2331 * @buf: The value written to the sysfs file
2332 * @count: The number of bytes written to the sysfs file
2333 *
2334 * Return: @count if the function succeeded. An error code on failure.
2335 */
2336 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2337 {
2338 struct kbase_device *kbdev;
2339 const struct kbase_pm_policy *new_policy = NULL;
2340 const struct kbase_pm_policy *const *policy_list;
2341 int policy_count;
2342 int i;
2343
2344 kbdev = to_kbase_device(dev);
2345
2346 if (!kbdev)
2347 return -ENODEV;
2348
2349 policy_count = kbase_pm_list_policies(&policy_list);
2350
2351 for (i = 0; i < policy_count; i++) {
2352 if (sysfs_streq(policy_list[i]->name, buf)) {
2353 new_policy = policy_list[i];
2354 break;
2355 }
2356 }
2357
2358 if (!new_policy) {
2359 dev_err(dev, "power_policy: policy not found\n");
2360 return -EINVAL;
2361 }
2362
2363 kbase_pm_set_policy(kbdev, new_policy);
2364
2365 return count;
2366 }
2367
2368 /*
2369 * The sysfs file power_policy.
2370 *
2371 * This is used for obtaining information about the available policies,
2372 * determining which policy is currently active, and changing the active
2373 * policy.
2374 */
2375 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
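
/*
 * Illustrative sketch (not part of the driver): selecting a policy from user
 * space. The sysfs path and the "coarse_demand" policy name are assumptions
 * for a typical integration; reading the file first lists the valid names.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   int fd = open("/sys/class/misc/mali0/device/power_policy", O_WRONLY);
 *   if (fd >= 0) {
 *           write(fd, "coarse_demand", strlen("coarse_demand"));
 *           close(fd);
 *   }
 */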
2376
2377 /**
2378 * show_ca_policy - Show callback for the core_availability_policy sysfs file.
2379 *
2380 * This function is called to get the contents of the core_availability_policy
2381 * sysfs file. This is a list of the available policies with the currently
2382 * active one surrounded by square brackets.
2383 *
2384 * @dev: The device this sysfs file is for
2385 * @attr: The attributes of the sysfs file
2386 * @buf: The output buffer for the sysfs file contents
2387 *
2388 * Return: The number of bytes output to @buf.
2389 */
2390 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
2391 {
2392 struct kbase_device *kbdev;
2393 const struct kbase_pm_ca_policy *current_policy;
2394 const struct kbase_pm_ca_policy *const *policy_list;
2395 int policy_count;
2396 int i;
2397 ssize_t ret = 0;
2398
2399 kbdev = to_kbase_device(dev);
2400
2401 if (!kbdev)
2402 return -ENODEV;
2403
2404 current_policy = kbase_pm_ca_get_policy(kbdev);
2405
2406 policy_count = kbase_pm_ca_list_policies(&policy_list);
2407
2408 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
2409 if (policy_list[i] == current_policy)
2410 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
2411 else
2412 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
2413 }
2414
2415 if (ret < PAGE_SIZE - 1) {
2416 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2417 } else {
2418 buf[PAGE_SIZE - 2] = '\n';
2419 buf[PAGE_SIZE - 1] = '\0';
2420 ret = PAGE_SIZE - 1;
2421 }
2422
2423 return ret;
2424 }
2425
2426 /**
2427 * set_ca_policy - Store callback for the core_availability_policy sysfs file.
2428 *
2429 * This function is called when the core_availability_policy sysfs file is
2430 * written to. It matches the requested policy against the available policies
2431 * and if a matching policy is found calls kbase_pm_ca_set_policy() to change
2432 * the policy.
2433 *
2434 * @dev: The device this sysfs file is for
2435 * @attr: The attributes of the sysfs file
2436 * @buf: The value written to the sysfs file
2437 * @count: The number of bytes written to the sysfs file
2438 *
2439 * Return: @count if the function succeeded. An error code on failure.
2440 */
2441 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2442 {
2443 struct kbase_device *kbdev;
2444 const struct kbase_pm_ca_policy *new_policy = NULL;
2445 const struct kbase_pm_ca_policy *const *policy_list;
2446 int policy_count;
2447 int i;
2448
2449 kbdev = to_kbase_device(dev);
2450
2451 if (!kbdev)
2452 return -ENODEV;
2453
2454 policy_count = kbase_pm_ca_list_policies(&policy_list);
2455
2456 for (i = 0; i < policy_count; i++) {
2457 if (sysfs_streq(policy_list[i]->name, buf)) {
2458 new_policy = policy_list[i];
2459 break;
2460 }
2461 }
2462
2463 if (!new_policy) {
2464 dev_err(dev, "core_availability_policy: policy not found\n");
2465 return -EINVAL;
2466 }
2467
2468 kbase_pm_ca_set_policy(kbdev, new_policy);
2469
2470 return count;
2471 }
2472
2473 /*
2474 * The sysfs file core_availability_policy
2475 *
2476 * This is used for obtaining information about the available policies,
2477 * determining which policy is currently active, and changing the active
2478 * policy.
2479 */
2480 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
2481
2482 /*
2483 * show_core_mask - Show callback for the core_mask sysfs file.
2484 *
2485 * This function is called to get the contents of the core_mask sysfs file.
2486 *
2487 * @dev: The device this sysfs file is for
2488 * @attr: The attributes of the sysfs file
2489 * @buf: The output buffer for the sysfs file contents
2490 *
2491 * Return: The number of bytes output to @buf.
2492 */
2493 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
2494 {
2495 struct kbase_device *kbdev;
2496 ssize_t ret = 0;
2497
2498 kbdev = to_kbase_device(dev);
2499
2500 if (!kbdev)
2501 return -ENODEV;
2502
2503 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2504 "Current core mask (JS0) : 0x%llX\n",
2505 kbdev->pm.debug_core_mask[0]);
2506 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2507 "Current core mask (JS1) : 0x%llX\n",
2508 kbdev->pm.debug_core_mask[1]);
2509 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2510 "Current core mask (JS2) : 0x%llX\n",
2511 kbdev->pm.debug_core_mask[2]);
2512 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2513 "Available core mask : 0x%llX\n",
2514 kbdev->gpu_props.props.raw_props.shader_present);
2515
2516 return ret;
2517 }
2518
2519 /**
2520 * set_core_mask - Store callback for the core_mask sysfs file.
2521 *
2522 * This function is called when the core_mask sysfs file is written to.
2523 *
2524 * @dev: The device this sysfs file is for
2525 * @attr: The attributes of the sysfs file
2526 * @buf: The value written to the sysfs file
2527 * @count: The number of bytes written to the sysfs file
2528 *
2529 * Return: @count if the function succeeded. An error code on failure.
2530 */
2531 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2532 {
2533 struct kbase_device *kbdev;
2534 u64 new_core_mask[3];
2535 int items;
2536
2537 kbdev = to_kbase_device(dev);
2538
2539 if (!kbdev)
2540 return -ENODEV;
2541
2542 items = sscanf(buf, "%llx %llx %llx",
2543 &new_core_mask[0], &new_core_mask[1],
2544 &new_core_mask[2]);
2545
2546 if (items == 1)
2547 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
2548
2549 if (items == 1 || items == 3) {
2550 u64 shader_present =
2551 kbdev->gpu_props.props.raw_props.shader_present;
2552 u64 group0_core_mask =
2553 kbdev->gpu_props.props.coherency_info.group[0].
2554 core_mask;
2555
2556 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
2557 !(new_core_mask[0] & group0_core_mask) ||
2558 (new_core_mask[1] & shader_present) !=
2559 new_core_mask[1] ||
2560 !(new_core_mask[1] & group0_core_mask) ||
2561 (new_core_mask[2] & shader_present) !=
2562 new_core_mask[2] ||
2563 !(new_core_mask[2] & group0_core_mask)) {
2564 dev_err(dev, "power_policy: invalid core specification\n");
2565 return -EINVAL;
2566 }
2567
2568 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
2569 kbdev->pm.debug_core_mask[1] !=
2570 new_core_mask[1] ||
2571 kbdev->pm.debug_core_mask[2] !=
2572 new_core_mask[2]) {
2573 unsigned long flags;
2574
2575 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2576
2577 kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
2578 new_core_mask[1], new_core_mask[2]);
2579
2580 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2581 }
2582
2583 return count;
2584 }
2585
2586 dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
2587 "Use format <core_mask>\n"
2588 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
2589 return -EINVAL;
2590 }
2591
2592 /*
2593 * The sysfs file core_mask.
2594 *
2595 * This is used to restrict shader core availability for debugging purposes.
2596 * Reading it will show the current core mask and the mask of cores available.
2597 * Writing to it will set the current core mask.
2598 */
2599 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
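
/*
 * Illustrative sketch (not part of the driver): restricting the shader cores
 * used by the job slots. A single mask applies to all three slots; three
 * whitespace-separated masks set JS0, JS1 and JS2 individually. The sysfs
 * path and the mask value are assumptions.
 *
 *   int fd = open("/sys/class/misc/mali0/device/core_mask", O_WRONLY);
 *   if (fd >= 0) {
 *           write(fd, "0x3", 3);   // limit all slots to the first two cores
 *           close(fd);
 *   }
 */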
2600
2601 /**
2602 * set_soft_job_timeout - Store callback for the soft_job_timeout sysfs
2603 * file.
2604 *
2605 * @dev: The device this sysfs file is for.
2606 * @attr: The attributes of the sysfs file.
2607 * @buf: The value written to the sysfs file.
2608 * @count: The number of bytes written to the sysfs file.
2609 *
2610 * This allows setting the timeout for software jobs. Waiting soft event wait
2611 * jobs will be cancelled after this period expires, while soft fence wait jobs
2612 * will print debug information if the fence debug feature is enabled.
2613 *
2614 * This is expressed in milliseconds.
2615 *
2616 * Return: count if the function succeeded. An error code on failure.
2617 */
2618 static ssize_t set_soft_job_timeout(struct device *dev,
2619 struct device_attribute *attr,
2620 const char *buf, size_t count)
2621 {
2622 struct kbase_device *kbdev;
2623 int soft_job_timeout_ms;
2624
2625 kbdev = to_kbase_device(dev);
2626 if (!kbdev)
2627 return -ENODEV;
2628
2629 if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
2630 (soft_job_timeout_ms <= 0))
2631 return -EINVAL;
2632
2633 atomic_set(&kbdev->js_data.soft_job_timeout_ms,
2634 soft_job_timeout_ms);
2635
2636 return count;
2637 }
2638
2639 /**
2640 * show_soft_job_timeout - Show callback for the soft_job_timeout sysfs
2641 * file.
2642 *
2643 * This will return the timeout for the software jobs.
2644 *
2645 * @dev: The device this sysfs file is for.
2646 * @attr: The attributes of the sysfs file.
2647 * @buf: The output buffer for the sysfs file contents.
2648 *
2649 * Return: The number of bytes output to buf.
2650 */
2651 static ssize_t show_soft_job_timeout(struct device *dev,
2652 struct device_attribute *attr,
2653 char * const buf)
2654 {
2655 struct kbase_device *kbdev;
2656
2657 kbdev = to_kbase_device(dev);
2658 if (!kbdev)
2659 return -ENODEV;
2660
2661 return scnprintf(buf, PAGE_SIZE, "%i\n",
2662 atomic_read(&kbdev->js_data.soft_job_timeout_ms));
2663 }
2664
2665 static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
2666 show_soft_job_timeout, set_soft_job_timeout);
2667
2668 static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
2669 int default_ticks, u32 old_ticks)
2670 {
2671 if (timeout_ms > 0) {
2672 u64 ticks = timeout_ms * 1000000ULL;
2673 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2674 if (!ticks)
2675 return 1;
2676 return ticks;
2677 } else if (timeout_ms < 0) {
2678 return default_ticks;
2679 } else {
2680 return old_ticks;
2681 }
2682 }
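
/*
 * Worked example (illustrative): assuming the usual 100 ms default scheduling
 * period (scheduling_period_ns = 100000000), a timeout of 3000 ms converts to
 * 3000 * 1000000 / 100000000 = 30 ticks. A positive timeout that would round
 * down to zero is clamped to 1 tick, a negative timeout selects the supplied
 * default tick count, and zero keeps the previous value.
 */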
2683
2684 /**
2685 * set_js_timeouts - Store callback for the js_timeouts sysfs file.
2686 *
2687  * This function is called when the js_timeouts sysfs file is written to. This
2688  * file contains eight values separated by whitespace. The values are basically
2689  * the same as the %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL,
2690  * %JS_HARD_STOP_TICKS_SS, %JS_HARD_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_DUMPING,
2691  * %JS_RESET_TICKS_SS, %JS_RESET_TICKS_CL and %JS_RESET_TICKS_DUMPING
2692  * configuration values (in that order), with the difference that the
2693  * js_timeouts values are expressed in MILLISECONDS.
2694  *
2695  * The js_timeouts sysfs file allows the values currently in use by the job
2696  * scheduler to be overridden. Only non-zero values override the current setting.
2697 *
2698 * @dev: The device this sysfs file is for
2699 * @attr: The attributes of the sysfs file
2700 * @buf: The value written to the sysfs file
2701 * @count: The number of bytes written to the sysfs file
2702 *
2703 * Return: @count if the function succeeded. An error code on failure.
2704 */
2705 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2706 {
2707 struct kbase_device *kbdev;
2708 int items;
2709 long js_soft_stop_ms;
2710 long js_soft_stop_ms_cl;
2711 long js_hard_stop_ms_ss;
2712 long js_hard_stop_ms_cl;
2713 long js_hard_stop_ms_dumping;
2714 long js_reset_ms_ss;
2715 long js_reset_ms_cl;
2716 long js_reset_ms_dumping;
2717
2718 kbdev = to_kbase_device(dev);
2719 if (!kbdev)
2720 return -ENODEV;
2721
2722 items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
2723 &js_soft_stop_ms, &js_soft_stop_ms_cl,
2724 &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
2725 &js_hard_stop_ms_dumping, &js_reset_ms_ss,
2726 &js_reset_ms_cl, &js_reset_ms_dumping);
2727
2728 if (items == 8) {
2729 struct kbasep_js_device_data *js_data = &kbdev->js_data;
2730 unsigned long flags;
2731
2732 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2733
2734 #define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
2735 js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
2736 default, js_data->ticks_name); \
2737 dev_dbg(kbdev->dev, "Overriding " #ticks_name \
2738 " with %lu ticks (%lu ms)\n", \
2739 (unsigned long)js_data->ticks_name, \
2740 ms_name); \
2741 } while (0)
2742
2743 UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
2744 DEFAULT_JS_SOFT_STOP_TICKS);
2745 UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
2746 DEFAULT_JS_SOFT_STOP_TICKS_CL);
2747 UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
2748 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
2749 DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
2750 DEFAULT_JS_HARD_STOP_TICKS_SS);
2751 UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
2752 DEFAULT_JS_HARD_STOP_TICKS_CL);
2753 UPDATE_TIMEOUT(hard_stop_ticks_dumping,
2754 js_hard_stop_ms_dumping,
2755 DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
2756 UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
2757 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
2758 DEFAULT_JS_RESET_TICKS_SS_8408 :
2759 DEFAULT_JS_RESET_TICKS_SS);
2760 UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
2761 DEFAULT_JS_RESET_TICKS_CL);
2762 UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
2763 DEFAULT_JS_RESET_TICKS_DUMPING);
2764
2765 kbase_js_set_timeouts(kbdev);
2766
2767 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2768
2769 return count;
2770 }
2771
2772 dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2773 "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2774 "Write 0 for no change, -1 to restore default timeout\n");
2775 return -EINVAL;
2776 }
2777
2778 static unsigned long get_js_timeout_in_ms(
2779 u32 scheduling_period_ns,
2780 u32 ticks)
2781 {
2782 u64 ms = (u64)ticks * scheduling_period_ns;
2783
2784 do_div(ms, 1000000UL);
2785 return ms;
2786 }
2787
2788 /**
2789 * show_js_timeouts - Show callback for the js_timeouts sysfs file.
2790 *
2791 * This function is called to get the contents of the js_timeouts sysfs
2792 * file. It returns the last set values written to the js_timeouts sysfs file.
2793 * If the file has not been written to yet, the values returned are the current
2794 * settings in use.
2795 * @dev: The device this sysfs file is for
2796 * @attr: The attributes of the sysfs file
2797 * @buf: The output buffer for the sysfs file contents
2798 *
2799 * Return: The number of bytes output to @buf.
2800 */
2801 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2802 {
2803 struct kbase_device *kbdev;
2804 ssize_t ret;
2805 unsigned long js_soft_stop_ms;
2806 unsigned long js_soft_stop_ms_cl;
2807 unsigned long js_hard_stop_ms_ss;
2808 unsigned long js_hard_stop_ms_cl;
2809 unsigned long js_hard_stop_ms_dumping;
2810 unsigned long js_reset_ms_ss;
2811 unsigned long js_reset_ms_cl;
2812 unsigned long js_reset_ms_dumping;
2813 u32 scheduling_period_ns;
2814
2815 kbdev = to_kbase_device(dev);
2816 if (!kbdev)
2817 return -ENODEV;
2818
2819 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2820
2821 #define GET_TIMEOUT(name) get_js_timeout_in_ms(\
2822 scheduling_period_ns, \
2823 kbdev->js_data.name)
2824
2825 js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
2826 js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
2827 js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
2828 js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
2829 js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
2830 js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
2831 js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
2832 js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
2833
2834 #undef GET_TIMEOUT
2835
2836 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2837 js_soft_stop_ms, js_soft_stop_ms_cl,
2838 js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2839 js_hard_stop_ms_dumping, js_reset_ms_ss,
2840 js_reset_ms_cl, js_reset_ms_dumping);
2841
2842 if (ret >= PAGE_SIZE) {
2843 buf[PAGE_SIZE - 2] = '\n';
2844 buf[PAGE_SIZE - 1] = '\0';
2845 ret = PAGE_SIZE - 1;
2846 }
2847
2848 return ret;
2849 }
2850
2851 /*
2852 * The sysfs file js_timeouts.
2853 *
2854 * This is used to override the current job scheduler values for
2855 * JS_SOFT_STOP_TICKS_SS
2856 * JS_SOFT_STOP_TICKS_CL
2857 * JS_HARD_STOP_TICKS_SS
2858 * JS_HARD_STOP_TICKS_CL
2859 * JS_HARD_STOP_TICKS_DUMPING
2860 * JS_RESET_TICKS_SS
2861 * JS_RESET_TICKS_CL
2862 * JS_RESET_TICKS_DUMPING.
2863 */
2864 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
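
/*
 * Illustrative sketch (not part of the driver): overriding only the soft-stop
 * timeout while leaving the other seven values untouched (0 = no change,
 * -1 = restore the default). The sysfs path is an assumption.
 *
 *   const char *v = "500 0 0 0 0 0 0 0\n";   // soft_stop_ms = 500
 *   int fd = open("/sys/class/misc/mali0/device/js_timeouts", O_WRONLY);
 *   if (fd >= 0) {
 *           write(fd, v, strlen(v));
 *           close(fd);
 *   }
 */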
2865
2866 static u32 get_new_js_timeout(
2867 u32 old_period,
2868 u32 old_ticks,
2869 u32 new_scheduling_period_ns)
2870 {
2871 u64 ticks = (u64)old_period * (u64)old_ticks;
2872 do_div(ticks, new_scheduling_period_ns);
2873 return ticks ? ticks : 1;
2874 }
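
/*
 * Worked example (illustrative): if the scheduling period is changed from
 * 100 ms to 50 ms while soft_stop_ticks is 30, get_new_js_timeout() rescales
 * the count to 100000000 * 30 / 50000000 = 60 ticks, so the timeout still
 * covers roughly the same 3 s of wall-clock time; a result of 0 is clamped
 * to 1 tick.
 */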
2875
2876 /**
2877 * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2878 * file
2879 * @dev: The device the sysfs file is for
2880 * @attr: The attributes of the sysfs file
2881 * @buf: The value written to the sysfs file
2882 * @count: The number of bytes written to the sysfs file
2883 *
2884 * This function is called when the js_scheduling_period sysfs file is written
2885 * to. It checks the data written and, if valid, updates the js_scheduling_period
2886 * value.
2887 *
2888 * Return: @count if the function succeeded. An error code on failure.
2889 */
2890 static ssize_t set_js_scheduling_period(struct device *dev,
2891 struct device_attribute *attr, const char *buf, size_t count)
2892 {
2893 struct kbase_device *kbdev;
2894 int ret;
2895 unsigned int js_scheduling_period;
2896 u32 new_scheduling_period_ns;
2897 u32 old_period;
2898 struct kbasep_js_device_data *js_data;
2899 unsigned long flags;
2900
2901 kbdev = to_kbase_device(dev);
2902 if (!kbdev)
2903 return -ENODEV;
2904
2905 js_data = &kbdev->js_data;
2906
2907 ret = kstrtouint(buf, 0, &js_scheduling_period);
2908 if (ret || !js_scheduling_period) {
2909 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2910 "Use format <js_scheduling_period_ms>\n");
2911 return -EINVAL;
2912 }
2913
2914 new_scheduling_period_ns = js_scheduling_period * 1000000;
2915
2916 /* Update scheduling timeouts */
2917 mutex_lock(&js_data->runpool_mutex);
2918 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2919
2920 /* If no contexts have been scheduled since js_timeouts was last written
2921 * to, the new timeouts might not have been latched yet. So check if an
2922 * update is pending and use the new values if necessary. */
2923
2924 /* Use previous 'new' scheduling period as a base if present. */
2925 old_period = js_data->scheduling_period_ns;
2926
2927 #define SET_TIMEOUT(name) \
2928 (js_data->name = get_new_js_timeout(\
2929 old_period, \
2930 kbdev->js_data.name, \
2931 new_scheduling_period_ns))
2932
2933 SET_TIMEOUT(soft_stop_ticks);
2934 SET_TIMEOUT(soft_stop_ticks_cl);
2935 SET_TIMEOUT(hard_stop_ticks_ss);
2936 SET_TIMEOUT(hard_stop_ticks_cl);
2937 SET_TIMEOUT(hard_stop_ticks_dumping);
2938 SET_TIMEOUT(gpu_reset_ticks_ss);
2939 SET_TIMEOUT(gpu_reset_ticks_cl);
2940 SET_TIMEOUT(gpu_reset_ticks_dumping);
2941
2942 #undef SET_TIMEOUT
2943
2944 js_data->scheduling_period_ns = new_scheduling_period_ns;
2945
2946 kbase_js_set_timeouts(kbdev);
2947
2948 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2949 mutex_unlock(&js_data->runpool_mutex);
2950
2951 dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2952 js_scheduling_period);
2953
2954 return count;
2955 }
2956
2957 /**
2958 * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2959 * entry.
2960 * @dev: The device this sysfs file is for.
2961 * @attr: The attributes of the sysfs file.
2962 * @buf: The output buffer for the sysfs file contents.
2963 *
2964 * This function is called to get the period currently used for JS scheduling,
2965 * expressed in milliseconds.
2966 *
2967 * Return: The number of bytes output to @buf.
2968 */
2969 static ssize_t show_js_scheduling_period(struct device *dev,
2970 struct device_attribute *attr, char * const buf)
2971 {
2972 struct kbase_device *kbdev;
2973 u32 period;
2974 ssize_t ret;
2975
2976 kbdev = to_kbase_device(dev);
2977 if (!kbdev)
2978 return -ENODEV;
2979
2980 period = kbdev->js_data.scheduling_period_ns;
2981
2982 ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2983 period / 1000000);
2984
2985 return ret;
2986 }
2987
2988 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2989 show_js_scheduling_period, set_js_scheduling_period);
2990
2991 #if !MALI_CUSTOMER_RELEASE
2992 /**
2993 * set_force_replay - Store callback for the force_replay sysfs file.
2994 *
2995 * @dev: The device this sysfs file is for
2996 * @attr: The attributes of the sysfs file
2997 * @buf: The value written to the sysfs file
2998 * @count: The number of bytes written to the sysfs file
2999 *
3000 * Return: @count if the function succeeded. An error code on failure.
3001 */
3002 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3003 {
3004 struct kbase_device *kbdev;
3005
3006 kbdev = to_kbase_device(dev);
3007 if (!kbdev)
3008 return -ENODEV;
3009
3010 if (!strncmp("limit=", buf, MIN(6, count))) {
3011 int force_replay_limit;
3012 int items = sscanf(buf, "limit=%u", &force_replay_limit);
3013
3014 if (items == 1) {
3015 kbdev->force_replay_random = false;
3016 kbdev->force_replay_limit = force_replay_limit;
3017 kbdev->force_replay_count = 0;
3018
3019 return count;
3020 }
3021 } else if (!strncmp("random_limit", buf, MIN(12, count))) {
3022 kbdev->force_replay_random = true;
3023 kbdev->force_replay_count = 0;
3024
3025 return count;
3026 } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
3027 kbdev->force_replay_random = false;
3028 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
3029 kbdev->force_replay_count = 0;
3030
3031 return count;
3032 } else if (!strncmp("core_req=", buf, MIN(9, count))) {
3033 unsigned int core_req;
3034 int items = sscanf(buf, "core_req=%x", &core_req);
3035
3036 if (items == 1) {
3037 kbdev->force_replay_core_req = (base_jd_core_req)core_req;
3038
3039 return count;
3040 }
3041 }
3042 dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
3043 return -EINVAL;
3044 }
3045
3046 /**
3047 * show_force_replay - Show callback for the force_replay sysfs file.
3048 *
3049 * This function is called to get the contents of the force_replay sysfs
3050 * file. It returns the last set value written to the force_replay sysfs file.
3051 * If the file didn't get written yet, the values will be 0.
3052 *
3053 * @dev: The device this sysfs file is for
3054 * @attr: The attributes of the sysfs file
3055 * @buf: The output buffer for the sysfs file contents
3056 *
3057 * Return: The number of bytes output to @buf.
3058 */
3059 static ssize_t show_force_replay(struct device *dev,
3060 struct device_attribute *attr, char * const buf)
3061 {
3062 struct kbase_device *kbdev;
3063 ssize_t ret;
3064
3065 kbdev = to_kbase_device(dev);
3066 if (!kbdev)
3067 return -ENODEV;
3068
3069 if (kbdev->force_replay_random)
3070 ret = scnprintf(buf, PAGE_SIZE,
3071 "limit=0\nrandom_limit\ncore_req=%x\n",
3072 kbdev->force_replay_core_req);
3073 else
3074 ret = scnprintf(buf, PAGE_SIZE,
3075 "limit=%u\nnorandom_limit\ncore_req=%x\n",
3076 kbdev->force_replay_limit,
3077 kbdev->force_replay_core_req);
3078
3079 if (ret >= PAGE_SIZE) {
3080 buf[PAGE_SIZE - 2] = '\n';
3081 buf[PAGE_SIZE - 1] = '\0';
3082 ret = PAGE_SIZE - 1;
3083 }
3084
3085 return ret;
3086 }
3087
3088 /*
3089 * The sysfs file force_replay.
3090 */
3091 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
3092 set_force_replay);
3093 #endif /* !MALI_CUSTOMER_RELEASE */
3094
3095 #ifdef CONFIG_MALI_DEBUG
3096 static ssize_t set_js_softstop_always(struct device *dev,
3097 struct device_attribute *attr, const char *buf, size_t count)
3098 {
3099 struct kbase_device *kbdev;
3100 int ret;
3101 int softstop_always;
3102
3103 kbdev = to_kbase_device(dev);
3104 if (!kbdev)
3105 return -ENODEV;
3106
3107 ret = kstrtoint(buf, 0, &softstop_always);
3108 if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
3109 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
3110 "Use format <soft_stop_always>\n");
3111 return -EINVAL;
3112 }
3113
3114 kbdev->js_data.softstop_always = (bool) softstop_always;
3115 dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
3116 (kbdev->js_data.softstop_always) ?
3117 "Enabled" : "Disabled");
3118 return count;
3119 }
3120
3121 static ssize_t show_js_softstop_always(struct device *dev,
3122 struct device_attribute *attr, char * const buf)
3123 {
3124 struct kbase_device *kbdev;
3125 ssize_t ret;
3126
3127 kbdev = to_kbase_device(dev);
3128 if (!kbdev)
3129 return -ENODEV;
3130
3131 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
3132
3133 if (ret >= PAGE_SIZE) {
3134 buf[PAGE_SIZE - 2] = '\n';
3135 buf[PAGE_SIZE - 1] = '\0';
3136 ret = PAGE_SIZE - 1;
3137 }
3138
3139 return ret;
3140 }
3141
3142 /*
3143 * By default, soft-stops are disabled when only a single context is present.
3144 * The ability to enable soft-stop when only a single context is present can be
3145 * used for debug and unit-testing purposes.
3146 * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
3147 */
3148 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
3149 #endif /* CONFIG_MALI_DEBUG */
3150
3151 #ifdef CONFIG_MALI_DEBUG
3152 typedef void (kbasep_debug_command_func) (struct kbase_device *);
3153
3154 enum kbasep_debug_command_code {
3155 KBASEP_DEBUG_COMMAND_DUMPTRACE,
3156
3157 /* This must be the last enum */
3158 KBASEP_DEBUG_COMMAND_COUNT
3159 };
3160
3161 struct kbasep_debug_command {
3162 char *str;
3163 kbasep_debug_command_func *func;
3164 };
3165
3166 /* Debug commands supported by the driver */
3167 static const struct kbasep_debug_command debug_commands[] = {
3168 {
3169 .str = "dumptrace",
3170 .func = &kbasep_trace_dump,
3171 }
3172 };
3173
3174 /**
3175 * show_debug - Show callback for the debug_command sysfs file.
3176 *
3177 * This function is called to get the contents of the debug_command sysfs
3178 * file. This is a list of the available debug commands, separated by newlines.
3179 *
3180 * @dev: The device this sysfs file is for
3181 * @attr: The attributes of the sysfs file
3182 * @buf: The output buffer for the sysfs file contents
3183 *
3184 * Return: The number of bytes output to @buf.
3185 */
3186 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
3187 {
3188 struct kbase_device *kbdev;
3189 int i;
3190 ssize_t ret = 0;
3191
3192 kbdev = to_kbase_device(dev);
3193
3194 if (!kbdev)
3195 return -ENODEV;
3196
3197 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
3198 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
3199
3200 if (ret >= PAGE_SIZE) {
3201 buf[PAGE_SIZE - 2] = '\n';
3202 buf[PAGE_SIZE - 1] = '\0';
3203 ret = PAGE_SIZE - 1;
3204 }
3205
3206 return ret;
3207 }
3208
3209 /**
3210 * issue_debug - Store callback for the debug_command sysfs file.
3211 *
3212 * This function is called when the debug_command sysfs file is written to.
3213 * It matches the requested command against the available commands, and if
3214 * a matching command is found calls the associated function from
3215 * @debug_commands to issue the command.
3216 *
3217 * @dev: The device with sysfs file is for
3218 * @attr: The attributes of the sysfs file
3219 * @buf: The value written to the sysfs file
3220 * @count: The number of bytes written to the sysfs file
3221 *
3222 * Return: @count if the function succeeded. An error code on failure.
3223 */
3224 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3225 {
3226 struct kbase_device *kbdev;
3227 int i;
3228
3229 kbdev = to_kbase_device(dev);
3230
3231 if (!kbdev)
3232 return -ENODEV;
3233
3234 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
3235 if (sysfs_streq(debug_commands[i].str, buf)) {
3236 debug_commands[i].func(kbdev);
3237 return count;
3238 }
3239 }
3240
3241 /* Debug Command not found */
3242 dev_err(dev, "debug_command: command not known\n");
3243 return -EINVAL;
3244 }
3245
3246 /* The sysfs file debug_command.
3247 *
3248 * This is used to issue general debug commands to the device driver.
3249 * Reading it will produce a list of debug commands, separated by newlines.
3250 * Writing to it with one of those commands will issue said command.
3251 */
3252 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
3253 #endif /* CONFIG_MALI_DEBUG */
3254
3255 /**
3256 * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
3257 * @dev: The device this sysfs file is for.
3258 * @attr: The attributes of the sysfs file.
3259 * @buf: The output buffer to receive the GPU information.
3260 *
3261 * This function is called to get a description of the present Mali
3262 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
3263 * number of cores, the hardware version and the raw product id. For
3264 * example
3265 *
3266 * Mali-T60x MP4 r0p0 0x6956
3267 *
3268 * Return: The number of bytes output to @buf.
3269 */
3270 static ssize_t kbase_show_gpuinfo(struct device *dev,
3271 struct device_attribute *attr, char *buf)
3272 {
3273 static const struct gpu_product_id_name {
3274 unsigned id;
3275 char *name;
3276 } gpu_product_id_names[] = {
3277 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
3278 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
3279 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
3280 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
3281 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
3282 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
3283 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
3284 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
3285 { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3286 .name = "Mali-G71" },
3287 { .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3288 .name = "Mali-THEx" },
3289 { .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3290 .name = "Mali-G51" },
3291 };
3292 const char *product_name = "(Unknown Mali GPU)";
3293 struct kbase_device *kbdev;
3294 u32 gpu_id;
3295 unsigned product_id, product_id_mask;
3296 unsigned i;
3297 bool is_new_format;
3298
3299 kbdev = to_kbase_device(dev);
3300 if (!kbdev)
3301 return -ENODEV;
3302
3303 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3304 product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3305 is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
3306 product_id_mask =
3307 (is_new_format ?
3308 GPU_ID2_PRODUCT_MODEL :
3309 GPU_ID_VERSION_PRODUCT_ID) >>
3310 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3311
3312 for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
3313 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
3314
3315 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
3316 (p->id & product_id_mask) ==
3317 (product_id & product_id_mask)) {
3318 product_name = p->name;
3319 break;
3320 }
3321 }
3322
3323 return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n",
3324 product_name, kbdev->gpu_props.num_cores,
3325 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
3326 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
3327 product_id);
3328 }
3329 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
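
/*
 * Illustrative sketch (not part of the driver): reading the GPU description
 * from user space. The sysfs path is an assumption; the string follows the
 * "<name> <cores> cores r<major>p<minor> 0x<product_id>" format built above,
 * e.g. "Mali-T86x 4 cores r0p2 0x0860".
 *
 *   char info[128] = { 0 };
 *   int fd = open("/sys/class/misc/mali0/device/gpuinfo", O_RDONLY);
 *   if (fd >= 0) {
 *           read(fd, info, sizeof(info) - 1);
 *           close(fd);
 *   }
 */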
3330
3331 /**
3332 * set_dvfs_period - Store callback for the dvfs_period sysfs file.
3333 * @dev: The device this sysfs file is for
3334 * @attr: The attributes of the sysfs file
3335 * @buf: The value written to the sysfs file
3336 * @count: The number of bytes written to the sysfs file
3337 *
3338 * This function is called when the dvfs_period sysfs file is written to. It
3339 * checks the data written and, if valid, updates the DVFS period variable.
3340 *
3341 * Return: @count if the function succeeded. An error code on failure.
3342 */
3343 static ssize_t set_dvfs_period(struct device *dev,
3344 struct device_attribute *attr, const char *buf, size_t count)
3345 {
3346 struct kbase_device *kbdev;
3347 int ret;
3348 int dvfs_period;
3349
3350 kbdev = to_kbase_device(dev);
3351 if (!kbdev)
3352 return -ENODEV;
3353
3354 ret = kstrtoint(buf, 0, &dvfs_period);
3355 if (ret || dvfs_period <= 0) {
3356 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
3357 "Use format <dvfs_period_ms>\n");
3358 return -EINVAL;
3359 }
3360
3361 kbdev->pm.dvfs_period = dvfs_period;
3362 dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
3363
3364 return count;
3365 }
3366
3367 /**
3368 * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
3369 * @dev: The device this sysfs file is for.
3370 * @attr: The attributes of the sysfs file.
3371 * @buf: The output buffer for the sysfs file contents.
3372 *
3373 * This function is called to get the current period used for the DVFS sample
3374 * timer.
3375 *
3376 * Return: The number of bytes output to @buf.
3377 */
3378 static ssize_t show_dvfs_period(struct device *dev,
3379 struct device_attribute *attr, char * const buf)
3380 {
3381 struct kbase_device *kbdev;
3382 ssize_t ret;
3383
3384 kbdev = to_kbase_device(dev);
3385 if (!kbdev)
3386 return -ENODEV;
3387
3388 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
3389
3390 return ret;
3391 }
3392
3393 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
3394 set_dvfs_period);
3395
3396 /**
3397 * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
3398 * @dev: The device this sysfs file is for
3399 * @attr: The attributes of the sysfs file
3400 * @buf: The value written to the sysfs file
3401 * @count: The number of bytes written to the sysfs file
3402 *
3403 * This function is called when the pm_poweroff sysfs file is written to.
3404 *
3405 * This file contains three values separated by whitespace. The values
3406 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
3407 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
3408 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
3409 * ticks before the GPU is powered off), in that order.
3410 *
3411 * Return: @count if the function succeeded. An error code on failure.
3412 */
3413 static ssize_t set_pm_poweroff(struct device *dev,
3414 struct device_attribute *attr, const char *buf, size_t count)
3415 {
3416 struct kbase_device *kbdev;
3417 int items;
3418 s64 gpu_poweroff_time;
3419 int poweroff_shader_ticks, poweroff_gpu_ticks;
3420
3421 kbdev = to_kbase_device(dev);
3422 if (!kbdev)
3423 return -ENODEV;
3424
3425 items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
3426 &poweroff_shader_ticks,
3427 &poweroff_gpu_ticks);
3428 if (items != 3) {
3429 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
3430 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
3431 return -EINVAL;
3432 }
3433
3434 kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
3435 kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
3436 kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
3437
3438 return count;
3439 }
3440
3441 /**
3442 * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
3443 * @dev: The device this sysfs file is for.
3444 * @attr: The attributes of the sysfs file.
3445 * @buf: The output buffer for the sysfs file contents.
3446 *
3447 * This function is called to get the current power-off timer settings: the
3448 * poweroff timer period and the shader and GPU poweroff tick counts.
3449 *
3450 * Return: The number of bytes output to @buf.
3451 */
3452 static ssize_t show_pm_poweroff(struct device *dev,
3453 struct device_attribute *attr, char * const buf)
3454 {
3455 struct kbase_device *kbdev;
3456 ssize_t ret;
3457
3458 kbdev = to_kbase_device(dev);
3459 if (!kbdev)
3460 return -ENODEV;
3461
3462 ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
3463 ktime_to_ns(kbdev->pm.gpu_poweroff_time),
3464 kbdev->pm.poweroff_shader_ticks,
3465 kbdev->pm.poweroff_gpu_ticks);
3466
3467 return ret;
3468 }
3469
3470 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
3471 set_pm_poweroff);
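
/*
 * Illustrative sketch (not part of the driver): requesting a 400 us poweroff
 * timer with shaders powered down after 3 idle ticks and the whole GPU after
 * 2. The sysfs path and the values are assumptions.
 *
 *   const char *v = "400000 3 2\n";   // <time_ns> <shader_ticks> <gpu_ticks>
 *   int fd = open("/sys/class/misc/mali0/device/pm_poweroff", O_WRONLY);
 *   if (fd >= 0) {
 *           write(fd, v, strlen(v));
 *           close(fd);
 *   }
 */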
3472
3473 /**
3474 * set_reset_timeout - Store callback for the reset_timeout sysfs file.
3475 * @dev: The device this sysfs file is for
3476 * @attr: The attributes of the sysfs file
3477 * @buf: The value written to the sysfs file
3478 * @count: The number of bytes written to the sysfs file
3479 *
3480 * This function is called when the reset_timeout sysfs file is written to. It
3481 * checks the data written, and if valid updates the reset timeout.
3482 *
3483 * Return: @count if the function succeeded. An error code on failure.
3484 */
3485 static ssize_t set_reset_timeout(struct device *dev,
3486 struct device_attribute *attr, const char *buf, size_t count)
3487 {
3488 struct kbase_device *kbdev;
3489 int ret;
3490 int reset_timeout;
3491
3492 kbdev = to_kbase_device(dev);
3493 if (!kbdev)
3494 return -ENODEV;
3495
3496 ret = kstrtoint(buf, 0, &reset_timeout);
3497 if (ret || reset_timeout <= 0) {
3498 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
3499 "Use format <reset_timeout_ms>\n");
3500 return -EINVAL;
3501 }
3502
3503 kbdev->reset_timeout_ms = reset_timeout;
3504 dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
3505
3506 return count;
3507 }
3508
3509 /**
3510 * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
3511 * @dev: The device this sysfs file is for.
3512 * @attr: The attributes of the sysfs file.
3513 * @buf: The output buffer for the sysfs file contents.
3514 *
3515 * This function is called to get the current reset timeout.
3516 *
3517 * Return: The number of bytes output to @buf.
3518 */
3519 static ssize_t show_reset_timeout(struct device *dev,
3520 struct device_attribute *attr, char * const buf)
3521 {
3522 struct kbase_device *kbdev;
3523 ssize_t ret;
3524
3525 kbdev = to_kbase_device(dev);
3526 if (!kbdev)
3527 return -ENODEV;
3528
3529 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
3530
3531 return ret;
3532 }
3533
3534 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
3535 set_reset_timeout);
3536
3537
3538
3539 static ssize_t show_mem_pool_size(struct device *dev,
3540 struct device_attribute *attr, char * const buf)
3541 {
3542 struct kbase_device *kbdev;
3543 ssize_t ret;
3544
3545 kbdev = to_kbase_device(dev);
3546 if (!kbdev)
3547 return -ENODEV;
3548
3549 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
3550 kbase_mem_pool_size(&kbdev->mem_pool));
3551
3552 return ret;
3553 }
3554
3555 static ssize_t set_mem_pool_size(struct device *dev,
3556 struct device_attribute *attr, const char *buf, size_t count)
3557 {
3558 struct kbase_device *kbdev;
3559 size_t new_size;
3560 int err;
3561
3562 kbdev = to_kbase_device(dev);
3563 if (!kbdev)
3564 return -ENODEV;
3565
3566 err = kstrtoul(buf, 0, (unsigned long *)&new_size);
3567 if (err)
3568 return err;
3569
3570 kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
3571
3572 return count;
3573 }
3574
3575 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
3576 set_mem_pool_size);
3577
3578 static ssize_t show_mem_pool_max_size(struct device *dev,
3579 struct device_attribute *attr, char * const buf)
3580 {
3581 struct kbase_device *kbdev;
3582 ssize_t ret;
3583
3584 kbdev = to_kbase_device(dev);
3585 if (!kbdev)
3586 return -ENODEV;
3587
3588 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
3589 kbase_mem_pool_max_size(&kbdev->mem_pool));
3590
3591 return ret;
3592 }
3593
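/**
 * set_mem_pool_max_size - Store callback for the mem_pool_max_size sysfs file.
 * @dev:   The device this sysfs file is for.
 * @attr:  The attributes of the sysfs file.
 * @buf:   The value written to the sysfs file.
 * @count: The number of bytes written to the sysfs file.
 *
 * This function is called when the mem_pool_max_size sysfs file is written
 * to. It updates the maximum number of free pages the memory pool may hold.
 *
 * Return: @count if the function succeeded. An error code on failure.
 */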
3594 static ssize_t set_mem_pool_max_size(struct device *dev,
3595 struct device_attribute *attr, const char *buf, size_t count)
3596 {
3597 struct kbase_device *kbdev;
3598 size_t new_max_size;
3599 int err;
3600
3601 kbdev = to_kbase_device(dev);
3602 if (!kbdev)
3603 return -ENODEV;
3604
3605 err = kstrtoul(buf, 0, (unsigned long *)&new_max_size);
3606 if (err)
3607 return -EINVAL;
3608
3609 kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
3610
3611 return count;
3612 }
3613
3614 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
3615 set_mem_pool_max_size);
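
/*
 * Usage sketch for the two memory pool attributes above (illustrative only:
 * the sysfs path depends on the platform device name; sizes are in pages):
 *
 *   cat /sys/devices/platform/ffa30000.gpu/mem_pool_size
 *   cat /sys/devices/platform/ffa30000.gpu/mem_pool_max_size
 *
 *   # trim the pool of free pages down to 256 entries
 *   echo 256 > /sys/devices/platform/ffa30000.gpu/mem_pool_size
 */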
3616
3617 #ifdef CONFIG_DEBUG_FS
3618
3619 /* Number of entries in serialize_jobs_settings[] */
3620 #define NR_SERIALIZE_JOBS_SETTINGS 5
3621 /* Maximum string length in serialize_jobs_settings[].name */
3622 #define MAX_SERIALIZE_JOBS_NAME_LEN 16
3623
3624 static struct
3625 {
3626 char *name;
3627 u8 setting;
3628 } serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
3629 {"none", 0},
3630 {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
3631 {"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
3632 {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
3633 {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
3634 KBASE_SERIALIZE_RESET}
3635 };
3636
3637 /**
3638 * kbasep_serialize_jobs_seq_show - Show callback for the serialize_jobs debugfs
3639 * file
3640 * @sfile: seq_file pointer
3641 * @data: Private callback data
3642 *
3643 * This function is called to get the contents of the serialize_jobs debugfs
3644 * file. This is a list of the available settings with the currently active one
3645 * surrounded by square brackets.
3646 *
3647 * Return: 0 on success, or an error code on error
3648 */
3649 static int kbasep_serialize_jobs_seq_show(struct seq_file *sfile, void *data)
3650 {
3651 struct kbase_device *kbdev = sfile->private;
3652 int i;
3653
3654 CSTD_UNUSED(data);
3655
3656 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
3657 if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
3658 seq_printf(sfile, "[%s] ",
3659 serialize_jobs_settings[i].name);
3660 else
3661 seq_printf(sfile, "%s ",
3662 serialize_jobs_settings[i].name);
3663 }
3664
3665 seq_puts(sfile, "\n");
3666
3667 return 0;
3668 }
3669
3670 /**
3671 * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
3672 * debugfs file.
3673 * @file: File pointer
3674 * @ubuf: User buffer containing data to store
3675 * @count: Number of bytes in user buffer
3676 * @ppos: File position
3677 *
3678 * This function is called when the serialize_jobs debugfs file is written to.
3679 * It matches the requested setting against the available settings and if a
3680 * matching setting is found updates kbdev->serialize_jobs.
3681 *
3682 * Return: @count if the function succeeded. An error code on failure.
3683 */
3684 static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
3685 const char __user *ubuf, size_t count, loff_t *ppos)
3686 {
3687 struct seq_file *s = file->private_data;
3688 struct kbase_device *kbdev = s->private;
3689 char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
3690 int i;
3691 bool valid = false;
3692
3693 CSTD_UNUSED(ppos);
3694
3695 count = min_t(size_t, sizeof(buf) - 1, count);
3696 if (copy_from_user(buf, ubuf, count))
3697 return -EFAULT;
3698
3699 buf[count] = 0;
3700
3701 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
3702 if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
3703 kbdev->serialize_jobs =
3704 serialize_jobs_settings[i].setting;
3705 valid = true;
3706 break;
3707 }
3708 }
3709
3710 if (!valid) {
3711 dev_err(kbdev->dev, "serialize_jobs: invalid setting\n");
3712 return -EINVAL;
3713 }
3714
3715 return count;
3716 }
3717
3718 /**
3719 * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
3720 * debugfs file
3721 * @in: inode pointer
3722 * @file: file pointer
3723 *
3724 * Return: Zero on success, error code on failure
3725 */
3726 static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
3727 struct file *file)
3728 {
3729 return single_open(file, kbasep_serialize_jobs_seq_show, in->i_private);
3730 }
3731
3732 static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
3733 .open = kbasep_serialize_jobs_debugfs_open,
3734 .read = seq_read,
3735 .write = kbasep_serialize_jobs_debugfs_write,
3736 .llseek = seq_lseek,
3737 .release = single_release,
3738 };
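
/*
 * Usage sketch for the serialize_jobs debugfs file defined above
 * (illustrative only: assumes debugfs is mounted at /sys/kernel/debug and
 * that this is the first Mali device, named "mali0"):
 *
 *   # list the settings; the active one is shown in square brackets
 *   cat /sys/kernel/debug/mali0/serialize_jobs
 *
 *   # serialize all jobs and reset the GPU between atoms
 *   echo full-reset > /sys/kernel/debug/mali0/serialize_jobs
 */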
3739
3740 #endif /* CONFIG_DEBUG_FS */
3741
3742 static int kbasep_protected_mode_init(struct kbase_device *kbdev)
3743 {
3744 #ifdef CONFIG_OF
3745 struct device_node *protected_node;
3746 struct platform_device *pdev;
3747 struct protected_mode_device *protected_dev;
3748 #endif
3749
3750 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
3751 /* Use native protected ops */
3752 kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
3753 GFP_KERNEL);
3754 if (!kbdev->protected_dev)
3755 return -ENOMEM;
3756 kbdev->protected_dev->data = kbdev;
3757 kbdev->protected_ops = &kbase_native_protected_ops;
3758 kbdev->protected_mode_support = true;
3759 return 0;
3760 }
3761
3762 kbdev->protected_mode_support = false;
3763
3764 #ifdef CONFIG_OF
3765 protected_node = of_parse_phandle(kbdev->dev->of_node,
3766 "protected-mode-switcher", 0);
3767
3768 if (!protected_node)
3769 protected_node = of_parse_phandle(kbdev->dev->of_node,
3770 "secure-mode-switcher", 0);
3771
3772 if (!protected_node) {
3773 /* If protected_node cannot be looked up then we assume
3774 * protected mode is not supported on this platform. */
3775 dev_info(kbdev->dev, "Protected mode not available\n");
3776 return 0;
3777 }
3778
3779 pdev = of_find_device_by_node(protected_node);
3780 if (!pdev)
3781 return -EINVAL;
3782
3783 protected_dev = platform_get_drvdata(pdev);
3784 if (!protected_dev)
3785 return -EPROBE_DEFER;
3786
3787 kbdev->protected_ops = &protected_dev->ops;
3788 kbdev->protected_dev = protected_dev;
3789
3790 if (kbdev->protected_ops) {
3791 int err;
3792
3793 /* Make sure protected mode is disabled on startup */
3794 mutex_lock(&kbdev->pm.lock);
3795 err = kbdev->protected_ops->protected_mode_disable(
3796 kbdev->protected_dev);
3797 mutex_unlock(&kbdev->pm.lock);
3798
3799 /* protected_mode_disable() returns -EINVAL if not supported */
3800 kbdev->protected_mode_support = (err != -EINVAL);
3801 }
3802 #endif
3803 return 0;
3804 }
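
/*
 * Illustrative device tree fragment for an external protected mode switcher,
 * as looked up by kbasep_protected_mode_init() above (the node name and the
 * &smc_switcher phandle are placeholders, not taken from a real board):
 *
 *   gpu@ffa30000 {
 *           ...
 *           protected-mode-switcher = <&smc_switcher>;
 *   };
 */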
3805
3806 static void kbasep_protected_mode_term(struct kbase_device *kbdev)
3807 {
3808 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
3809 kfree(kbdev->protected_dev);
3810 }
3811
3812 #ifdef CONFIG_MALI_NO_MALI
3813 static int kbase_common_reg_map(struct kbase_device *kbdev)
3814 {
3815 return 0;
3816 }
3817 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3818 {
3819 }
3820 #else /* CONFIG_MALI_NO_MALI */
3821 static int kbase_common_reg_map(struct kbase_device *kbdev)
3822 {
3823 int err = -ENOMEM;
3824
3825 if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
3826 dev_err(kbdev->dev, "Register window unavailable\n");
3827 err = -EIO;
3828 goto out_region;
3829 }
3830
3831 kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3832 if (!kbdev->reg) {
3833 dev_err(kbdev->dev, "Can't remap register window\n");
3834 err = -EINVAL;
3835 goto out_ioremap;
3836 }
3837
3838 return 0;
3839
3840 out_ioremap:
3841 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3842 out_region:
3843 return err;
3844 }
3845
3846 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3847 {
3848 if (kbdev->reg) {
3849 iounmap(kbdev->reg);
3850 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3851 kbdev->reg = NULL;
3852 kbdev->reg_start = 0;
3853 kbdev->reg_size = 0;
3854 }
3855 }
3856 #endif /* CONFIG_MALI_NO_MALI */
3857
3858 static int registers_map(struct kbase_device * const kbdev)
3859 {
3860
3861 /* the first memory resource is the physical address of the GPU
3862 * registers */
3863 struct platform_device *pdev = to_platform_device(kbdev->dev);
3864 struct resource *reg_res;
3865 int err;
3866
3867 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3868 if (!reg_res) {
3869 dev_err(kbdev->dev, "Invalid register resource\n");
3870 return -ENOENT;
3871 }
3872
3873 kbdev->reg_start = reg_res->start;
3874 kbdev->reg_size = resource_size(reg_res);
3875
3876 err = kbase_common_reg_map(kbdev);
3877 if (err) {
3878 dev_err(kbdev->dev, "Failed to map registers\n");
3879 return err;
3880 }
3881
3882 return 0;
3883 }
3884
3885 static void registers_unmap(struct kbase_device *kbdev)
3886 {
3887 kbase_common_reg_unmap(kbdev);
3888 }
3889
3890 static int power_control_init(struct platform_device *pdev)
3891 {
3892 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3893 int err = 0;
3894
3895 if (!kbdev)
3896 return -ENODEV;
3897
3898 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3899 && defined(CONFIG_REGULATOR)
3900 kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3901 if (IS_ERR_OR_NULL(kbdev->regulator)) {
3902 err = PTR_ERR(kbdev->regulator);
3903 kbdev->regulator = NULL;
3904 if (err == -EPROBE_DEFER) {
3905 dev_err(&pdev->dev, "Failed to get regulator\n");
3906 return err;
3907 }
3908 dev_info(kbdev->dev,
3909 "Continuing without Mali regulator control\n");
3910 /* Allow probe to continue without regulator */
3911 }
3912 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3913
3914 kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3915 if (IS_ERR_OR_NULL(kbdev->clock)) {
3916 err = PTR_ERR(kbdev->clock);
3917 kbdev->clock = NULL;
3918 if (err == -EPROBE_DEFER) {
3919 dev_err(&pdev->dev, "Failed to get clock\n");
3920 goto fail;
3921 }
3922 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3923 /* Allow probe to continue without clock. */
3924 } else {
3925 err = clk_prepare(kbdev->clock);
3926 if (err) {
3927 dev_err(kbdev->dev,
3928 "Failed to prepare and enable clock (%d)\n",
3929 err);
3930 goto fail;
3931 }
3932 }
3933
3934 err = kbase_platform_rk_init_opp_table(kbdev);
3935 if (err)
3936 dev_err(kbdev->dev, "Failed to init_opp_table (%d)\n", err);
3937
3938 return 0;
3939
3940 fail:
3941
3942 if (kbdev->clock != NULL) {
3943 clk_put(kbdev->clock);
3944 kbdev->clock = NULL;
3945 }
3946
3947 #ifdef CONFIG_REGULATOR
3948 if (NULL != kbdev->regulator) {
3949 regulator_put(kbdev->regulator);
3950 kbdev->regulator = NULL;
3951 }
3952 #endif
3953
3954 return err;
3955 }
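
/*
 * Illustrative device tree fragment matching the lookups in
 * power_control_init(): the "mali" supply becomes a "mali-supply" property
 * and the clock is requested by the name "clk_mali" (node name, phandles and
 * the clock index are placeholders):
 *
 *   gpu@ffa30000 {
 *           clocks = <&cru ACLK_GPU>;
 *           clock-names = "clk_mali";
 *           mali-supply = <&vdd_gpu>;
 *   };
 */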
3956
3957 static void power_control_term(struct kbase_device *kbdev)
3958 {
3959 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \
3960 defined(LSK_OPPV2_BACKPORT)
3961 dev_pm_opp_of_remove_table(kbdev->dev);
3962 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3963 of_free_opp_table(kbdev->dev);
3964 #endif
3965
3966 if (kbdev->clock) {
3967 clk_unprepare(kbdev->clock);
3968 clk_put(kbdev->clock);
3969 kbdev->clock = NULL;
3970 }
3971
3972 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3973 && defined(CONFIG_REGULATOR)
3974 if (kbdev->regulator) {
3975 regulator_put(kbdev->regulator);
3976 kbdev->regulator = NULL;
3977 }
3978 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3979 }
3980
3981 #ifdef CONFIG_DEBUG_FS
3982
3983 #if KBASE_GPU_RESET_EN
3984 #include <mali_kbase_hwaccess_jm.h>
3985
3986 static void trigger_quirks_reload(struct kbase_device *kbdev)
3987 {
3988 kbase_pm_context_active(kbdev);
3989 if (kbase_prepare_to_reset_gpu(kbdev))
3990 kbase_reset_gpu(kbdev);
3991 kbase_pm_context_idle(kbdev);
3992 }
3993
3994 #define MAKE_QUIRK_ACCESSORS(type) \
3995 static int type##_quirks_set(void *data, u64 val) \
3996 { \
3997 struct kbase_device *kbdev; \
3998 kbdev = (struct kbase_device *)data; \
3999 kbdev->hw_quirks_##type = (u32)val; \
4000 trigger_quirks_reload(kbdev); \
4001 return 0;\
4002 } \
4003 \
4004 static int type##_quirks_get(void *data, u64 *val) \
4005 { \
4006 struct kbase_device *kbdev;\
4007 kbdev = (struct kbase_device *)data;\
4008 *val = kbdev->hw_quirks_##type;\
4009 return 0;\
4010 } \
4011 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
4012 type##_quirks_set, "%llu\n")
4013
4014 MAKE_QUIRK_ACCESSORS(sc);
4015 MAKE_QUIRK_ACCESSORS(tiler);
4016 MAKE_QUIRK_ACCESSORS(mmu);
4017 MAKE_QUIRK_ACCESSORS(jm);
4018
4019 #endif /* KBASE_GPU_RESET_EN */
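
/*
 * Usage sketch for the quirks accessors generated above (illustrative only:
 * assumes debugfs is mounted at /sys/kernel/debug and a device named
 * "mali0"); writing a value triggers a GPU reset so the new quirks take
 * effect:
 *
 *   cat /sys/kernel/debug/mali0/quirks_sc
 *   echo 16 > /sys/kernel/debug/mali0/quirks_sc
 */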
4020
4021 /**
4022 * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
4023 * @file: File object to read is for
4024 * @buf: User buffer to populate with data
4025 * @len: Length of user buffer
4026 * @ppos: Offset within file object
4027 *
4028 * Retrieves the current status of protected debug mode
4029 * (0 = disabled, 1 = enabled)
4030 *
4031 * Return: Number of bytes added to user buffer
4032 */
4033 static ssize_t debugfs_protected_debug_mode_read(struct file *file,
4034 char __user *buf, size_t len, loff_t *ppos)
4035 {
4036 struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
4037 u32 gpu_status;
4038 ssize_t ret_val;
4039
4040 kbase_pm_context_active(kbdev);
4041 gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL);
4042 kbase_pm_context_idle(kbdev);
4043
4044 if (gpu_status & GPU_DBGEN)
4045 ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
4046 else
4047 ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
4048
4049 return ret_val;
4050 }
4051
4052 /*
4053 * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops
4054 *
4055 * Contains the file operations for the "protected_debug_mode" debugfs file
4056 */
4057 static const struct file_operations fops_protected_debug_mode = {
4058 .open = simple_open,
4059 .read = debugfs_protected_debug_mode_read,
4060 .llseek = default_llseek,
4061 };
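
/*
 * Usage sketch (illustrative only): the file is read-only and reports
 * whether the GPU_DBGEN bit is set in GPU_STATUS:
 *
 *   cat /sys/kernel/debug/mali0/protected_debug_mode
 */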
4062
4063 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
4064 {
4065 struct dentry *debugfs_ctx_defaults_directory;
4066 int err;
4067
4068 kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
4069 NULL);
4070 if (!kbdev->mali_debugfs_directory) {
4071 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
4072 err = -ENOMEM;
4073 goto out;
4074 }
4075
4076 kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
4077 kbdev->mali_debugfs_directory);
4078 if (!kbdev->debugfs_ctx_directory) {
4079 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
4080 err = -ENOMEM;
4081 goto out;
4082 }
4083
4084 debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
4085 kbdev->debugfs_ctx_directory);
4086 if (!debugfs_ctx_defaults_directory) {
4087 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
4088 err = -ENOMEM;
4089 goto out;
4090 }
4091
4092 #if !MALI_CUSTOMER_RELEASE
4093 kbasep_regs_dump_debugfs_init(kbdev);
4094 #endif /* !MALI_CUSTOMER_RELEASE */
4095 kbasep_regs_history_debugfs_init(kbdev);
4096
4097 kbase_debug_job_fault_debugfs_init(kbdev);
4098 kbasep_gpu_memory_debugfs_init(kbdev);
4099 kbase_as_fault_debugfs_init(kbdev);
4100 #if KBASE_GPU_RESET_EN
4101 /* fops_* variables created by invocations of macro
4102 * MAKE_QUIRK_ACCESSORS() above. */
4103 debugfs_create_file("quirks_sc", 0644,
4104 kbdev->mali_debugfs_directory, kbdev,
4105 &fops_sc_quirks);
4106 debugfs_create_file("quirks_tiler", 0644,
4107 kbdev->mali_debugfs_directory, kbdev,
4108 &fops_tiler_quirks);
4109 debugfs_create_file("quirks_mmu", 0644,
4110 kbdev->mali_debugfs_directory, kbdev,
4111 &fops_mmu_quirks);
4112 debugfs_create_file("quirks_jm", 0644,
4113 kbdev->mali_debugfs_directory, kbdev,
4114 &fops_jm_quirks);
4115 #endif /* KBASE_GPU_RESET_EN */
4116
4117 #ifndef CONFIG_MALI_COH_USER
4118 debugfs_create_bool("infinite_cache", 0644,
4119 debugfs_ctx_defaults_directory,
4120 (bool*)&(kbdev->infinite_cache_active_default));
4121 #endif /* CONFIG_MALI_COH_USER */
4122
4123 debugfs_create_size_t("mem_pool_max_size", 0644,
4124 debugfs_ctx_defaults_directory,
4125 &kbdev->mem_pool_max_size_default);
4126
4127 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
4128 debugfs_create_file("protected_debug_mode", S_IRUGO,
4129 kbdev->mali_debugfs_directory, kbdev,
4130 &fops_protected_debug_mode);
4131 }
4132
4133 #if KBASE_TRACE_ENABLE
4134 kbasep_trace_debugfs_init(kbdev);
4135 #endif /* KBASE_TRACE_ENABLE */
4136
4137 #ifdef CONFIG_MALI_TRACE_TIMELINE
4138 kbasep_trace_timeline_debugfs_init(kbdev);
4139 #endif /* CONFIG_MALI_TRACE_TIMELINE */
4140
4141 #ifdef CONFIG_MALI_DEVFREQ
4142 #ifdef CONFIG_DEVFREQ_THERMAL
4143 if (kbdev->inited_subsys & inited_devfreq)
4144 kbase_ipa_debugfs_init(kbdev);
4145 #endif /* CONFIG_DEVFREQ_THERMAL */
4146 #endif /* CONFIG_MALI_DEVFREQ */
4147
4148 #ifdef CONFIG_DEBUG_FS
4149 debugfs_create_file("serialize_jobs", S_IRUGO | S_IWUSR,
4150 kbdev->mali_debugfs_directory, kbdev,
4151 &kbasep_serialize_jobs_debugfs_fops);
4152 #endif /* CONFIG_DEBUG_FS */
4153
4154 return 0;
4155
4156 out:
4157 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
4158 return err;
4159 }
4160
4161 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
4162 {
4163 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
4164 }
4165
4166 #else /* CONFIG_DEBUG_FS */
4167 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
4168 {
4169 return 0;
4170 }
4171
4172 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
4173 #endif /* CONFIG_DEBUG_FS */
4174
4175 static void kbase_device_coherency_init(struct kbase_device *kbdev,
4176 unsigned prod_id)
4177 {
4178 #ifdef CONFIG_OF
4179 u32 supported_coherency_bitmap =
4180 kbdev->gpu_props.props.raw_props.coherency_mode;
4181 const void *coherency_override_dts;
4182 u32 override_coherency;
4183
4184 /* Only for tMIx :
4185 * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
4186 * documented for tMIx so force correct value here.
4187 */
4188 if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
4189 (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
4190 GPU_ID2_PRODUCT_TMIX))
4191 if (supported_coherency_bitmap ==
4192 COHERENCY_FEATURE_BIT(COHERENCY_ACE))
4193 supported_coherency_bitmap |=
4194 COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
4195
4196 #endif /* CONFIG_OF */
4197
4198 kbdev->system_coherency = COHERENCY_NONE;
4199
4200 /* device tree may override the coherency */
4201 #ifdef CONFIG_OF
4202 coherency_override_dts = of_get_property(kbdev->dev->of_node,
4203 "system-coherency",
4204 NULL);
4205 if (coherency_override_dts) {
4206
4207 override_coherency = be32_to_cpup(coherency_override_dts);
4208
4209 if ((override_coherency <= COHERENCY_NONE) &&
4210 (supported_coherency_bitmap &
4211 COHERENCY_FEATURE_BIT(override_coherency))) {
4212
4213 kbdev->system_coherency = override_coherency;
4214
4215 dev_info(kbdev->dev,
4216 "Using coherency mode %u set from dtb",
4217 override_coherency);
4218 } else
4219 dev_warn(kbdev->dev,
4220 "Ignoring unsupported coherency mode %u set from dtb",
4221 override_coherency);
4222 }
4223
4224 #endif /* CONFIG_OF */
4225
4226 kbdev->gpu_props.props.raw_props.coherency_mode =
4227 kbdev->system_coherency;
4228 }
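
/*
 * Illustrative device tree override for the system coherency mode read
 * above. The numeric values follow the upstream Midgard binding (0 =
 * ACE-Lite, 1 = ACE, 31 = no coherency); check the binding document for the
 * kernel in use:
 *
 *   gpu@ffa30000 {
 *           system-coherency = <31>;
 *   };
 */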
4229
4230 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
4231
4232 /* Callback used by the kbase bus logger client, to initiate a GPU reset
4233 * when the bus log is restarted. GPU reset is used as reference point
4234 * in HW bus log analyses.
4235 */
4236 static void kbase_logging_started_cb(void *data)
4237 {
4238 struct kbase_device *kbdev = (struct kbase_device *)data;
4239
4240 if (kbase_prepare_to_reset_gpu(kbdev))
4241 kbase_reset_gpu(kbdev);
4242 dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
4243 }
4244 #endif
4245
4246 static struct attribute *kbase_attrs[] = {
4247 #ifdef CONFIG_MALI_DEBUG
4248 &dev_attr_debug_command.attr,
4249 &dev_attr_js_softstop_always.attr,
4250 #endif
4251 #if !MALI_CUSTOMER_RELEASE
4252 &dev_attr_force_replay.attr,
4253 #endif
4254 &dev_attr_js_timeouts.attr,
4255 &dev_attr_soft_job_timeout.attr,
4256 &dev_attr_gpuinfo.attr,
4257 &dev_attr_dvfs_period.attr,
4258 &dev_attr_pm_poweroff.attr,
4259 &dev_attr_reset_timeout.attr,
4260 &dev_attr_js_scheduling_period.attr,
4261 &dev_attr_power_policy.attr,
4262 &dev_attr_core_availability_policy.attr,
4263 &dev_attr_core_mask.attr,
4264 &dev_attr_mem_pool_size.attr,
4265 &dev_attr_mem_pool_max_size.attr,
4266 NULL
4267 };
4268
4269 static const struct attribute_group kbase_attr_group = {
4270 .attrs = kbase_attrs,
4271 };
4272
4273 static int kbase_platform_device_remove(struct platform_device *pdev)
4274 {
4275 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
4276 const struct list_head *dev_list;
4277
4278 if (!kbdev)
4279 return -ENODEV;
4280
4281 kfree(kbdev->gpu_props.prop_buffer);
4282
4283 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
4284 if (kbdev->inited_subsys & inited_buslogger) {
4285 bl_core_client_unregister(kbdev->buslogger);
4286 kbdev->inited_subsys &= ~inited_buslogger;
4287 }
4288 #endif
4289
4290
4291 if (kbdev->inited_subsys & inited_dev_list) {
4292 dev_list = kbase_dev_list_get();
4293 list_del(&kbdev->entry);
4294 kbase_dev_list_put(dev_list);
4295 kbdev->inited_subsys &= ~inited_dev_list;
4296 }
4297
4298 if (kbdev->inited_subsys & inited_misc_register) {
4299 misc_deregister(&kbdev->mdev);
4300 kbdev->inited_subsys &= ~inited_misc_register;
4301 }
4302
4303 if (kbdev->inited_subsys & inited_sysfs_group) {
4304 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
4305 kbdev->inited_subsys &= ~inited_sysfs_group;
4306 }
4307
4308 if (kbdev->inited_subsys & inited_get_device) {
4309 put_device(kbdev->dev);
4310 kbdev->inited_subsys &= ~inited_get_device;
4311 }
4312
4313 if (kbdev->inited_subsys & inited_debugfs) {
4314 kbase_device_debugfs_term(kbdev);
4315 kbdev->inited_subsys &= ~inited_debugfs;
4316 }
4317
4318 if (kbdev->inited_subsys & inited_job_fault) {
4319 kbase_debug_job_fault_dev_term(kbdev);
4320 kbdev->inited_subsys &= ~inited_job_fault;
4321 }
4322 if (kbdev->inited_subsys & inited_vinstr) {
4323 kbase_vinstr_term(kbdev->vinstr_ctx);
4324 kbdev->inited_subsys &= ~inited_vinstr;
4325 }
4326
4327 #ifdef CONFIG_MALI_DEVFREQ
4328 if (kbdev->inited_subsys & inited_devfreq) {
4329 kbase_devfreq_term(kbdev);
4330 kbdev->inited_subsys &= ~inited_devfreq;
4331 }
4332 #endif
4333
4334 if (kbdev->inited_subsys & inited_backend_late) {
4335 kbase_backend_late_term(kbdev);
4336 kbdev->inited_subsys &= ~inited_backend_late;
4337 }
4338
4339 if (kbdev->inited_subsys & inited_tlstream) {
4340 kbase_tlstream_term();
4341 kbdev->inited_subsys &= ~inited_tlstream;
4342 }
4343
4344 /* Bring job and mem sys to a halt before we continue termination */
4345
4346 if (kbdev->inited_subsys & inited_js)
4347 kbasep_js_devdata_halt(kbdev);
4348
4349 if (kbdev->inited_subsys & inited_mem)
4350 kbase_mem_halt(kbdev);
4351
4352 if (kbdev->inited_subsys & inited_protected) {
4353 kbasep_protected_mode_term(kbdev);
4354 kbdev->inited_subsys &= ~inited_protected;
4355 }
4356
4357 if (kbdev->inited_subsys & inited_js) {
4358 kbasep_js_devdata_term(kbdev);
4359 kbdev->inited_subsys &= ~inited_js;
4360 }
4361
4362 if (kbdev->inited_subsys & inited_mem) {
4363 kbase_mem_term(kbdev);
4364 kbdev->inited_subsys &= ~inited_mem;
4365 }
4366
4367 if (kbdev->inited_subsys & inited_pm_runtime_init) {
4368 kbdev->pm.callback_power_runtime_term(kbdev);
4369 kbdev->inited_subsys &= ~inited_pm_runtime_init;
4370 }
4371
4372 if (kbdev->inited_subsys & inited_ctx_sched) {
4373 kbase_ctx_sched_term(kbdev);
4374 kbdev->inited_subsys &= ~inited_ctx_sched;
4375 }
4376
4377 if (kbdev->inited_subsys & inited_device) {
4378 kbase_device_term(kbdev);
4379 kbdev->inited_subsys &= ~inited_device;
4380 }
4381
4382 if (kbdev->inited_subsys & inited_backend_early) {
4383 kbase_backend_early_term(kbdev);
4384 kbdev->inited_subsys &= ~inited_backend_early;
4385 }
4386
4387 if (kbdev->inited_subsys & inited_io_history) {
4388 kbase_io_history_term(&kbdev->io_history);
4389 kbdev->inited_subsys &= ~inited_io_history;
4390 }
4391
4392 if (kbdev->inited_subsys & inited_power_control) {
4393 power_control_term(kbdev);
4394 kbdev->inited_subsys &= ~inited_power_control;
4395 }
4396
4397 if (kbdev->inited_subsys & inited_registers_map) {
4398 registers_unmap(kbdev);
4399 kbdev->inited_subsys &= ~inited_registers_map;
4400 }
4401
4402 #ifdef CONFIG_MALI_NO_MALI
4403 if (kbdev->inited_subsys & inited_gpu_device) {
4404 gpu_device_destroy(kbdev);
4405 kbdev->inited_subsys &= ~inited_gpu_device;
4406 }
4407 #endif /* CONFIG_MALI_NO_MALI */
4408
4409 if (kbdev->inited_subsys != 0)
4410 dev_err(kbdev->dev, "Missing sub system termination\n");
4411
4412 kbase_device_free(kbdev);
4413
4414 return 0;
4415 }
4416
4417 extern void kbase_platform_rk_shutdown(struct kbase_device *kbdev);
4418 static void kbase_platform_device_shutdown(struct platform_device *pdev)
4419 {
4420 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
4421
4422 kbase_platform_rk_shutdown(kbdev);
4423 }
4424
4425 /* Number of register accesses for the buffer that we allocate during
4426 * initialization time. The buffer size can be changed later via debugfs. */
4427 #define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
4428
4429 static int kbase_platform_device_probe(struct platform_device *pdev)
4430 {
4431 struct kbase_device *kbdev;
4432 struct mali_base_gpu_core_props *core_props;
4433 u32 gpu_id;
4434 unsigned prod_id;
4435 const struct list_head *dev_list;
4436 int err = 0;
4437
4438 #ifdef CONFIG_OF
4439 err = kbase_platform_early_init();
4440 if (err) {
4441 dev_err(&pdev->dev, "Early platform initialization failed\n");
4442 kbase_platform_device_remove(pdev);
4443 return err;
4444 }
4445 #endif
4446 kbdev = kbase_device_alloc();
4447 if (!kbdev) {
4448 dev_err(&pdev->dev, "Allocate device failed\n");
4449 kbase_platform_device_remove(pdev);
4450 return -ENOMEM;
4451 }
4452
4453 kbdev->dev = &pdev->dev;
4454 dev_set_drvdata(kbdev->dev, kbdev);
4455
4456 #ifdef CONFIG_MALI_NO_MALI
4457 err = gpu_device_create(kbdev);
4458 if (err) {
4459 dev_err(&pdev->dev, "Dummy model initialization failed\n");
4460 kbase_platform_device_remove(pdev);
4461 return err;
4462 }
4463 kbdev->inited_subsys |= inited_gpu_device;
4464 #endif /* CONFIG_MALI_NO_MALI */
4465
4466 err = assign_irqs(pdev);
4467 if (err) {
4468 dev_err(&pdev->dev, "IRQ search failed\n");
4469 kbase_platform_device_remove(pdev);
4470 return err;
4471 }
4472
4473 err = registers_map(kbdev);
4474 if (err) {
4475 dev_err(&pdev->dev, "Register map failed\n");
4476 kbase_platform_device_remove(pdev);
4477 return err;
4478 }
4479 kbdev->inited_subsys |= inited_registers_map;
4480
4481 err = power_control_init(pdev);
4482 if (err) {
4483 dev_err(&pdev->dev, "Power control initialization failed\n");
4484 kbase_platform_device_remove(pdev);
4485 return err;
4486 }
4487 kbdev->inited_subsys |= inited_power_control;
4488
4489 err = kbase_io_history_init(&kbdev->io_history,
4490 KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
4491 if (err) {
4492 dev_err(&pdev->dev, "Register access history initialization failed\n");
4493 kbase_platform_device_remove(pdev);
4494 return -ENOMEM;
4495 }
4496 kbdev->inited_subsys |= inited_io_history;
4497
4498 err = kbase_backend_early_init(kbdev);
4499 if (err) {
4500 dev_err(kbdev->dev, "Early backend initialization failed\n");
4501 kbase_platform_device_remove(pdev);
4502 return err;
4503 }
4504 kbdev->inited_subsys |= inited_backend_early;
4505
4506 scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
4507 kbase_dev_nr);
4508
4509 kbase_disjoint_init(kbdev);
4510
4511 /* obtain min/max configured gpu frequencies */
4512 core_props = &(kbdev->gpu_props.props.core_props);
4513 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
4514 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
4515
4516 err = kbase_device_init(kbdev);
4517 if (err) {
4518 dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
4519 kbase_platform_device_remove(pdev);
4520 return err;
4521 }
4522 kbdev->inited_subsys |= inited_device;
4523
4524 err = kbase_ctx_sched_init(kbdev);
4525 if (err) {
4526 dev_err(kbdev->dev, "Context scheduler initialization failed (%d)\n",
4527 err);
4528 kbase_platform_device_remove(pdev);
4529 return err;
4530 }
4531 kbdev->inited_subsys |= inited_ctx_sched;
4532
4533 if (kbdev->pm.callback_power_runtime_init) {
4534 err = kbdev->pm.callback_power_runtime_init(kbdev);
4535 if (err) {
4536 dev_err(kbdev->dev,
4537 "Runtime PM initialization failed\n");
4538 kbase_platform_device_remove(pdev);
4539 return err;
4540 }
4541 kbdev->inited_subsys |= inited_pm_runtime_init;
4542 }
4543
4544 err = kbase_mem_init(kbdev);
4545 if (err) {
4546 dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
4547 kbase_platform_device_remove(pdev);
4548 return err;
4549 }
4550 kbdev->inited_subsys |= inited_mem;
4551
4552 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
4553 gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
4554 prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
4555
4556 kbase_device_coherency_init(kbdev, prod_id);
4557
4558 err = kbasep_protected_mode_init(kbdev);
4559 if (err) {
4560 dev_err(kbdev->dev, "Protected mode subsystem initialization failed\n");
4561 kbase_platform_device_remove(pdev);
4562 return err;
4563 }
4564 kbdev->inited_subsys |= inited_protected;
4565
4566 dev_list = kbase_dev_list_get();
4567 list_add(&kbdev->entry, &kbase_dev_list);
4568 kbase_dev_list_put(dev_list);
4569 kbdev->inited_subsys |= inited_dev_list;
4570
4571 err = kbasep_js_devdata_init(kbdev);
4572 if (err) {
4573 dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
4574 kbase_platform_device_remove(pdev);
4575 return err;
4576 }
4577 kbdev->inited_subsys |= inited_js;
4578
4579 err = kbase_tlstream_init();
4580 if (err) {
4581 dev_err(kbdev->dev, "Timeline stream initialization failed\n");
4582 kbase_platform_device_remove(pdev);
4583 return err;
4584 }
4585 kbdev->inited_subsys |= inited_tlstream;
4586
4587 err = kbase_backend_late_init(kbdev);
4588 if (err) {
4589 dev_err(kbdev->dev, "Late backend initialization failed\n");
4590 kbase_platform_device_remove(pdev);
4591 return err;
4592 }
4593 kbdev->inited_subsys |= inited_backend_late;
4594
4595 #ifdef CONFIG_MALI_DEVFREQ
4596 err = kbase_devfreq_init(kbdev);
4597 if (!err)
4598 kbdev->inited_subsys |= inited_devfreq;
4599 else
4600 dev_err(kbdev->dev, "Continuing without devfreq\n");
4601 #endif /* CONFIG_MALI_DEVFREQ */
4602
4603 kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
4604 if (!kbdev->vinstr_ctx) {
4605 dev_err(kbdev->dev,
4606 "Virtual instrumentation initialization failed\n");
4607 kbase_platform_device_remove(pdev);
4608 return -EINVAL;
4609 }
4610 kbdev->inited_subsys |= inited_vinstr;
4611
4612 err = kbase_debug_job_fault_dev_init(kbdev);
4613 if (err) {
4614 dev_err(kbdev->dev, "Job fault debug initialization failed\n");
4615 kbase_platform_device_remove(pdev);
4616 return err;
4617 }
4618 kbdev->inited_subsys |= inited_job_fault;
4619
4620 err = kbase_device_debugfs_init(kbdev);
4621 if (err) {
4622 dev_err(kbdev->dev, "DebugFS initialization failed");
4623 kbase_platform_device_remove(pdev);
4624 return err;
4625 }
4626 kbdev->inited_subsys |= inited_debugfs;
4627
4628 /* initialize the kctx list */
4629 mutex_init(&kbdev->kctx_list_lock);
4630 INIT_LIST_HEAD(&kbdev->kctx_list);
4631
4632 kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
4633 kbdev->mdev.name = kbdev->devname;
4634 kbdev->mdev.fops = &kbase_fops;
4635 kbdev->mdev.parent = get_device(kbdev->dev);
4636 kbdev->inited_subsys |= inited_get_device;
4637
4638 /* This needs to happen before registering the device with misc_register(),
4639 * otherwise it causes a race condition between registering the device and a
4640 * uevent event being generated for userspace, causing udev rules to run
4641 * which might expect certain sysfs attributes present. As a result of the
4642 * race condition we avoid, some Mali sysfs entries may have appeared to
4643 * udev to not exist.
4644 *
4645 * For more information, see
4646 * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the
4647 * paragraph that starts with "Word of warning", currently the second-last
4648 * paragraph.
4649 */
4650 err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
4651 if (err) {
4652 dev_err(&pdev->dev, "SysFS group creation failed\n");
4653 kbase_platform_device_remove(pdev);
4654 return err;
4655 }
4656 kbdev->inited_subsys |= inited_sysfs_group;
4657
4658 err = misc_register(&kbdev->mdev);
4659 if (err) {
4660 dev_err(kbdev->dev, "Misc device registration failed for %s\n",
4661 kbdev->devname);
4662 kbase_platform_device_remove(pdev);
4663 return err;
4664 }
4665 kbdev->inited_subsys |= inited_misc_register;
4666
4667
4668 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
4669 err = bl_core_client_register(kbdev->devname,
4670 kbase_logging_started_cb,
4671 kbdev, &kbdev->buslogger,
4672 THIS_MODULE, NULL);
4673 if (err == 0) {
4674 kbdev->inited_subsys |= inited_buslogger;
4675 bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
4676 } else {
4677 dev_warn(kbdev->dev, "Bus log client registration failed\n");
4678 err = 0;
4679 }
4680 #endif
4681
4682 err = kbase_gpuprops_populate_user_buffer(kbdev);
4683 if (err) {
4684 dev_err(&pdev->dev, "GPU property population failed");
4685 kbase_platform_device_remove(pdev);
4686 return err;
4687 }
4688
4689 dev_info(kbdev->dev,
4690 "Probed as %s\n", dev_name(kbdev->mdev.this_device));
4691
4692 kbase_dev_nr++;
4693
4694 return err;
4695 }
4696
4697 #undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
4698
4699 /**
4700 * kbase_device_suspend - Suspend callback from the OS.
4701 *
4702 * This is called by Linux when the device should suspend.
4703 *
4704 * @dev: The device to suspend
4705 *
4706 * Return: A standard Linux error code
4707 */
4708 static int kbase_device_suspend(struct device *dev)
4709 {
4710 struct kbase_device *kbdev = to_kbase_device(dev);
4711
4712 if (!kbdev)
4713 return -ENODEV;
4714
4715 #if defined(CONFIG_MALI_DEVFREQ) && \
4716 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4717 if (kbdev->inited_subsys & inited_devfreq)
4718 devfreq_suspend_device(kbdev->devfreq);
4719 #endif
4720
4721 kbase_pm_suspend(kbdev);
4722 return 0;
4723 }
4724
4725 /**
4726 * kbase_device_resume - Resume callback from the OS.
4727 *
4728 * This is called by Linux when the device should resume from suspension.
4729 *
4730 * @dev: The device to resume
4731 *
4732 * Return: A standard Linux error code
4733 */
4734 static int kbase_device_resume(struct device *dev)
4735 {
4736 struct kbase_device *kbdev = to_kbase_device(dev);
4737
4738 if (!kbdev)
4739 return -ENODEV;
4740
4741 kbase_pm_resume(kbdev);
4742
4743 #if defined(CONFIG_MALI_DEVFREQ) && \
4744 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4745 if (kbdev->inited_subsys & inited_devfreq)
4746 devfreq_resume_device(kbdev->devfreq);
4747 #endif
4748 return 0;
4749 }
4750
4751 /**
4752 * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
4753 *
4754 * This is called by Linux when the device should prepare for a condition in
4755 * which it will not be able to communicate with the CPU(s) and RAM due to
4756 * power management.
4757 *
4758 * @dev: The device to suspend
4759 *
4760 * Return: A standard Linux error code
4761 */
4762 #ifdef KBASE_PM_RUNTIME
4763 static int kbase_device_runtime_suspend(struct device *dev)
4764 {
4765 struct kbase_device *kbdev = to_kbase_device(dev);
4766
4767 if (!kbdev)
4768 return -ENODEV;
4769
4770 #if defined(CONFIG_MALI_DEVFREQ) && \
4771 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4772 if (kbdev->inited_subsys & inited_devfreq)
4773 devfreq_suspend_device(kbdev->devfreq);
4774 #endif
4775
4776 if (kbdev->pm.backend.callback_power_runtime_off) {
4777 kbdev->pm.backend.callback_power_runtime_off(kbdev);
4778 dev_dbg(dev, "runtime suspend\n");
4779 }
4780 return 0;
4781 }
4782 #endif /* KBASE_PM_RUNTIME */
4783
4784 /**
4785 * kbase_device_runtime_resume - Runtime resume callback from the OS.
4786 *
4787 * This is called by Linux when the device should go into a fully active state.
4788 *
4789 * @dev: The device to resume
4790 *
4791 * Return: A standard Linux error code
4792 */
4793
4794 #ifdef KBASE_PM_RUNTIME
4795 static int kbase_device_runtime_resume(struct device *dev)
4796 {
4797 int ret = 0;
4798 struct kbase_device *kbdev = to_kbase_device(dev);
4799
4800 if (!kbdev)
4801 return -ENODEV;
4802
4803 if (kbdev->pm.backend.callback_power_runtime_on) {
4804 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
4805 dev_dbg(dev, "runtime resume\n");
4806 }
4807
4808 #if defined(CONFIG_MALI_DEVFREQ) && \
4809 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4810 if (kbdev->inited_subsys & inited_devfreq)
4811 devfreq_resume_device(kbdev->devfreq);
4812 #endif
4813
4814 return ret;
4815 }
4816 #endif /* KBASE_PM_RUNTIME */
4817
4818
4819 #ifdef KBASE_PM_RUNTIME
4820 /**
4821 * kbase_device_runtime_idle - Runtime idle callback from the OS.
4822 * @dev: The device to check for idleness
4823 *
4824 * This is called by Linux when the device appears to be inactive and it might
4825 * be placed into a low power state.
4826 *
4827 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
4828 * otherwise a standard Linux error code
4829 */
4830 static int kbase_device_runtime_idle(struct device *dev)
4831 {
4832 struct kbase_device *kbdev = to_kbase_device(dev);
4833
4834 if (!kbdev)
4835 return -ENODEV;
4836
4837 /* Use platform specific implementation if it exists. */
4838 if (kbdev->pm.backend.callback_power_runtime_idle)
4839 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
4840
4841 return 0;
4842 }
4843 #endif /* KBASE_PM_RUNTIME */
4844
4845 /* The power management operations for the platform driver.
4846 */
4847 static const struct dev_pm_ops kbase_pm_ops = {
4848 .suspend = kbase_device_suspend,
4849 .resume = kbase_device_resume,
4850 #ifdef KBASE_PM_RUNTIME
4851 .runtime_suspend = kbase_device_runtime_suspend,
4852 .runtime_resume = kbase_device_runtime_resume,
4853 .runtime_idle = kbase_device_runtime_idle,
4854 #endif /* KBASE_PM_RUNTIME */
4855 };
4856
4857 #ifdef CONFIG_OF
4858 static const struct of_device_id kbase_dt_ids[] = {
4859 { .compatible = "arm,malit7xx" },
4860 { .compatible = "arm,mali-midgard" },
4861 { /* sentinel */ }
4862 };
4863 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
4864 #endif
4865
4866 static struct platform_driver kbase_platform_driver = {
4867 .probe = kbase_platform_device_probe,
4868 .remove = kbase_platform_device_remove,
4869 .shutdown = kbase_platform_device_shutdown,
4870 .driver = {
4871 .name = "midgard",
4872 .owner = THIS_MODULE,
4873 .pm = &kbase_pm_ops,
4874 .of_match_table = of_match_ptr(kbase_dt_ids),
4875 },
4876 };
4877
4878 /*
4879 * The driver will not provide a shortcut to create the Mali platform device
4880 * anymore when using Device Tree.
4881 */
4882 #ifdef CONFIG_OF
4883 module_platform_driver(kbase_platform_driver);
4884 #else
4885
4886 static int __init rockchip_gpu_init_driver(void)
4887 {
4888 return platform_driver_register(&kbase_platform_driver);
4889 }
4890 late_initcall(rockchip_gpu_init_driver);
4891
4892 static int __init kbase_driver_init(void)
4893 {
4894 int ret;
4895
4896 ret = kbase_platform_early_init();
4897 if (ret)
4898 return ret;
4899
4900 #ifdef CONFIG_MALI_PLATFORM_FAKE
4901 ret = kbase_platform_fake_register();
4902 if (ret)
4903 return ret;
4904 #endif
4905 ret = platform_driver_register(&kbase_platform_driver);
4906 #ifdef CONFIG_MALI_PLATFORM_FAKE
4907 if (ret)
4908 kbase_platform_fake_unregister();
4909 #endif
4910 return ret;
4911 }
4912
4913 static void __exit kbase_driver_exit(void)
4914 {
4915 platform_driver_unregister(&kbase_platform_driver);
4916 #ifdef CONFIG_MALI_PLATFORM_FAKE
4917 kbase_platform_fake_unregister();
4918 #endif
4919 }
4920
4921 module_init(kbase_driver_init);
4922 module_exit(kbase_driver_exit);
4923
4924 #endif /* CONFIG_OF */
4925
4926 MODULE_LICENSE("GPL");
4927 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4928 __stringify(BASE_UK_VERSION_MAJOR) "." \
4929 __stringify(BASE_UK_VERSION_MINOR) ")");
4930
4931 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4932 #define CREATE_TRACE_POINTS
4933 #endif
4934
4935 #ifdef CONFIG_MALI_GATOR_SUPPORT
4936 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4937 #include "mali_linux_trace.h"
4938
4939 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4940 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4941 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4942 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4943 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4944 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4945 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4946 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4947
4948 void kbase_trace_mali_pm_status(u32 event, u64 value)
4949 {
4950 trace_mali_pm_status(event, value);
4951 }
4952
4953 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4954 {
4955 trace_mali_pm_power_off(event, value);
4956 }
4957
4958 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4959 {
4960 trace_mali_pm_power_on(event, value);
4961 }
4962
4963 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4964 {
4965 trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4966 }
4967
4968 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4969 {
4970 trace_mali_page_fault_insert_pages(event, value);
4971 }
4972
4973 void kbase_trace_mali_mmu_as_in_use(int event)
4974 {
4975 trace_mali_mmu_as_in_use(event);
4976 }
4977
4978 void kbase_trace_mali_mmu_as_released(int event)
4979 {
4980 trace_mali_mmu_as_released(event);
4981 }
4982
4983 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4984 {
4985 trace_mali_total_alloc_pages_change(event);
4986 }
4987 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4988 #ifdef CONFIG_MALI_SYSTEM_TRACE
4989 #include "mali_linux_kbase_trace.h"
4990 #endif
4991