xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/device/mali_kbase_device.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Base kernel device APIs
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/types.h>
#include <linux/oom.h>

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_hwaccess_instr.h>
#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_config_defaults.h>
#include <linux/priority_control_manager.h>

#include <tl/mali_kbase_timeline.h>
#include "mali_kbase_kinstr_prfcnt.h"
#include "mali_kbase_vinstr.h"
#include "hwcnt/mali_kbase_hwcnt_context.h"
#include "hwcnt/mali_kbase_hwcnt_virtualizer.h"

#include "mali_kbase_device.h"
#include "mali_kbase_device_internal.h"
#include "backend/gpu/mali_kbase_pm_internal.h"
#include "backend/gpu/mali_kbase_irq_internal.h"
#include "mali_kbase_regs_history_debugfs.h"
#include "mali_kbase_pbha.h"

#ifdef CONFIG_MALI_ARBITER_SUPPORT
#include "arbiter/mali_kbase_arbiter_pm.h"
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

#if defined(CONFIG_DEBUG_FS) && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)

/* Number of register accesses for the buffer that we allocate during
 * initialization time. The buffer size can be changed later via debugfs.
 */
#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)

#endif /* defined(CONFIG_DEBUG_FS) && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */

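/* Global list of kbase devices and the mutex that protects it. kbase_dev_nr
 * supplies the numeric suffix used when naming a newly probed device.
 */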
static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);
static int kbase_dev_nr;

struct kbase_device *kbase_device_alloc(void)
{
	return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
}

/**
 * kbase_device_all_as_init() - Initialise address space objects of the device.
 *
 * @kbdev: Pointer to kbase device.
 *
 * Return: 0 on success otherwise non-zero.
 */
static int kbase_device_all_as_init(struct kbase_device *kbdev)
{
	int i, err = 0;

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		err = kbase_mmu_as_init(kbdev, i);
		if (err)
			break;
	}

	if (err) {
		while (i-- > 0)
			kbase_mmu_as_term(kbdev, i);
	}

	return err;
}

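/**
 * kbase_device_all_as_term() - Terminate address space objects of the device.
 *
 * @kbdev: Pointer to kbase device.
 */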
static void kbase_device_all_as_term(struct kbase_device *kbdev)
{
	int i;

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
		kbase_mmu_as_term(kbdev, i);
}

int kbase_device_pcm_dev_init(struct kbase_device *const kbdev)
{
	int err = 0;

#if IS_ENABLED(CONFIG_OF)
	struct device_node *prio_ctrl_node;

	/* Check to see whether or not a platform specific priority control manager
	 * is available.
	 */
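	/* A module reference is taken on the manager below and released in
	 * kbase_device_pcm_dev_term(). If the manager device has not probed
	 * yet, -EPROBE_DEFER is returned so that probing can be retried once
	 * it is ready.
	 */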
	prio_ctrl_node = of_parse_phandle(kbdev->dev->of_node,
			"priority-control-manager", 0);
	if (!prio_ctrl_node) {
		dev_info(kbdev->dev,
			"No priority control manager is configured");
	} else {
		struct platform_device *const pdev =
			of_find_device_by_node(prio_ctrl_node);

		if (!pdev) {
			dev_err(kbdev->dev,
				"The configured priority control manager was not found");
		} else {
			struct priority_control_manager_device *pcm_dev =
						platform_get_drvdata(pdev);
			if (!pcm_dev) {
				dev_info(kbdev->dev, "Priority control manager is not ready");
				err = -EPROBE_DEFER;
			} else if (!try_module_get(pcm_dev->owner)) {
				dev_err(kbdev->dev, "Failed to get priority control manager module");
				err = -ENODEV;
			} else {
				dev_info(kbdev->dev, "Priority control manager successfully loaded");
				kbdev->pcm_dev = pcm_dev;
			}
		}
		of_node_put(prio_ctrl_node);
	}
#endif /* CONFIG_OF */

	return err;
}

void kbase_device_pcm_dev_term(struct kbase_device *const kbdev)
{
	if (kbdev->pcm_dev)
		module_put(kbdev->pcm_dev->owner);
}

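/* Convert a number of whole pages to a size in KiB: PAGE_SHIFT - 10 is
 * log2(PAGE_SIZE / 1024).
 */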
#define KBASE_PAGES_TO_KIB(pages) (((unsigned int)pages) << (PAGE_SHIFT - 10))

/**
 * mali_oom_notifier_handler - Mali driver out-of-memory handler
 *
 * @nb: notifier block - used to retrieve kbdev pointer
 * @action: action (unused)
 * @data: data pointer (unused)
 *
 * This function simply lists memory usage by the Mali driver, per GPU device,
 * for diagnostic purposes.
 *
 * Return: NOTIFY_OK on success, NOTIFY_BAD otherwise.
 */
static int mali_oom_notifier_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kbase_device *kbdev;
	struct kbase_context *kctx = NULL;
	unsigned long kbdev_alloc_total;

	if (WARN_ON(nb == NULL))
		return NOTIFY_BAD;

	kbdev = container_of(nb, struct kbase_device, oom_notifier_block);

	kbdev_alloc_total =
		KBASE_PAGES_TO_KIB(atomic_read(&(kbdev->memdev.used_pages)));

	dev_err(kbdev->dev, "OOM notifier: dev %s  %lu kB\n", kbdev->devname,
		kbdev_alloc_total);

	mutex_lock(&kbdev->kctx_list_lock);

	list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
		struct pid *pid_struct;
		struct task_struct *task;
		unsigned long task_alloc_total =
			KBASE_PAGES_TO_KIB(atomic_read(&(kctx->used_pages)));

		rcu_read_lock();
		pid_struct = find_get_pid(kctx->pid);
		task = pid_task(pid_struct, PIDTYPE_PID);

		dev_err(kbdev->dev,
			"OOM notifier: tsk %s  tgid (%u)  pid (%u) %lu kB\n",
			task ? task->comm : "[null task]", kctx->tgid,
			kctx->pid, task_alloc_total);

		put_pid(pid_struct);
		rcu_read_unlock();
	}

	mutex_unlock(&kbdev->kctx_list_lock);
	return NOTIFY_OK;
}

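/* kbase_device_misc_init() performs the bulk of the miscellaneous per-device
 * set-up: applying the HW issue/feature masks, configuring the DMA masks,
 * initialising the MMU address spaces, reading PBHA settings from the device
 * tree and registering the OOM notifier.
 */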
int kbase_device_misc_init(struct kbase_device * const kbdev)
{
	int err;
#if IS_ENABLED(CONFIG_ARM64)
	struct device_node *np = NULL;
#endif /* CONFIG_ARM64 */

	spin_lock_init(&kbdev->mmu_mask_change);
	mutex_init(&kbdev->mmu_hw_mutex);
#if IS_ENABLED(CONFIG_ARM64)
	kbdev->cci_snoop_enabled = false;
	np = kbdev->dev->of_node;
	if (np != NULL) {
		if (of_property_read_u32(np, "snoop_enable_smc",
					&kbdev->snoop_enable_smc))
			kbdev->snoop_enable_smc = 0;
		if (of_property_read_u32(np, "snoop_disable_smc",
					&kbdev->snoop_disable_smc))
			kbdev->snoop_disable_smc = 0;
		/* Either both or none of the calls should be provided. */
		if (!((kbdev->snoop_disable_smc == 0
			&& kbdev->snoop_enable_smc == 0)
			|| (kbdev->snoop_disable_smc != 0
			&& kbdev->snoop_enable_smc != 0))) {
			WARN_ON(1);
			err = -EINVAL;
			goto fail;
		}
	}
#endif /* CONFIG_ARM64 */

	/* Get the list of workarounds for issues on the current HW
	 * (identified by the GPU_ID register)
	 */
	err = kbase_hw_set_issues_mask(kbdev);
	if (err)
		goto fail;

	/* Set the list of features available on the current HW
	 * (identified by the GPU_ID register)
	 */
	kbase_hw_set_features_mask(kbdev);

	err = kbase_gpuprops_set_features(kbdev);
	if (err)
		goto fail;

	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
	 * device structure was created by device-tree
	 */
	if (!kbdev->dev->dma_mask)
		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;

	err = dma_set_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	err = dma_set_coherent_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;


	/* There is no limit for Mali, so set to max. */
	if (kbdev->dev->dma_parms)
		err = dma_set_max_seg_size(kbdev->dev, UINT_MAX);
	if (err)
		goto dma_set_mask_failed;

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	err = kbase_device_all_as_init(kbdev);
	if (err)
		goto dma_set_mask_failed;

	err = kbase_pbha_read_dtb(kbdev);
	if (err)
		goto term_as;

	init_waitqueue_head(&kbdev->cache_clean_wait);

	kbase_debug_assert_register_hook(&kbase_ktrace_hook_wrapper, kbdev);

	atomic_set(&kbdev->ctx_num, 0);

	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;

#if MALI_USE_CSF
	kbdev->reset_timeout_ms = kbase_get_timeout_ms(kbdev, CSF_CSG_SUSPEND_TIMEOUT);
#else
	kbdev->reset_timeout_ms = JM_DEFAULT_RESET_TIMEOUT_MS;
#endif /* MALI_USE_CSF */

	kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
	kbdev->mmu_as_inactive_wait_time_ms =
		kbase_get_timeout_ms(kbdev, MMU_AS_INACTIVE_WAIT_TIMEOUT);
	mutex_init(&kbdev->kctx_list_lock);
	INIT_LIST_HEAD(&kbdev->kctx_list);

	dev_dbg(kbdev->dev, "Registering mali_oom_notifier_handler\n");
	kbdev->oom_notifier_block.notifier_call = mali_oom_notifier_handler;
	err = register_oom_notifier(&kbdev->oom_notifier_block);

	if (err) {
		dev_err(kbdev->dev,
			"Unable to register OOM notifier for Mali - but will continue\n");
		kbdev->oom_notifier_block.notifier_call = NULL;
	}

#if !MALI_USE_CSF
	spin_lock_init(&kbdev->quick_reset_lock);
	kbdev->quick_reset_enabled = true;
	kbdev->num_of_atoms_hw_completed = 0;
#endif

#if MALI_USE_CSF && IS_ENABLED(CONFIG_SYNC_FILE)
	atomic_set(&kbdev->live_fence_metadata, 0);
#endif
	return 0;

term_as:
	kbase_device_all_as_term(kbdev);
dma_set_mask_failed:
fail:
	return err;
}

void kbase_device_misc_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	WARN_ON(!list_empty(&kbdev->kctx_list));

#if KBASE_KTRACE_ENABLE
	kbase_debug_assert_register_hook(NULL, NULL);
#endif
	kbase_device_all_as_term(kbdev);


	if (kbdev->oom_notifier_block.notifier_call)
		unregister_oom_notifier(&kbdev->oom_notifier_block);

#if MALI_USE_CSF && IS_ENABLED(CONFIG_SYNC_FILE)
	if (atomic_read(&kbdev->live_fence_metadata) > 0)
		dev_warn(kbdev->dev, "Terminating Kbase device with live fence metadata!");
#endif
}

#if !MALI_USE_CSF
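/* Job Manager (non-CSF) builds only: toggle the quick-reset flag under
 * quick_reset_lock and clear the count of atoms completed by the hardware;
 * kbase_is_quick_reset_enabled() reads the flag without taking the lock.
 */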
void kbase_enable_quick_reset(struct kbase_device *kbdev)
{
	spin_lock(&kbdev->quick_reset_lock);

	kbdev->quick_reset_enabled = true;
	kbdev->num_of_atoms_hw_completed = 0;

	spin_unlock(&kbdev->quick_reset_lock);
}

void kbase_disable_quick_reset(struct kbase_device *kbdev)
{
	spin_lock(&kbdev->quick_reset_lock);

	kbdev->quick_reset_enabled = false;
	kbdev->num_of_atoms_hw_completed = 0;

	spin_unlock(&kbdev->quick_reset_lock);
}

bool kbase_is_quick_reset_enabled(struct kbase_device *kbdev)
{
	return kbdev->quick_reset_enabled;
}
#endif

void kbase_device_free(struct kbase_device *kbdev)
{
	kfree(kbdev);
}

void kbase_device_id_init(struct kbase_device *kbdev)
{
	scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
			kbase_dev_nr);
	kbdev->id = kbase_dev_nr;
}

void kbase_increment_device_id(void)
{
	kbase_dev_nr++;
}

int kbase_device_hwcnt_context_init(struct kbase_device *kbdev)
{
	return kbase_hwcnt_context_init(&kbdev->hwcnt_gpu_iface,
			&kbdev->hwcnt_gpu_ctx);
}

void kbase_device_hwcnt_context_term(struct kbase_device *kbdev)
{
	kbase_hwcnt_context_term(kbdev->hwcnt_gpu_ctx);
}

int kbase_device_hwcnt_virtualizer_init(struct kbase_device *kbdev)
{
	return kbase_hwcnt_virtualizer_init(kbdev->hwcnt_gpu_ctx,
			KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS,
			&kbdev->hwcnt_gpu_virt);
}

void kbase_device_hwcnt_virtualizer_term(struct kbase_device *kbdev)
{
	kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
}

int kbase_device_timeline_init(struct kbase_device *kbdev)
{
	atomic_set(&kbdev->timeline_flags, 0);
	return kbase_timeline_init(&kbdev->timeline, &kbdev->timeline_flags);
}

void kbase_device_timeline_term(struct kbase_device *kbdev)
{
	kbase_timeline_term(kbdev->timeline);
}

int kbase_device_vinstr_init(struct kbase_device *kbdev)
{
	return kbase_vinstr_init(kbdev->hwcnt_gpu_virt, &kbdev->vinstr_ctx);
}

void kbase_device_vinstr_term(struct kbase_device *kbdev)
{
	kbase_vinstr_term(kbdev->vinstr_ctx);
}

int kbase_device_kinstr_prfcnt_init(struct kbase_device *kbdev)
{
	return kbase_kinstr_prfcnt_init(kbdev->hwcnt_gpu_virt,
					&kbdev->kinstr_prfcnt_ctx);
}

void kbase_device_kinstr_prfcnt_term(struct kbase_device *kbdev)
{
	kbase_kinstr_prfcnt_term(kbdev->kinstr_prfcnt_ctx);
}

int kbase_device_io_history_init(struct kbase_device *kbdev)
{
	return kbase_io_history_init(&kbdev->io_history,
			KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
}

void kbase_device_io_history_term(struct kbase_device *kbdev)
{
	kbase_io_history_term(&kbdev->io_history);
}

int kbase_device_misc_register(struct kbase_device *kbdev)
{
	return misc_register(&kbdev->mdev);
}

void kbase_device_misc_deregister(struct kbase_device *kbdev)
{
	misc_deregister(&kbdev->mdev);
}

int kbase_device_list_init(struct kbase_device *kbdev)
{
	const struct list_head *dev_list;

	dev_list = kbase_device_get_list();
	list_add(&kbdev->entry, &kbase_dev_list);
	kbase_device_put_list(dev_list);

	return 0;
}

void kbase_device_list_term(struct kbase_device *kbdev)
{
	const struct list_head *dev_list;

	dev_list = kbase_device_get_list();
	list_del(&kbdev->entry);
	kbase_device_put_list(dev_list);
}

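/* kbase_device_get_list() returns the device list with kbase_dev_list_lock
 * held; each call must be balanced by kbase_device_put_list(), which releases
 * the lock.
 */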
const struct list_head *kbase_device_get_list(void)
{
	mutex_lock(&kbase_dev_list_lock);
	return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_device_get_list);

void kbase_device_put_list(const struct list_head *dev_list)
{
	mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_device_put_list);

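/* kbase_device_early_init() brings up the minimum state needed before the
 * rest of probing can run: ktrace, the platform hooks, runtime PM, a one-off
 * read of the GPU property registers (with register access temporarily
 * enabled) and the interrupt handlers. kbase_device_early_term() reverses
 * these steps.
 */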
int kbase_device_early_init(struct kbase_device *kbdev)
{
	int err;

	err = kbase_ktrace_init(kbdev);
	if (err)
		return err;


	err = kbasep_platform_device_init(kbdev);
	if (err)
		goto ktrace_term;

	err = kbase_pm_runtime_init(kbdev);
	if (err)
		goto fail_runtime_pm;

	/* This spinlock is initialized before doing the first access to GPU
	 * registers and installing interrupt handlers.
	 */
	spin_lock_init(&kbdev->hwaccess_lock);

	/* Ensure we can access the GPU registers */
	kbase_pm_register_access_enable(kbdev);

	/*
	 * Find out GPU properties based on the GPU feature registers.
	 * Note that this does not populate the few properties that depend on
	 * hw_features being initialized. Those are set by kbase_gpuprops_set_features
	 * soon after this in the init process.
	 */
	kbase_gpuprops_set(kbdev);

	/* We're done accessing the GPU registers for now. */
	kbase_pm_register_access_disable(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if)
		err = kbase_arbiter_pm_install_interrupts(kbdev);
	else
		err = kbase_install_interrupts(kbdev);
#else
	err = kbase_install_interrupts(kbdev);
#endif
	if (err)
		goto fail_interrupts;

	return 0;

fail_interrupts:
	kbase_pm_runtime_term(kbdev);
fail_runtime_pm:
	kbasep_platform_device_term(kbdev);
ktrace_term:
	kbase_ktrace_term(kbdev);

	return err;
}

void kbase_device_early_term(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if)
		kbase_arbiter_pm_release_interrupts(kbdev);
	else
		kbase_release_interrupts(kbdev);
#else
	kbase_release_interrupts(kbdev);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
	kbase_pm_runtime_term(kbdev);
	kbasep_platform_device_term(kbdev);
	kbase_ktrace_term(kbdev);
}

int kbase_device_late_init(struct kbase_device *kbdev)
{
	int err;

	err = kbasep_platform_device_late_init(kbdev);

	return err;
}

void kbase_device_late_term(struct kbase_device *kbdev)
{
	kbasep_platform_device_late_term(kbdev);
}