1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3 *
4 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 #include <mali_kbase.h>
23 #include <mali_kbase_config_defaults.h>
24 #include <gpu/mali_kbase_gpu_regmap.h>
25 #include <mali_kbase_gator.h>
26 #include <mali_kbase_mem_linux.h>
27 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
28 #include <linux/devfreq.h>
29 #include <backend/gpu/mali_kbase_devfreq.h>
30 #if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
31 #include <ipa/mali_kbase_ipa_debugfs.h>
32 #endif /* CONFIG_DEVFREQ_THERMAL */
33 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
34 #include "backend/gpu/mali_kbase_model_linux.h"
35 #include "uapi/gpu/arm/bifrost/mali_kbase_mem_profile_debugfs_buf_size.h"
36 #include "mali_kbase_mem.h"
37 #include "mali_kbase_mem_pool_debugfs.h"
38 #include "mali_kbase_mem_pool_group.h"
39 #include "mali_kbase_debugfs_helper.h"
40 #include "mali_kbase_regs_history_debugfs.h"
41 #include <mali_kbase_hwaccess_backend.h>
42 #include <mali_kbase_hwaccess_time.h>
43 #if !MALI_USE_CSF
44 #include <mali_kbase_hwaccess_jm.h>
45 #endif /* !MALI_USE_CSF */
46 #ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
47 #include <mali_kbase_hwaccess_instr.h>
48 #endif
49 #include <mali_kbase_reset_gpu.h>
50 #include <uapi/gpu/arm/bifrost/mali_kbase_ioctl.h>
51 #if !MALI_USE_CSF
52 #include "mali_kbase_kinstr_jm.h"
53 #endif
54 #include "hwcnt/mali_kbase_hwcnt_context.h"
55 #include "hwcnt/mali_kbase_hwcnt_virtualizer.h"
56 #include "mali_kbase_kinstr_prfcnt.h"
57 #include "mali_kbase_vinstr.h"
58 #if MALI_USE_CSF
59 #include "csf/mali_kbase_csf_firmware.h"
60 #include "csf/mali_kbase_csf_tiler_heap.h"
61 #include "csf/mali_kbase_csf_csg_debugfs.h"
62 #include "csf/mali_kbase_csf_cpu_queue_debugfs.h"
63 #include "csf/mali_kbase_csf_event.h"
64 #endif
65 #ifdef CONFIG_MALI_ARBITER_SUPPORT
66 #include "arbiter/mali_kbase_arbiter_pm.h"
67 #endif
68
69 #include "mali_kbase_cs_experimental.h"
70
71 #ifdef CONFIG_MALI_CINSTR_GWT
72 #include "mali_kbase_gwt.h"
73 #endif
74 #include "backend/gpu/mali_kbase_pm_internal.h"
75 #include "mali_kbase_dvfs_debugfs.h"
76 #if IS_ENABLED(CONFIG_DEBUG_FS)
77 #include "mali_kbase_pbha_debugfs.h"
78 #endif
79
80 #include <linux/module.h>
81 #include <linux/init.h>
82 #include <linux/poll.h>
83 #include <linux/kernel.h>
84 #include <linux/errno.h>
85 #include <linux/of.h>
86 #include <linux/of_address.h>
87 #include <linux/platform_device.h>
88 #include <linux/of_platform.h>
89 #include <linux/miscdevice.h>
90 #include <linux/list.h>
91 #include <linux/semaphore.h>
92 #include <linux/fs.h>
93 #include <linux/uaccess.h>
94 #include <linux/interrupt.h>
95 #include <linux/irq.h>
96 #include <linux/mm.h>
97 #include <linux/compat.h> /* is_compat_task/in_compat_syscall */
98 #include <linux/mman.h>
99 #include <linux/version.h>
100 #include <linux/version_compat_defs.h>
101 #include <mali_kbase_hw.h>
102 #if IS_ENABLED(CONFIG_SYNC_FILE)
103 #include <mali_kbase_sync.h>
104 #endif /* CONFIG_SYNC_FILE */
105 #include <linux/clk.h>
106 #include <linux/clk-provider.h>
107 #include <linux/delay.h>
108 #include <linux/log2.h>
109
110 #include <mali_kbase_config.h>
111
112 #include <linux/pm_opp.h>
113 #include <soc/rockchip/rockchip_opp_select.h>
114 #include <linux/pm_runtime.h>
115
116 #include <tl/mali_kbase_timeline.h>
117
118 #include <mali_kbase_as_fault_debugfs.h>
119 #include <device/mali_kbase_device.h>
120 #include <context/mali_kbase_context.h>
121
122 #include <mali_kbase_caps.h>
123
124 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
125
126 /**
127 * KBASE_API_VERSION - KBase API Version
128 * @major: Kernel major version
129 * @minor: Kernel minor version
130 */
131 #define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \
132 (((minor) & 0xFFF) << 8) | \
133 ((0 & 0xFF) << 0))
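/*
 * For illustration: KBASE_API_VERSION(11, 25) encodes to
 * (11 << 20) | (25 << 8) | 0 = 0xB01900, so encoded versions can be
 * compared with plain integer comparisons (as done in
 * mali_kbase_supports_cap() below).
 */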
134
135 /**
136 * struct mali_kbase_capability_def - kbase capabilities table
137 *
138  * @required_major: Required major version
139  * @required_minor: Required minor version
140 */
141 struct mali_kbase_capability_def {
142 u16 required_major;
143 u16 required_minor;
144 };
145
146 /*
147 * This must be kept in-sync with mali_kbase_cap
148 *
149 * TODO: The alternative approach would be to embed the cap enum values
150 * in the table. Less efficient but potentially safer.
151 */
152 static const struct mali_kbase_capability_def kbase_caps_table[MALI_KBASE_NUM_CAPS] = {
153 #if MALI_USE_CSF
154 { 1, 0 }, /* SYSTEM_MONITOR */
155 { 1, 0 }, /* JIT_PRESSURE_LIMIT */
156 { 1, 0 }, /* MEM_GROW_ON_GPF */
157 { 1, 0 } /* MEM_PROTECTED */
158 #else
159 { 11, 15 }, /* SYSTEM_MONITOR */
160 { 11, 25 }, /* JIT_PRESSURE_LIMIT */
161 { 11, 2 }, /* MEM_GROW_ON_GPF */
162 { 11, 2 } /* MEM_PROTECTED */
163 #endif
164 };
165
166 #if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
167 /* Mutex to synchronize the probe of multiple kbase instances */
168 static struct mutex kbase_probe_mutex;
169 #endif
170
171 #ifndef CONFIG_MALI_BIFROST_DEVFREQ
172 static inline int kbase_devfreq_opp_helper(struct dev_pm_set_opp_data *data)
173 {
174 return -EOPNOTSUPP;
175 }
176 #endif
177
178 /**
179 * mali_kbase_supports_cap - Query whether a kbase capability is supported
180 *
181  * @api_version: API version to check the capability against
182 * @cap: Capability to query for - see mali_kbase_caps.h
183 *
184 * Return: true if the capability is supported
185 */
186 bool mali_kbase_supports_cap(unsigned long api_version, enum mali_kbase_cap cap)
187 {
188 bool supported = false;
189 unsigned long required_ver;
190
191 struct mali_kbase_capability_def const *cap_def;
192
193 if (WARN_ON(cap < 0))
194 return false;
195
196 if (WARN_ON(cap >= MALI_KBASE_NUM_CAPS))
197 return false;
198
199 cap_def = &kbase_caps_table[(int)cap];
200 required_ver = KBASE_API_VERSION(cap_def->required_major, cap_def->required_minor);
201 supported = (api_version >= required_ver);
202
203 return supported;
204 }
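/*
 * For example, with the non-CSF table above an api_version of
 * KBASE_API_VERSION(11, 24) satisfies SYSTEM_MONITOR (11.15 required)
 * but not JIT_PRESSURE_LIMIT (11.25 required).
 */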
205
206 /**
207 * kbase_file_new - Create an object representing a device file
208 *
209 * @kbdev: An instance of the GPU platform device, allocated from the probe
210 * method of the driver.
211 * @filp: Pointer to the struct file corresponding to device file
212 * /dev/malixx instance, passed to the file's open method.
213 *
214 * In its initial state, the device file has no context (i.e. no GPU
215 * address space) and no API version number. Both must be assigned before
216 * kbase_file_get_kctx_if_setup_complete() can be used successfully.
217 *
218 * Return: Address of an object representing a simulated device file, or NULL
219 * on failure.
220 */
221 static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
222 struct file *const filp)
223 {
224 struct kbase_file *const kfile = kmalloc(sizeof(*kfile), GFP_KERNEL);
225
226 if (kfile) {
227 kfile->kbdev = kbdev;
228 kfile->filp = filp;
229 kfile->kctx = NULL;
230 kfile->api_version = 0;
231 atomic_set(&kfile->setup_state, KBASE_FILE_NEED_VSN);
232 }
233 return kfile;
234 }
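/*
 * Note: a kbase_file moves through the setup states KBASE_FILE_NEED_VSN ->
 * KBASE_FILE_VSN_IN_PROGRESS -> KBASE_FILE_NEED_CTX ->
 * KBASE_FILE_CTX_IN_PROGRESS -> KBASE_FILE_COMPLETE as the version handshake
 * and context-creation ioctls below are handled. Only a file in the
 * KBASE_FILE_COMPLETE state yields a context from
 * kbase_file_get_kctx_if_setup_complete().
 */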
235
236 /**
237 * kbase_file_set_api_version - Set the application programmer interface version
238 *
239 * @kfile: A device file created by kbase_file_new()
240 * @major: Major version number (must not exceed 12 bits)
241  * @minor: Minor version number (must not exceed 12 bits)
242 *
243 * An application programmer interface (API) version must be specified
244 * before calling kbase_file_create_kctx(), otherwise an error is returned.
245 *
246 * If a version number was already set for the given @kfile (or is in the
247 * process of being set by another thread) then an error is returned.
248 *
249 * Return: 0 if successful, otherwise a negative error code.
250 */
251 static int kbase_file_set_api_version(struct kbase_file *const kfile,
252 u16 const major, u16 const minor)
253 {
254 if (WARN_ON(!kfile))
255 return -EINVAL;
256
257 /* Setup is pending: try to signal that we will do the setup.
258 * If setup was already in progress, fail this call.
259 */
260 if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN,
261 KBASE_FILE_VSN_IN_PROGRESS) != KBASE_FILE_NEED_VSN)
262 return -EPERM;
263
264 /* save the proposed version number for later use */
265 kfile->api_version = KBASE_API_VERSION(major, minor);
266
267 atomic_set(&kfile->setup_state, KBASE_FILE_NEED_CTX);
268 return 0;
269 }
270
271 /**
272 * kbase_file_get_api_version - Get the application programmer interface version
273 *
274 * @kfile: A device file created by kbase_file_new()
275 *
276 * Return: The version number (encoded with KBASE_API_VERSION) or 0 if none has
277 * been set.
278 */
279 static unsigned long kbase_file_get_api_version(struct kbase_file *const kfile)
280 {
281 if (WARN_ON(!kfile))
282 return 0;
283
284 if (atomic_read(&kfile->setup_state) < KBASE_FILE_NEED_CTX)
285 return 0;
286
287 return kfile->api_version;
288 }
289
290 /**
291 * kbase_file_create_kctx - Create a kernel base context
292 *
293 * @kfile: A device file created by kbase_file_new()
294 * @flags: Flags to set, which can be any combination of
295 * BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
296 *
297 * This creates a new context for the GPU platform device instance that was
298 * specified when kbase_file_new() was called. Each context has its own GPU
299 * address space. If a context was already created for the given @kfile (or is
300 * in the process of being created for it by another thread) then an error is
301 * returned.
302 *
303 * An API version number must have been set by kbase_file_set_api_version()
304 * before calling this function, otherwise an error is returned.
305 *
306 * Return: 0 if a new context was created, otherwise a negative error code.
307 */
308 static int kbase_file_create_kctx(struct kbase_file *kfile,
309 base_context_create_flags flags);
310
311 /**
312 * kbase_file_get_kctx_if_setup_complete - Get a kernel base context
313 * pointer from a device file
314 *
315 * @kfile: A device file created by kbase_file_new()
316 *
317 * This function returns NULL if no context has been created for the given @kfile.
318 * This makes it safe to use in circumstances where the order of initialization
319 * cannot be enforced, but only if the caller checks the return value.
320 *
321 * Return: Address of the kernel base context associated with the @kfile, or
322 * NULL if no context exists.
323 */
324 static struct kbase_context *kbase_file_get_kctx_if_setup_complete(
325 struct kbase_file *const kfile)
326 {
327 if (WARN_ON(!kfile) ||
328 atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
329 WARN_ON(!kfile->kctx))
330 return NULL;
331
332 return kfile->kctx;
333 }
334
335 /**
336 * kbase_file_delete - Destroy an object representing a device file
337 *
338 * @kfile: A device file created by kbase_file_new()
339 *
340 * If any context was created for the @kfile then it is destroyed.
341 */
342 static void kbase_file_delete(struct kbase_file *const kfile)
343 {
344 struct kbase_device *kbdev = NULL;
345
346 if (WARN_ON(!kfile))
347 return;
348
349 kfile->filp->private_data = NULL;
350 kbdev = kfile->kbdev;
351
352 if (atomic_read(&kfile->setup_state) == KBASE_FILE_COMPLETE) {
353 struct kbase_context *kctx = kfile->kctx;
354
355 #if IS_ENABLED(CONFIG_DEBUG_FS)
356 kbasep_mem_profile_debugfs_remove(kctx);
357 #endif
358 kbase_context_debugfs_term(kctx);
359
360 kbase_destroy_context(kctx);
361
362 dev_dbg(kbdev->dev, "deleted base context\n");
363 }
364
365 kbase_release_device(kbdev);
366
367 kfree(kfile);
368 }
369
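/*
 * kbase_api_handshake - Negotiate the UK interface version with user space.
 *
 * If the requested major version matches BASE_UK_VERSION_MAJOR, the minor
 * version is clamped to the lower of the two; otherwise the driver reports
 * its own version and user space may bail out. The agreed version is then
 * recorded with kbase_file_set_api_version().
 */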
370 static int kbase_api_handshake(struct kbase_file *kfile,
371 struct kbase_ioctl_version_check *version)
372 {
373 int err = 0;
374
375 switch (version->major) {
376 case BASE_UK_VERSION_MAJOR:
377 /* set minor to be the lowest common */
378 version->minor = min_t(int, BASE_UK_VERSION_MINOR,
379 (int)version->minor);
380 break;
381 default:
382 /* We return our actual version regardless of whether it
383 * matches the version requested by userspace -
384 * userspace can bail out if it can't handle this
385 * version.
386 */
387 version->major = BASE_UK_VERSION_MAJOR;
388 version->minor = BASE_UK_VERSION_MINOR;
389 break;
390 }
391
392 /* save the proposed version number for later use */
393 err = kbase_file_set_api_version(kfile, version->major, version->minor);
394 if (unlikely(err))
395 return err;
396
397 /* For backward compatibility, we may need to create the context before
398 * the flags have been set. Originally it was created on file open
399 * (with job submission disabled) but we don't support that usage.
400 */
401 if (!mali_kbase_supports_system_monitor(kbase_file_get_api_version(kfile)))
402 err = kbase_file_create_kctx(kfile,
403 BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
404
405 return err;
406 }
407
408 static int kbase_api_handshake_dummy(struct kbase_file *kfile,
409 struct kbase_ioctl_version_check *version)
410 {
411 return -EPERM;
412 }
413
414 static int kbase_api_kinstr_prfcnt_enum_info(
415 struct kbase_file *kfile,
416 struct kbase_ioctl_kinstr_prfcnt_enum_info *prfcnt_enum_info)
417 {
418 return kbase_kinstr_prfcnt_enum_info(kfile->kbdev->kinstr_prfcnt_ctx,
419 prfcnt_enum_info);
420 }
421
422 static int kbase_api_kinstr_prfcnt_setup(
423 struct kbase_file *kfile,
424 union kbase_ioctl_kinstr_prfcnt_setup *prfcnt_setup)
425 {
426 return kbase_kinstr_prfcnt_setup(kfile->kbdev->kinstr_prfcnt_ctx,
427 prfcnt_setup);
428 }
429
430 static struct kbase_device *to_kbase_device(struct device *dev)
431 {
432 return dev_get_drvdata(dev);
433 }
434
435 int assign_irqs(struct kbase_device *kbdev)
436 {
437 static const char *const irq_names_caps[] = { "JOB", "MMU", "GPU" };
438
439 #if IS_ENABLED(CONFIG_OF)
440 static const char *const irq_names[] = { "job", "mmu", "gpu" };
441 #endif
442
443 struct platform_device *pdev;
444 int i;
445
446 if (!kbdev)
447 return -ENODEV;
448
449 pdev = to_platform_device(kbdev->dev);
450
451 for (i = 0; i < ARRAY_SIZE(irq_names_caps); i++) {
452 int irq;
453
454 #if IS_ENABLED(CONFIG_OF)
455 /* Upper-case IRQ names are recommended in the DT, but devices in the
456 * field may use lower-case names and support for them must not be
457 * broken. So try the upper-case names first and then fall back to the
458 * lower-case names. If both attempts fail, assume no IRQ resource is
459 * specified for the GPU.
460 */
461 irq = platform_get_irq_byname(pdev, irq_names_caps[i]);
462 if (irq < 0)
463 irq = platform_get_irq_byname(pdev, irq_names[i]);
464 #else
465 irq = platform_get_irq(pdev, i);
466 #endif /* CONFIG_OF */
467
468 if (irq < 0) {
469 dev_err(kbdev->dev, "No IRQ resource '%s'\n", irq_names_caps[i]);
470 return irq;
471 }
472
473 kbdev->irqs[i].irq = irq;
474 kbdev->irqs[i].flags = irqd_get_trigger_type(irq_get_irq_data(irq));
475 }
476
477 return 0;
478 }
479
480 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
481 struct kbase_device *kbase_find_device(int minor)
482 {
483 struct kbase_device *kbdev = NULL;
484 struct list_head *entry;
485 const struct list_head *dev_list = kbase_device_get_list();
486
487 list_for_each(entry, dev_list) {
488 struct kbase_device *tmp;
489
490 tmp = list_entry(entry, struct kbase_device, entry);
491 if (tmp->mdev.minor == minor || minor == -1) {
492 kbdev = tmp;
493 get_device(kbdev->dev);
494 break;
495 }
496 }
497 kbase_device_put_list(dev_list);
498
499 return kbdev;
500 }
501 EXPORT_SYMBOL(kbase_find_device);
502
503 void kbase_release_device(struct kbase_device *kbdev)
504 {
505 put_device(kbdev->dev);
506 }
507 EXPORT_SYMBOL(kbase_release_device);
508
509 #if IS_ENABLED(CONFIG_DEBUG_FS)
510 static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
511 {
512 struct kbase_context *kctx = f->private_data;
513 int err;
514 bool value;
515
516 err = kstrtobool_from_user(ubuf, size, &value);
517 if (err)
518 return err;
519
520 if (value)
521 kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
522 else
523 kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
524
525 return size;
526 }
527
528 static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
529 {
530 struct kbase_context *kctx = f->private_data;
531 char buf[32];
532 int count;
533 bool value;
534
535 value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
536
537 count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
538
539 return simple_read_from_buffer(ubuf, size, off, buf, count);
540 }
541
542 static const struct file_operations kbase_infinite_cache_fops = {
543 .owner = THIS_MODULE,
544 .open = simple_open,
545 .write = write_ctx_infinite_cache,
546 .read = read_ctx_infinite_cache,
547 };
548
549 static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
550 size_t size, loff_t *off)
551 {
552 struct kbase_context *kctx = f->private_data;
553 int err;
554 bool value;
555
556 err = kstrtobool_from_user(ubuf, size, &value);
557 if (err)
558 return err;
559
560 if (value) {
561 #if defined(CONFIG_64BIT)
562 /* 32-bit clients cannot force SAME_VA */
563 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
564 return -EINVAL;
565 kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
566 #else /* defined(CONFIG_64BIT) */
567 /* 32-bit clients cannot force SAME_VA */
568 return -EINVAL;
569 #endif /* defined(CONFIG_64BIT) */
570 } else {
571 kbase_ctx_flag_clear(kctx, KCTX_FORCE_SAME_VA);
572 }
573
574 return size;
575 }
576
577 static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf,
578 size_t size, loff_t *off)
579 {
580 struct kbase_context *kctx = f->private_data;
581 char buf[32];
582 int count;
583 bool value;
584
585 value = kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA);
586
587 count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
588
589 return simple_read_from_buffer(ubuf, size, off, buf, count);
590 }
591
592 static const struct file_operations kbase_force_same_va_fops = {
593 .owner = THIS_MODULE,
594 .open = simple_open,
595 .write = write_ctx_force_same_va,
596 .read = read_ctx_force_same_va,
597 };
598 #endif /* CONFIG_DEBUG_FS */
599
600 static int kbase_file_create_kctx(struct kbase_file *const kfile,
601 base_context_create_flags const flags)
602 {
603 struct kbase_device *kbdev = NULL;
604 struct kbase_context *kctx = NULL;
605 #if IS_ENABLED(CONFIG_DEBUG_FS)
606 char kctx_name[64];
607 #endif
608
609 if (WARN_ON(!kfile))
610 return -EINVAL;
611
612 /* Setup is pending: try to signal that we will do the setup.
613 * If setup was already in progress, fail this call.
614 */
615 if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX,
616 KBASE_FILE_CTX_IN_PROGRESS) != KBASE_FILE_NEED_CTX)
617 return -EPERM;
618
619 kbdev = kfile->kbdev;
620
621 kctx = kbase_create_context(kbdev, in_compat_syscall(),
622 flags, kfile->api_version, kfile->filp);
623
624 /* if bad flags, will stay stuck in setup mode */
625 if (!kctx)
626 return -ENOMEM;
627
628 if (kbdev->infinite_cache_active_default)
629 kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
630
631 #if IS_ENABLED(CONFIG_DEBUG_FS)
632 if (unlikely(!scnprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id)))
633 return -ENOMEM;
634
635 mutex_init(&kctx->mem_profile_lock);
636
637 kctx->kctx_dentry = debugfs_create_dir(kctx_name,
638 kbdev->debugfs_ctx_directory);
639
640 if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
641 /* we don't treat this as a fail - just warn about it */
642 dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
643 } else {
644 debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
645 kctx, &kbase_infinite_cache_fops);
646 debugfs_create_file("force_same_va", 0600, kctx->kctx_dentry,
647 kctx, &kbase_force_same_va_fops);
648
649 kbase_context_debugfs_init(kctx);
650 }
651 #endif /* CONFIG_DEBUG_FS */
652
653 dev_dbg(kbdev->dev, "created base context\n");
654
655 kfile->kctx = kctx;
656 atomic_set(&kfile->setup_state, KBASE_FILE_COMPLETE);
657
658 return 0;
659 }
660
661 static int kbase_open(struct inode *inode, struct file *filp)
662 {
663 struct kbase_device *kbdev = NULL;
664 struct kbase_file *kfile;
665 int ret = 0;
666
667 kbdev = kbase_find_device(iminor(inode));
668
669 if (!kbdev)
670 return -ENODEV;
671
672 #if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
673 /* Set address space operations for page migration */
674 kbase_mem_migrate_set_address_space_ops(kbdev, filp);
675 #endif
676
677 /* Device-wide firmware load is moved here from probing to comply with
678 * the Android GKI vendor guidelines.
679 */
680 ret = kbase_device_firmware_init_once(kbdev);
681 if (ret)
682 goto out;
683
684 kfile = kbase_file_new(kbdev, filp);
685 if (!kfile) {
686 ret = -ENOMEM;
687 goto out;
688 }
689
690 filp->private_data = kfile;
691 filp->f_mode |= FMODE_UNSIGNED_OFFSET;
692
693 return 0;
694
695 out:
696 kbase_release_device(kbdev);
697 return ret;
698 }
699
700 static int kbase_release(struct inode *inode, struct file *filp)
701 {
702 struct kbase_file *const kfile = filp->private_data;
703
704 kbase_file_delete(kfile);
705 return 0;
706 }
707
708 static int kbase_api_set_flags(struct kbase_file *kfile,
709 struct kbase_ioctl_set_flags *flags)
710 {
711 int err = 0;
712 unsigned long const api_version = kbase_file_get_api_version(kfile);
713 struct kbase_context *kctx = NULL;
714
715 /* Validate flags */
716 if (flags->create_flags !=
717 (flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS))
718 return -EINVAL;
719
720 /* For backward compatibility, the context may have been created before
721 * the flags were set.
722 */
723 if (mali_kbase_supports_system_monitor(api_version)) {
724 err = kbase_file_create_kctx(kfile, flags->create_flags);
725 } else {
726 #if !MALI_USE_CSF
727 struct kbasep_js_kctx_info *js_kctx_info = NULL;
728 unsigned long irq_flags = 0;
729 #endif
730
731 /* If setup is incomplete (e.g. because the API version
732 * wasn't set) then we have to give up.
733 */
734 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
735 if (unlikely(!kctx))
736 return -EPERM;
737
738 #if MALI_USE_CSF
739 /* On CSF GPUs Job Manager interface isn't used to submit jobs
740 * (there are no job slots). So the legacy job manager path to
741 * submit jobs needs to remain disabled for CSF GPUs.
742 */
743 #else
744 js_kctx_info = &kctx->jctx.sched_info;
745 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
746 spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
747 /* Translate the flags */
748 if ((flags->create_flags &
749 BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
750 kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
751
752
753 spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
754 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
755 #endif
756 }
757
758 return err;
759 }
760
761 #if !MALI_USE_CSF
762 static int kbase_api_job_submit(struct kbase_context *kctx,
763 struct kbase_ioctl_job_submit *submit)
764 {
765 return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
766 submit->nr_atoms,
767 submit->stride, false);
768 }
769 #endif /* !MALI_USE_CSF */
770
771 static int kbase_api_get_gpuprops(struct kbase_file *kfile,
772 struct kbase_ioctl_get_gpuprops *get_props)
773 {
774 struct kbase_gpu_props *kprops = &kfile->kbdev->gpu_props;
775 int err;
776
777 if (get_props->flags != 0) {
778 dev_err(kfile->kbdev->dev, "Unsupported flags to get_gpuprops");
779 return -EINVAL;
780 }
781
782 if (get_props->size == 0)
783 return kprops->prop_buffer_size;
784 if (get_props->size < kprops->prop_buffer_size)
785 return -EINVAL;
786
787 err = copy_to_user(u64_to_user_ptr(get_props->buffer),
788 kprops->prop_buffer,
789 kprops->prop_buffer_size);
790 if (err)
791 return -EFAULT;
792 return kprops->prop_buffer_size;
793 }
794
795 #if !MALI_USE_CSF
796 static int kbase_api_post_term(struct kbase_context *kctx)
797 {
798 kbase_event_close(kctx);
799 return 0;
800 }
801 #endif /* !MALI_USE_CSF */
802
803 #if MALI_USE_CSF
804 static int kbase_api_mem_alloc_ex(struct kbase_context *kctx,
805 union kbase_ioctl_mem_alloc_ex *alloc_ex)
806 {
807 struct kbase_va_region *reg;
808 u64 flags = alloc_ex->in.flags;
809 u64 gpu_va;
810
811 /* Calls to this function are inherently asynchronous with respect to
812 * MMU operations.
813 */
814 const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
815
816 bool gpu_executable = (flags & BASE_MEM_PROT_GPU_EX) && kbase_has_exec_va_zone(kctx);
817 bool fixed_or_fixable = (flags & (BASE_MEM_FIXED | BASE_MEM_FIXABLE));
818
819 if (!kbase_mem_allow_alloc(kctx))
820 return -EINVAL;
821
822 /* The driver counts the number of FIXABLE and FIXED allocations because
823 * they're not supposed to happen at the same time. However, that is not
824 * a security concern: nothing bad happens if the two types of allocations
825 * are made at the same time. The only reason why the driver is guarding
826 * against them is because there's no client use case that is supposed
827 * to need both of them at the same time, and the driver wants to help
828 * user space catch obvious mistakes.
829 *
830 * The driver is able to switch from FIXABLE allocations to FIXED and
831 * vice versa, if all the allocations of one kind are freed before trying
832 * to create allocations of a different kind.
833 */
834 if ((flags & BASE_MEM_FIXED) && (atomic64_read(&kctx->num_fixable_allocs) > 0))
835 return -EINVAL;
836
837 if ((flags & BASE_MEM_FIXABLE) && (atomic64_read(&kctx->num_fixed_allocs) > 0))
838 return -EINVAL;
839
840 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
841 return -ENOMEM;
842
843 /* The fixed_address parameter must be either a non-zero, page-aligned
844 * value for FIXED allocations or zero for any other kind of allocation.
845 */
846 if (flags & BASE_MEM_FIXED) {
847 u64 aligned_fixed_address = alloc_ex->in.fixed_address & PAGE_MASK;
848
849 if ((aligned_fixed_address == 0) ||
850 (aligned_fixed_address != alloc_ex->in.fixed_address))
851 return -EINVAL;
852
853 gpu_va = aligned_fixed_address;
854 } else if (alloc_ex->in.fixed_address != 0) {
855 return -EINVAL;
856 }
857
858 /* For 64-bit clients, force SAME_VA up to 2^(47)-1.
859 * For 32-bit clients, force SAME_VA up to 2^(32)-1.
860 *
861 * In both cases, the executable and fixed/fixable zones, and
862 * the executable+fixed/fixable zone, are all above this range.
863 */
864 if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
865 kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
866 if (!gpu_executable && !fixed_or_fixable)
867 flags |= BASE_MEM_SAME_VA;
868 }
869
870 /* If CSF event memory allocation, need to force certain flags.
871 * SAME_VA - GPU address needs to be used as a CPU address, explicit
872 * mmap has to be avoided.
873 * CACHED_CPU - Frequent access to the event memory by CPU.
874 * COHERENT_SYSTEM - No explicit cache maintenance around the access
875 * to event memory so need to leverage the coherency support.
876 */
877 if (flags & BASE_MEM_CSF_EVENT) {
878 /* We cannot honor this request */
879 if (gpu_executable || fixed_or_fixable)
880 return -ENOMEM;
881
882 flags |= (BASE_MEM_SAME_VA |
883 BASE_MEM_CACHED_CPU |
884 BASE_MEM_COHERENT_SYSTEM);
885 }
886
887 reg = kbase_mem_alloc(kctx, alloc_ex->in.va_pages, alloc_ex->in.commit_pages,
888 alloc_ex->in.extension, &flags, &gpu_va, mmu_sync_info);
889
890 if (!reg)
891 return -ENOMEM;
892
893 alloc_ex->out.flags = flags;
894 alloc_ex->out.gpu_va = gpu_va;
895
896 return 0;
897 }
898
899 static int kbase_api_mem_alloc(struct kbase_context *kctx, union kbase_ioctl_mem_alloc *alloc)
900 {
901 int ret;
902 union kbase_ioctl_mem_alloc_ex mem_alloc_ex = { { 0 } };
903
904 mem_alloc_ex.in.va_pages = alloc->in.va_pages;
905 mem_alloc_ex.in.commit_pages = alloc->in.commit_pages;
906 mem_alloc_ex.in.extension = alloc->in.extension;
907 mem_alloc_ex.in.flags = alloc->in.flags;
908 mem_alloc_ex.in.fixed_address = 0;
909
910 ret = kbase_api_mem_alloc_ex(kctx, &mem_alloc_ex);
911
912 alloc->out.flags = mem_alloc_ex.out.flags;
913 alloc->out.gpu_va = mem_alloc_ex.out.gpu_va;
914
915 return ret;
916 }
917 #else
918 static int kbase_api_mem_alloc(struct kbase_context *kctx, union kbase_ioctl_mem_alloc *alloc)
919 {
920 struct kbase_va_region *reg;
921 u64 flags = alloc->in.flags;
922 u64 gpu_va;
923
924 /* Calls to this function are inherently asynchronous with respect to
925 * MMU operations.
926 */
927 const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
928
929 if (!kbase_mem_allow_alloc(kctx))
930 return -EINVAL;
931
932 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
933 return -ENOMEM;
934
935 /* Force SAME_VA if a 64-bit client.
936 * The only exception is GPU-executable memory if an EXEC_VA zone
937 * has been initialized. In that case, GPU-executable memory may
938 * or may not be SAME_VA.
939 */
940 if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) && kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
941 if (!(flags & BASE_MEM_PROT_GPU_EX) || !kbase_has_exec_va_zone(kctx))
942 flags |= BASE_MEM_SAME_VA;
943 }
944
945 reg = kbase_mem_alloc(kctx, alloc->in.va_pages, alloc->in.commit_pages, alloc->in.extension,
946 &flags, &gpu_va, mmu_sync_info);
947
948 if (!reg)
949 return -ENOMEM;
950
951 alloc->out.flags = flags;
952 alloc->out.gpu_va = gpu_va;
953
954 return 0;
955 }
956 #endif /* MALI_USE_CSF */
957
958 static int kbase_api_mem_query(struct kbase_context *kctx,
959 union kbase_ioctl_mem_query *query)
960 {
961 return kbase_mem_query(kctx, query->in.gpu_addr,
962 query->in.query, &query->out.value);
963 }
964
965 static int kbase_api_mem_free(struct kbase_context *kctx,
966 struct kbase_ioctl_mem_free *free)
967 {
968 return kbase_mem_free(kctx, free->gpu_addr);
969 }
970
971 #if !MALI_USE_CSF
972 static int kbase_api_kinstr_jm_fd(struct kbase_context *kctx,
973 union kbase_kinstr_jm_fd *arg)
974 {
975 return kbase_kinstr_jm_get_fd(kctx->kinstr_jm, arg);
976 }
977 #endif
978
979 static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
980 struct kbase_ioctl_hwcnt_reader_setup *setup)
981 {
982 return kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
983 }
984
985 static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
986 union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
987 {
988 u32 flags = timeinfo->in.request_flags;
989 struct timespec64 ts = { 0 };
990 u64 timestamp = 0;
991 u64 cycle_cnt = 0;
992
993 kbase_pm_context_active(kctx->kbdev);
994
995 kbase_backend_get_gpu_time(kctx->kbdev,
996 (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
997 (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
998 (flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
999
1000 if (flags & BASE_TIMEINFO_TIMESTAMP_FLAG)
1001 timeinfo->out.timestamp = timestamp;
1002
1003 if (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG)
1004 timeinfo->out.cycle_counter = cycle_cnt;
1005
1006 if (flags & BASE_TIMEINFO_MONOTONIC_FLAG) {
1007 timeinfo->out.sec = ts.tv_sec;
1008 timeinfo->out.nsec = ts.tv_nsec;
1009 }
1010
1011 kbase_pm_context_idle(kctx->kbdev);
1012
1013 return 0;
1014 }
1015
1016 #if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
1017 static int kbase_api_hwcnt_set(struct kbase_context *kctx,
1018 struct kbase_ioctl_hwcnt_values *values)
1019 {
1020 return gpu_model_set_dummy_prfcnt_user_sample(u64_to_user_ptr(values->data), values->size);
1021 }
1022 #endif /* CONFIG_MALI_BIFROST_NO_MALI */
1023
1024 static int kbase_api_disjoint_query(struct kbase_context *kctx,
1025 struct kbase_ioctl_disjoint_query *query)
1026 {
1027 query->counter = kbase_disjoint_event_get(kctx->kbdev);
1028
1029 return 0;
1030 }
1031
1032 static int kbase_api_get_ddk_version(struct kbase_context *kctx,
1033 struct kbase_ioctl_get_ddk_version *version)
1034 {
1035 int ret;
1036 int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
1037
1038 if (version->version_buffer == 0)
1039 return len;
1040
1041 if (version->size < len)
1042 return -EOVERFLOW;
1043
1044 ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
1045 KERNEL_SIDE_DDK_VERSION_STRING,
1046 sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
1047
1048 if (ret)
1049 return -EFAULT;
1050
1051 return len;
1052 }
1053
1054 static int kbase_api_mem_jit_init(struct kbase_context *kctx,
1055 struct kbase_ioctl_mem_jit_init *jit_init)
1056 {
1057 int i;
1058
1059 for (i = 0; i < sizeof(jit_init->padding); i++) {
1060 /* Ensure all padding bytes are 0 for potential future
1061 * extension
1062 */
1063 if (jit_init->padding[i])
1064 return -EINVAL;
1065 }
1066
1067 return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
1068 jit_init->max_allocations, jit_init->trim_level,
1069 jit_init->group_id, jit_init->phys_pages);
1070 }
1071
1072 static int kbase_api_mem_exec_init(struct kbase_context *kctx,
1073 struct kbase_ioctl_mem_exec_init *exec_init)
1074 {
1075 return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);
1076 }
1077
1078 static int kbase_api_mem_sync(struct kbase_context *kctx,
1079 struct kbase_ioctl_mem_sync *sync)
1080 {
1081 struct basep_syncset sset = {
1082 .mem_handle.basep.handle = sync->handle,
1083 .user_addr = sync->user_addr,
1084 .size = sync->size,
1085 .type = sync->type
1086 };
1087
1088 return kbase_sync_now(kctx, &sset);
1089 }
1090
1091 static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
1092 union kbase_ioctl_mem_find_cpu_offset *find)
1093 {
1094 return kbasep_find_enclosing_cpu_mapping_offset(
1095 kctx,
1096 find->in.cpu_addr,
1097 find->in.size,
1098 &find->out.offset);
1099 }
1100
1101 static int kbase_api_mem_find_gpu_start_and_offset(struct kbase_context *kctx,
1102 union kbase_ioctl_mem_find_gpu_start_and_offset *find)
1103 {
1104 return kbasep_find_enclosing_gpu_mapping_start_and_offset(
1105 kctx,
1106 find->in.gpu_addr,
1107 find->in.size,
1108 &find->out.start,
1109 &find->out.offset);
1110 }
1111
1112 static int kbase_api_get_context_id(struct kbase_context *kctx,
1113 struct kbase_ioctl_get_context_id *info)
1114 {
1115 info->id = kctx->id;
1116
1117 return 0;
1118 }
1119
1120 static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
1121 struct kbase_ioctl_tlstream_acquire *acquire)
1122 {
1123 return kbase_timeline_io_acquire(kctx->kbdev, acquire->flags);
1124 }
1125
1126 static int kbase_api_tlstream_flush(struct kbase_context *kctx)
1127 {
1128 kbase_timeline_streams_flush(kctx->kbdev->timeline);
1129
1130 return 0;
1131 }
1132
1133 static int kbase_api_mem_commit(struct kbase_context *kctx,
1134 struct kbase_ioctl_mem_commit *commit)
1135 {
1136 return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
1137 }
1138
1139 static int kbase_api_mem_alias(struct kbase_context *kctx,
1140 union kbase_ioctl_mem_alias *alias)
1141 {
1142 struct base_mem_aliasing_info *ai;
1143 u64 flags;
1144 int err;
1145
1146 if (alias->in.nents == 0 || alias->in.nents > BASE_MEM_ALIAS_MAX_ENTS)
1147 return -EINVAL;
1148
1149 ai = vmalloc(sizeof(*ai) * alias->in.nents);
1150 if (!ai)
1151 return -ENOMEM;
1152
1153 err = copy_from_user(ai,
1154 u64_to_user_ptr(alias->in.aliasing_info),
1155 sizeof(*ai) * alias->in.nents);
1156 if (err) {
1157 vfree(ai);
1158 return -EFAULT;
1159 }
1160
1161 flags = alias->in.flags;
1162 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
1163 vfree(ai);
1164 return -EINVAL;
1165 }
1166
1167 alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
1168 alias->in.stride, alias->in.nents,
1169 ai, &alias->out.va_pages);
1170
1171 alias->out.flags = flags;
1172
1173 vfree(ai);
1174
1175 if (alias->out.gpu_va == 0)
1176 return -ENOMEM;
1177
1178 return 0;
1179 }
1180
1181 static int kbase_api_mem_import(struct kbase_context *kctx,
1182 union kbase_ioctl_mem_import *import)
1183 {
1184 int ret;
1185 u64 flags = import->in.flags;
1186
1187 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
1188 return -ENOMEM;
1189
1190 ret = kbase_mem_import(kctx,
1191 import->in.type,
1192 u64_to_user_ptr(import->in.phandle),
1193 import->in.padding,
1194 &import->out.gpu_va,
1195 &import->out.va_pages,
1196 &flags);
1197
1198 import->out.flags = flags;
1199
1200 return ret;
1201 }
1202
1203 static int kbase_api_mem_flags_change(struct kbase_context *kctx,
1204 struct kbase_ioctl_mem_flags_change *change)
1205 {
1206 if (change->flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
1207 return -ENOMEM;
1208
1209 return kbase_mem_flags_change(kctx, change->gpu_va,
1210 change->flags, change->mask);
1211 }
1212
1213 static int kbase_api_stream_create(struct kbase_context *kctx,
1214 struct kbase_ioctl_stream_create *stream)
1215 {
1216 #if IS_ENABLED(CONFIG_SYNC_FILE)
1217 int fd, ret;
1218
1219 /* Name must be NULL-terminated and padded with NULLs, so check last
1220 * character is NULL
1221 */
1222 if (stream->name[sizeof(stream->name)-1] != 0)
1223 return -EINVAL;
1224
1225 ret = kbase_sync_fence_stream_create(stream->name, &fd);
1226
1227 if (ret)
1228 return ret;
1229 return fd;
1230 #else
1231 return -ENOENT;
1232 #endif
1233 }
1234
1235 static int kbase_api_fence_validate(struct kbase_context *kctx,
1236 struct kbase_ioctl_fence_validate *validate)
1237 {
1238 #if IS_ENABLED(CONFIG_SYNC_FILE)
1239 return kbase_sync_fence_validate(validate->fd);
1240 #else
1241 return -ENOENT;
1242 #endif
1243 }
1244
1245 static int kbase_api_mem_profile_add(struct kbase_context *kctx,
1246 struct kbase_ioctl_mem_profile_add *data)
1247 {
1248 char *buf;
1249 int err;
1250
1251 if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
1252 dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big");
1253 return -EINVAL;
1254 }
1255
1256 if (!data->len) {
1257 dev_err(kctx->kbdev->dev, "mem_profile_add: buffer size is 0");
1258 /* Should return -EINVAL, but returning -ENOMEM for backwards compat */
1259 return -ENOMEM;
1260 }
1261
1262 buf = kmalloc(data->len, GFP_KERNEL);
1263 if (!buf)
1264 return -ENOMEM;
1265
1266 err = copy_from_user(buf, u64_to_user_ptr(data->buffer),
1267 data->len);
1268 if (err) {
1269 kfree(buf);
1270 return -EFAULT;
1271 }
1272
1273 return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
1274 }
1275
1276 #if !MALI_USE_CSF
1277 static int kbase_api_soft_event_update(struct kbase_context *kctx,
1278 struct kbase_ioctl_soft_event_update *update)
1279 {
1280 if (update->flags != 0)
1281 return -EINVAL;
1282
1283 return kbase_soft_event_update(kctx, update->event, update->new_status);
1284 }
1285 #endif /* !MALI_USE_CSF */
1286
1287 static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
1288 struct kbase_ioctl_sticky_resource_map *map)
1289 {
1290 int ret;
1291 u64 i;
1292 u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
1293
1294 if (!map->count || map->count > BASE_EXT_RES_COUNT_MAX)
1295 return -EOVERFLOW;
1296
1297 ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address),
1298 sizeof(u64) * map->count);
1299
1300 if (ret != 0)
1301 return -EFAULT;
1302
1303 kbase_gpu_vm_lock(kctx);
1304
1305 for (i = 0; i < map->count; i++) {
1306 if (!kbase_sticky_resource_acquire(kctx, gpu_addr[i])) {
1307 /* Invalid resource */
1308 ret = -EINVAL;
1309 break;
1310 }
1311 }
1312
1313 if (ret != 0) {
1314 while (i > 0) {
1315 i--;
1316 kbase_sticky_resource_release_force(kctx, NULL, gpu_addr[i]);
1317 }
1318 }
1319
1320 kbase_gpu_vm_unlock(kctx);
1321
1322 return ret;
1323 }
1324
1325 static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
1326 struct kbase_ioctl_sticky_resource_unmap *unmap)
1327 {
1328 int ret;
1329 u64 i;
1330 u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
1331
1332 if (!unmap->count || unmap->count > BASE_EXT_RES_COUNT_MAX)
1333 return -EOVERFLOW;
1334
1335 ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address),
1336 sizeof(u64) * unmap->count);
1337
1338 if (ret != 0)
1339 return -EFAULT;
1340
1341 kbase_gpu_vm_lock(kctx);
1342
1343 for (i = 0; i < unmap->count; i++) {
1344 if (!kbase_sticky_resource_release_force(kctx, NULL, gpu_addr[i])) {
1345 /* Invalid resource, but we keep going anyway */
1346 ret = -EINVAL;
1347 }
1348 }
1349
1350 kbase_gpu_vm_unlock(kctx);
1351
1352 return ret;
1353 }
1354
1355 #if MALI_UNIT_TEST
1356
1357 static int kbase_api_tlstream_stats(struct kbase_context *kctx,
1358 struct kbase_ioctl_tlstream_stats *stats)
1359 {
1360 kbase_timeline_stats(kctx->kbdev->timeline,
1361 &stats->bytes_collected,
1362 &stats->bytes_generated);
1363
1364 return 0;
1365 }
1366 #endif /* MALI_UNIT_TEST */
1367
1368 #if MALI_USE_CSF
1369 static int kbasep_cs_event_signal(struct kbase_context *kctx)
1370 {
1371 kbase_csf_event_signal_notify_gpu(kctx);
1372 return 0;
1373 }
1374
1375 static int kbasep_cs_queue_register(struct kbase_context *kctx,
1376 struct kbase_ioctl_cs_queue_register *reg)
1377 {
1378 kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
1379
1380 return kbase_csf_queue_register(kctx, reg);
1381 }
1382
1383 static int kbasep_cs_queue_register_ex(struct kbase_context *kctx,
1384 struct kbase_ioctl_cs_queue_register_ex *reg)
1385 {
1386 kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
1387
1388 return kbase_csf_queue_register_ex(kctx, reg);
1389 }
1390
1391 static int kbasep_cs_queue_terminate(struct kbase_context *kctx,
1392 struct kbase_ioctl_cs_queue_terminate *term)
1393 {
1394 kbase_csf_queue_terminate(kctx, term);
1395
1396 return 0;
1397 }
1398
1399 static int kbasep_cs_queue_bind(struct kbase_context *kctx,
1400 union kbase_ioctl_cs_queue_bind *bind)
1401 {
1402 return kbase_csf_queue_bind(kctx, bind);
1403 }
1404
1405 static int kbasep_cs_queue_kick(struct kbase_context *kctx,
1406 struct kbase_ioctl_cs_queue_kick *kick)
1407 {
1408 return kbase_csf_queue_kick(kctx, kick);
1409 }
1410
1411 static int kbasep_cs_queue_group_create_1_6(
1412 struct kbase_context *kctx,
1413 union kbase_ioctl_cs_queue_group_create_1_6 *create)
1414 {
1415 union kbase_ioctl_cs_queue_group_create
1416 new_create = { .in = {
1417 .tiler_mask = create->in.tiler_mask,
1418 .fragment_mask =
1419 create->in.fragment_mask,
1420 .compute_mask = create->in.compute_mask,
1421 .cs_min = create->in.cs_min,
1422 .priority = create->in.priority,
1423 .tiler_max = create->in.tiler_max,
1424 .fragment_max = create->in.fragment_max,
1425 .compute_max = create->in.compute_max,
1426 } };
1427
1428 int ret = kbase_csf_queue_group_create(kctx, &new_create);
1429
1430 create->out.group_handle = new_create.out.group_handle;
1431 create->out.group_uid = new_create.out.group_uid;
1432
1433 return ret;
1434 }
1435 static int kbasep_cs_queue_group_create(struct kbase_context *kctx,
1436 union kbase_ioctl_cs_queue_group_create *create)
1437 {
1438 return kbase_csf_queue_group_create(kctx, create);
1439 }
1440
1441 static int kbasep_cs_queue_group_terminate(struct kbase_context *kctx,
1442 struct kbase_ioctl_cs_queue_group_term *term)
1443 {
1444 kbase_csf_queue_group_terminate(kctx, term->group_handle);
1445
1446 return 0;
1447 }
1448
1449 static int kbasep_kcpu_queue_new(struct kbase_context *kctx,
1450 struct kbase_ioctl_kcpu_queue_new *new)
1451 {
1452 return kbase_csf_kcpu_queue_new(kctx, new);
1453 }
1454
1455 static int kbasep_kcpu_queue_delete(struct kbase_context *kctx,
1456 struct kbase_ioctl_kcpu_queue_delete *delete)
1457 {
1458 return kbase_csf_kcpu_queue_delete(kctx, delete);
1459 }
1460
1461 static int kbasep_kcpu_queue_enqueue(struct kbase_context *kctx,
1462 struct kbase_ioctl_kcpu_queue_enqueue *enqueue)
1463 {
1464 return kbase_csf_kcpu_queue_enqueue(kctx, enqueue);
1465 }
1466
1467 static int kbasep_cs_tiler_heap_init(struct kbase_context *kctx,
1468 union kbase_ioctl_cs_tiler_heap_init *heap_init)
1469 {
1470 if (heap_init->in.group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)
1471 return -EINVAL;
1472
1473 kctx->jit_group_id = heap_init->in.group_id;
1474
1475 return kbase_csf_tiler_heap_init(kctx, heap_init->in.chunk_size,
1476 heap_init->in.initial_chunks, heap_init->in.max_chunks,
1477 heap_init->in.target_in_flight, heap_init->in.buf_desc_va,
1478 &heap_init->out.gpu_heap_va,
1479 &heap_init->out.first_chunk_va);
1480 }
1481
1482 static int kbasep_cs_tiler_heap_init_1_13(struct kbase_context *kctx,
1483 union kbase_ioctl_cs_tiler_heap_init_1_13 *heap_init)
1484 {
1485 if (heap_init->in.group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)
1486 return -EINVAL;
1487
1488 kctx->jit_group_id = heap_init->in.group_id;
1489
1490 return kbase_csf_tiler_heap_init(kctx, heap_init->in.chunk_size,
1491 heap_init->in.initial_chunks, heap_init->in.max_chunks,
1492 heap_init->in.target_in_flight, 0,
1493 &heap_init->out.gpu_heap_va,
1494 &heap_init->out.first_chunk_va);
1495 }
1496
1497 static int kbasep_cs_tiler_heap_term(struct kbase_context *kctx,
1498 struct kbase_ioctl_cs_tiler_heap_term *heap_term)
1499 {
1500 return kbase_csf_tiler_heap_term(kctx, heap_term->gpu_heap_va);
1501 }
1502
1503 static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
1504 union kbase_ioctl_cs_get_glb_iface *param)
1505 {
1506 struct basep_cs_stream_control *stream_data = NULL;
1507 struct basep_cs_group_control *group_data = NULL;
1508 void __user *user_groups, *user_streams;
1509 int err = 0;
1510 u32 const max_group_num = param->in.max_group_num;
1511 u32 const max_total_stream_num = param->in.max_total_stream_num;
1512
1513 if (max_group_num > MAX_SUPPORTED_CSGS)
1514 return -EINVAL;
1515
1516 if (max_total_stream_num >
1517 MAX_SUPPORTED_CSGS * MAX_SUPPORTED_STREAMS_PER_GROUP)
1518 return -EINVAL;
1519
1520 user_groups = u64_to_user_ptr(param->in.groups_ptr);
1521 user_streams = u64_to_user_ptr(param->in.streams_ptr);
1522
1523 if (max_group_num > 0) {
1524 if (!user_groups)
1525 err = -EINVAL;
1526 else {
1527 group_data = kcalloc(max_group_num,
1528 sizeof(*group_data), GFP_KERNEL);
1529 if (!group_data)
1530 err = -ENOMEM;
1531 }
1532 }
1533
1534 if (max_total_stream_num > 0) {
1535 if (!user_streams)
1536 err = -EINVAL;
1537 else {
1538 stream_data = kcalloc(max_total_stream_num,
1539 sizeof(*stream_data), GFP_KERNEL);
1540 if (!stream_data)
1541 err = -ENOMEM;
1542 }
1543 }
1544
1545 if (!err) {
1546 param->out.total_stream_num = kbase_csf_firmware_get_glb_iface(
1547 kctx->kbdev, group_data, max_group_num, stream_data,
1548 max_total_stream_num, &param->out.glb_version,
1549 &param->out.features, &param->out.group_num,
1550 &param->out.prfcnt_size, &param->out.instr_features);
1551
1552 if (copy_to_user(user_groups, group_data,
1553 MIN(max_group_num, param->out.group_num) *
1554 sizeof(*group_data)))
1555 err = -EFAULT;
1556 }
1557
1558 if (!err)
1559 if (copy_to_user(user_streams, stream_data,
1560 MIN(max_total_stream_num, param->out.total_stream_num) *
1561 sizeof(*stream_data)))
1562 err = -EFAULT;
1563
1564 kfree(group_data);
1565 kfree(stream_data);
1566 return err;
1567 }
1568
1569 static int kbasep_ioctl_cs_cpu_queue_dump(struct kbase_context *kctx,
1570 struct kbase_ioctl_cs_cpu_queue_info *cpu_queue_info)
1571 {
1572 return kbase_csf_cpu_queue_dump(kctx, cpu_queue_info->buffer,
1573 cpu_queue_info->size);
1574 }
1575
1576 static int kbase_ioctl_read_user_page(struct kbase_context *kctx,
1577 union kbase_ioctl_read_user_page *user_page)
1578 {
1579 struct kbase_device *kbdev = kctx->kbdev;
1580 unsigned long flags;
1581
1582 /* As of now, only LATEST_FLUSH is supported */
1583 if (unlikely(user_page->in.offset != LATEST_FLUSH))
1584 return -EINVAL;
1585
1586 /* Validating padding that must be zero */
1587 if (unlikely(user_page->in.padding != 0))
1588 return -EINVAL;
1589
1590 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1591 if (!kbdev->pm.backend.gpu_powered)
1592 user_page->out.val_lo = POWER_DOWN_LATEST_FLUSH_VALUE;
1593 else
1594 user_page->out.val_lo = kbase_reg_read(kbdev, USER_REG(LATEST_FLUSH));
1595 user_page->out.val_hi = 0;
1596 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1597
1598 return 0;
1599 }
1600 #endif /* MALI_USE_CSF */
1601
1602 static int kbasep_ioctl_context_priority_check(struct kbase_context *kctx,
1603 struct kbase_ioctl_context_priority_check *priority_check)
1604 {
1605 #if MALI_USE_CSF
1606 priority_check->priority = kbase_csf_priority_check(kctx->kbdev, priority_check->priority);
1607 #else
1608 base_jd_prio req_priority = (base_jd_prio)priority_check->priority;
1609
1610 priority_check->priority = (u8)kbase_js_priority_check(kctx->kbdev, req_priority);
1611 #endif
1612 return 0;
1613 }
1614
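/*
 * Helper macros used by kbase_ioctl() below to dispatch each command: they
 * check the ioctl direction and argument size at build time, copy the
 * argument struct from/to user space as required, and then call the handler,
 * e.g. KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC, kbase_api_mem_alloc,
 * union kbase_ioctl_mem_alloc, kctx).
 */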
1615 #define KBASE_HANDLE_IOCTL(cmd, function, arg) \
1616 do { \
1617 int ret; \
1618 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
1619 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1620 ret = function(arg); \
1621 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1622 #function); \
1623 return ret; \
1624 } while (0)
1625
1626 #define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg) \
1627 do { \
1628 type param; \
1629 int ret, err; \
1630 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1631 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
1632 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1633 err = copy_from_user(&param, uarg, sizeof(param)); \
1634 if (err) \
1635 return -EFAULT; \
1636 ret = function(arg, &param); \
1637 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1638 #function); \
1639 return ret; \
1640 } while (0)
1641
1642 #define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg) \
1643 do { \
1644 type param; \
1645 int ret, err; \
1646 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1647 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
1648 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1649 memset(&param, 0, sizeof(param)); \
1650 ret = function(arg, &param); \
1651 err = copy_to_user(uarg, &param, sizeof(param)); \
1652 if (err) \
1653 return -EFAULT; \
1654 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1655 #function); \
1656 return ret; \
1657 } while (0)
1658
1659 #define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg) \
1660 do { \
1661 type param; \
1662 int ret, err; \
1663 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1664 BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE | _IOC_READ)); \
1665 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
		err = copy_from_user(&param, uarg, sizeof(param)); \
1667 if (err) \
1668 return -EFAULT; \
		ret = function(arg, &param); \
		err = copy_to_user(uarg, &param, sizeof(param)); \
1671 if (err) \
1672 return -EFAULT; \
1673 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1674 #function); \
1675 return ret; \
1676 } while (0)
1677
static int kbasep_ioctl_set_limited_core_count(struct kbase_context *kctx,
1679 struct kbase_ioctl_set_limited_core_count *set_limited_core_count)
1680 {
1681 const u64 shader_core_mask =
1682 kbase_pm_get_present_cores(kctx->kbdev, KBASE_PM_CORE_SHADER);
1683 const u64 limited_core_mask =
1684 ((u64)1 << (set_limited_core_count->max_core_count)) - 1;
1685
1686 if ((shader_core_mask & limited_core_mask) == 0) {
1687 /* At least one shader core must be available after applying the mask */
1688 return -EINVAL;
1689 }
1690
1691 kctx->limited_core_mask = limited_core_mask;
1692 return 0;
1693 }
1694
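/**
 * kbase_ioctl - Main ioctl dispatcher for the kbase device file.
 * @filp: The device file instance.
 * @cmd:  The ioctl command number.
 * @arg:  Userspace pointer to the command's argument structure, if any.
 *
 * A small set of commands (version check, set flags, kinstr_prfcnt enum/setup
 * and GPU property queries) is handled against the struct kbase_file before
 * context setup has completed. All other commands require a fully set-up
 * context and fail with -EPERM otherwise; unrecognised commands return
 * -ENOIOCTLCMD.
 */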
static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1696 {
1697 struct kbase_file *const kfile = filp->private_data;
1698 struct kbase_context *kctx = NULL;
1699 struct kbase_device *kbdev = kfile->kbdev;
1700 void __user *uarg = (void __user *)arg;
1701
1702 /* Only these ioctls are available until setup is complete */
1703 switch (cmd) {
1704 case KBASE_IOCTL_VERSION_CHECK:
1705 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
1706 kbase_api_handshake,
1707 struct kbase_ioctl_version_check,
1708 kfile);
1709 break;
1710
1711 case KBASE_IOCTL_VERSION_CHECK_RESERVED:
1712 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK_RESERVED,
1713 kbase_api_handshake_dummy,
1714 struct kbase_ioctl_version_check,
1715 kfile);
1716 break;
1717
1718 case KBASE_IOCTL_SET_FLAGS:
1719 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
1720 kbase_api_set_flags,
1721 struct kbase_ioctl_set_flags,
1722 kfile);
1723 break;
1724
1725 case KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO:
1726 KBASE_HANDLE_IOCTL_INOUT(
1727 KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO,
1728 kbase_api_kinstr_prfcnt_enum_info,
1729 struct kbase_ioctl_kinstr_prfcnt_enum_info, kfile);
1730 break;
1731
1732 case KBASE_IOCTL_KINSTR_PRFCNT_SETUP:
1733 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_PRFCNT_SETUP,
1734 kbase_api_kinstr_prfcnt_setup,
1735 union kbase_ioctl_kinstr_prfcnt_setup,
1736 kfile);
1737 break;
1738 case KBASE_IOCTL_GET_GPUPROPS:
1739 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS, kbase_api_get_gpuprops,
1740 struct kbase_ioctl_get_gpuprops, kfile);
1741 break;
1742 }
1743
1744 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
1745 if (unlikely(!kctx))
1746 return -EPERM;
1747
1748 /* Normal ioctls */
1749 switch (cmd) {
1750 #if !MALI_USE_CSF
1751 case KBASE_IOCTL_JOB_SUBMIT:
1752 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
1753 kbase_api_job_submit,
1754 struct kbase_ioctl_job_submit,
1755 kctx);
1756 break;
1757 #endif /* !MALI_USE_CSF */
1758 #if !MALI_USE_CSF
1759 case KBASE_IOCTL_POST_TERM:
1760 KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
1761 kbase_api_post_term,
1762 kctx);
1763 break;
1764 #endif /* !MALI_USE_CSF */
1765 case KBASE_IOCTL_MEM_ALLOC:
1766 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
1767 kbase_api_mem_alloc,
1768 union kbase_ioctl_mem_alloc,
1769 kctx);
1770 break;
1771 #if MALI_USE_CSF
1772 case KBASE_IOCTL_MEM_ALLOC_EX:
1773 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC_EX, kbase_api_mem_alloc_ex,
1774 union kbase_ioctl_mem_alloc_ex, kctx);
1775 break;
1776 #endif
1777 case KBASE_IOCTL_MEM_QUERY:
1778 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
1779 kbase_api_mem_query,
1780 union kbase_ioctl_mem_query,
1781 kctx);
1782 break;
1783 case KBASE_IOCTL_MEM_FREE:
1784 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
1785 kbase_api_mem_free,
1786 struct kbase_ioctl_mem_free,
1787 kctx);
1788 break;
1789 case KBASE_IOCTL_DISJOINT_QUERY:
1790 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
1791 kbase_api_disjoint_query,
1792 struct kbase_ioctl_disjoint_query,
1793 kctx);
1794 break;
1795 case KBASE_IOCTL_GET_DDK_VERSION:
1796 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
1797 kbase_api_get_ddk_version,
1798 struct kbase_ioctl_get_ddk_version,
1799 kctx);
1800 break;
1801 case KBASE_IOCTL_MEM_JIT_INIT:
1802 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
1803 kbase_api_mem_jit_init,
1804 struct kbase_ioctl_mem_jit_init,
1805 kctx);
1806 break;
1807 case KBASE_IOCTL_MEM_EXEC_INIT:
1808 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
1809 kbase_api_mem_exec_init,
1810 struct kbase_ioctl_mem_exec_init,
1811 kctx);
1812 break;
1813 case KBASE_IOCTL_MEM_SYNC:
1814 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
1815 kbase_api_mem_sync,
1816 struct kbase_ioctl_mem_sync,
1817 kctx);
1818 break;
1819 case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
1820 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
1821 kbase_api_mem_find_cpu_offset,
1822 union kbase_ioctl_mem_find_cpu_offset,
1823 kctx);
1824 break;
1825 case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
1826 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
1827 kbase_api_mem_find_gpu_start_and_offset,
1828 union kbase_ioctl_mem_find_gpu_start_and_offset,
1829 kctx);
1830 break;
1831 case KBASE_IOCTL_GET_CONTEXT_ID:
1832 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
1833 kbase_api_get_context_id,
1834 struct kbase_ioctl_get_context_id,
1835 kctx);
1836 break;
1837 case KBASE_IOCTL_TLSTREAM_ACQUIRE:
1838 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
1839 kbase_api_tlstream_acquire,
1840 struct kbase_ioctl_tlstream_acquire,
1841 kctx);
1842 break;
1843 case KBASE_IOCTL_TLSTREAM_FLUSH:
1844 KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
1845 kbase_api_tlstream_flush,
1846 kctx);
1847 break;
1848 case KBASE_IOCTL_MEM_COMMIT:
1849 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
1850 kbase_api_mem_commit,
1851 struct kbase_ioctl_mem_commit,
1852 kctx);
1853 break;
1854 case KBASE_IOCTL_MEM_ALIAS:
1855 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
1856 kbase_api_mem_alias,
1857 union kbase_ioctl_mem_alias,
1858 kctx);
1859 break;
1860 case KBASE_IOCTL_MEM_IMPORT:
1861 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
1862 kbase_api_mem_import,
1863 union kbase_ioctl_mem_import,
1864 kctx);
1865 break;
1866 case KBASE_IOCTL_MEM_FLAGS_CHANGE:
1867 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
1868 kbase_api_mem_flags_change,
1869 struct kbase_ioctl_mem_flags_change,
1870 kctx);
1871 break;
1872 case KBASE_IOCTL_STREAM_CREATE:
1873 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
1874 kbase_api_stream_create,
1875 struct kbase_ioctl_stream_create,
1876 kctx);
1877 break;
1878 case KBASE_IOCTL_FENCE_VALIDATE:
1879 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
1880 kbase_api_fence_validate,
1881 struct kbase_ioctl_fence_validate,
1882 kctx);
1883 break;
1884 case KBASE_IOCTL_MEM_PROFILE_ADD:
1885 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
1886 kbase_api_mem_profile_add,
1887 struct kbase_ioctl_mem_profile_add,
1888 kctx);
1889 break;
1890
1891 #if !MALI_USE_CSF
1892 case KBASE_IOCTL_SOFT_EVENT_UPDATE:
1893 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
1894 kbase_api_soft_event_update,
1895 struct kbase_ioctl_soft_event_update,
1896 kctx);
1897 break;
1898 #endif /* !MALI_USE_CSF */
1899
1900 case KBASE_IOCTL_STICKY_RESOURCE_MAP:
1901 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
1902 kbase_api_sticky_resource_map,
1903 struct kbase_ioctl_sticky_resource_map,
1904 kctx);
1905 break;
1906 case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
1907 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
1908 kbase_api_sticky_resource_unmap,
1909 struct kbase_ioctl_sticky_resource_unmap,
1910 kctx);
1911 break;
1912
1913 /* Instrumentation. */
1914 #if !MALI_USE_CSF
1915 case KBASE_IOCTL_KINSTR_JM_FD:
1916 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_JM_FD,
1917 kbase_api_kinstr_jm_fd,
1918 union kbase_kinstr_jm_fd,
1919 kctx);
1920 break;
1921 #endif
1922 case KBASE_IOCTL_HWCNT_READER_SETUP:
1923 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
1924 kbase_api_hwcnt_reader_setup,
1925 struct kbase_ioctl_hwcnt_reader_setup,
1926 kctx);
1927 break;
1928 case KBASE_IOCTL_GET_CPU_GPU_TIMEINFO:
1929 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_GET_CPU_GPU_TIMEINFO,
1930 kbase_api_get_cpu_gpu_timeinfo,
1931 union kbase_ioctl_get_cpu_gpu_timeinfo,
1932 kctx);
1933 break;
1934 #if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
1935 case KBASE_IOCTL_HWCNT_SET:
1936 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
1937 kbase_api_hwcnt_set,
1938 struct kbase_ioctl_hwcnt_values,
1939 kctx);
1940 break;
1941 #endif /* CONFIG_MALI_BIFROST_NO_MALI */
1942 #ifdef CONFIG_MALI_CINSTR_GWT
1943 case KBASE_IOCTL_CINSTR_GWT_START:
1944 KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
1945 kbase_gpu_gwt_start,
1946 kctx);
1947 break;
1948 case KBASE_IOCTL_CINSTR_GWT_STOP:
1949 KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
1950 kbase_gpu_gwt_stop,
1951 kctx);
1952 break;
1953 case KBASE_IOCTL_CINSTR_GWT_DUMP:
1954 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
1955 kbase_gpu_gwt_dump,
1956 union kbase_ioctl_cinstr_gwt_dump,
1957 kctx);
1958 break;
1959 #endif
1960 #if MALI_USE_CSF
1961 case KBASE_IOCTL_CS_EVENT_SIGNAL:
1962 KBASE_HANDLE_IOCTL(KBASE_IOCTL_CS_EVENT_SIGNAL,
1963 kbasep_cs_event_signal,
1964 kctx);
1965 break;
1966 case KBASE_IOCTL_CS_QUEUE_REGISTER:
1967 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER,
1968 kbasep_cs_queue_register,
1969 struct kbase_ioctl_cs_queue_register,
1970 kctx);
1971 break;
1972 case KBASE_IOCTL_CS_QUEUE_REGISTER_EX:
1973 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER_EX,
1974 kbasep_cs_queue_register_ex,
1975 struct kbase_ioctl_cs_queue_register_ex,
1976 kctx);
1977 break;
1978 case KBASE_IOCTL_CS_QUEUE_TERMINATE:
1979 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_TERMINATE,
1980 kbasep_cs_queue_terminate,
1981 struct kbase_ioctl_cs_queue_terminate,
1982 kctx);
1983 break;
1984 case KBASE_IOCTL_CS_QUEUE_BIND:
1985 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_BIND,
1986 kbasep_cs_queue_bind,
1987 union kbase_ioctl_cs_queue_bind,
1988 kctx);
1989 break;
1990 case KBASE_IOCTL_CS_QUEUE_KICK:
1991 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_KICK,
1992 kbasep_cs_queue_kick,
1993 struct kbase_ioctl_cs_queue_kick,
1994 kctx);
1995 break;
1996 case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6:
1997 KBASE_HANDLE_IOCTL_INOUT(
1998 KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6,
1999 kbasep_cs_queue_group_create_1_6,
2000 union kbase_ioctl_cs_queue_group_create_1_6, kctx);
2001 break;
2002 case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE:
2003 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_GROUP_CREATE,
2004 kbasep_cs_queue_group_create,
2005 union kbase_ioctl_cs_queue_group_create,
2006 kctx);
2007 break;
2008 case KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE:
2009 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE,
2010 kbasep_cs_queue_group_terminate,
2011 struct kbase_ioctl_cs_queue_group_term,
2012 kctx);
2013 break;
2014 case KBASE_IOCTL_KCPU_QUEUE_CREATE:
2015 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_KCPU_QUEUE_CREATE,
2016 kbasep_kcpu_queue_new,
2017 struct kbase_ioctl_kcpu_queue_new,
2018 kctx);
2019 break;
2020 case KBASE_IOCTL_KCPU_QUEUE_DELETE:
2021 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_DELETE,
2022 kbasep_kcpu_queue_delete,
2023 struct kbase_ioctl_kcpu_queue_delete,
2024 kctx);
2025 break;
2026 case KBASE_IOCTL_KCPU_QUEUE_ENQUEUE:
2027 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_ENQUEUE,
2028 kbasep_kcpu_queue_enqueue,
2029 struct kbase_ioctl_kcpu_queue_enqueue,
2030 kctx);
2031 break;
2032 case KBASE_IOCTL_CS_TILER_HEAP_INIT:
2033 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT,
2034 kbasep_cs_tiler_heap_init,
2035 union kbase_ioctl_cs_tiler_heap_init,
2036 kctx);
2037 break;
2038 case KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13:
2039 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13,
2040 kbasep_cs_tiler_heap_init_1_13,
2041 union kbase_ioctl_cs_tiler_heap_init_1_13, kctx);
2042 break;
2043 case KBASE_IOCTL_CS_TILER_HEAP_TERM:
2044 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_TILER_HEAP_TERM,
2045 kbasep_cs_tiler_heap_term,
2046 struct kbase_ioctl_cs_tiler_heap_term,
2047 kctx);
2048 break;
2049 case KBASE_IOCTL_CS_GET_GLB_IFACE:
2050 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_GET_GLB_IFACE,
2051 kbase_ioctl_cs_get_glb_iface,
2052 union kbase_ioctl_cs_get_glb_iface,
2053 kctx);
2054 break;
2055 case KBASE_IOCTL_CS_CPU_QUEUE_DUMP:
2056 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_CPU_QUEUE_DUMP,
2057 kbasep_ioctl_cs_cpu_queue_dump,
2058 struct kbase_ioctl_cs_cpu_queue_info,
2059 kctx);
2060 break;
2061 /* This IOCTL will be kept for backward compatibility */
2062 case KBASE_IOCTL_READ_USER_PAGE:
2063 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_READ_USER_PAGE, kbase_ioctl_read_user_page,
2064 union kbase_ioctl_read_user_page, kctx);
2065 break;
2066 #endif /* MALI_USE_CSF */
2067 #if MALI_UNIT_TEST
2068 case KBASE_IOCTL_TLSTREAM_STATS:
2069 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
2070 kbase_api_tlstream_stats,
2071 struct kbase_ioctl_tlstream_stats,
2072 kctx);
2073 break;
2074 #endif /* MALI_UNIT_TEST */
2075 case KBASE_IOCTL_CONTEXT_PRIORITY_CHECK:
2076 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CONTEXT_PRIORITY_CHECK,
2077 kbasep_ioctl_context_priority_check,
2078 struct kbase_ioctl_context_priority_check,
2079 kctx);
2080 break;
2081 case KBASE_IOCTL_SET_LIMITED_CORE_COUNT:
2082 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_LIMITED_CORE_COUNT,
2083 kbasep_ioctl_set_limited_core_count,
2084 struct kbase_ioctl_set_limited_core_count,
2085 kctx);
2086 break;
2087 }
2088
2089 dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d", cmd, _IOC_NR(cmd));
2090
2091 return -ENOIOCTLCMD;
2092 }
2093
2094 #if MALI_USE_CSF
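/**
 * kbase_read - Read callback for the CSF build of the kbase device file.
 * @filp:  The device file instance.
 * @buf:   Userspace buffer receiving a single struct base_csf_notification.
 * @count: Size of the userspace buffer; must be at least
 *         sizeof(struct base_csf_notification).
 * @f_pos: File position (unused).
 *
 * Reports at most one pending notification per call: a user event, a queue
 * group error, or a CPU queue dump request. If nothing is pending, a
 * BASE_CSF_NOTIFICATION_EVENT record is still returned; this is not treated
 * as an error.
 *
 * Return: sizeof(struct base_csf_notification) on success, or a negative
 * error code (-EPERM, -ENOBUFS or -EFAULT) on failure.
 */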
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
2096 {
2097 struct kbase_file *const kfile = filp->private_data;
2098 struct kbase_context *const kctx =
2099 kbase_file_get_kctx_if_setup_complete(kfile);
2100 struct base_csf_notification event_data = {
2101 .type = BASE_CSF_NOTIFICATION_EVENT };
2102 const size_t data_size = sizeof(event_data);
2103 bool read_event = false, read_error = false;
2104
2105 if (unlikely(!kctx))
2106 return -EPERM;
2107
2108 if (count < data_size)
2109 return -ENOBUFS;
2110
2111 if (atomic_read(&kctx->event_count))
2112 read_event = true;
2113 else
2114 read_error = kbase_csf_event_read_error(kctx, &event_data);
2115
2116 if (!read_event && !read_error) {
2117 bool dump = kbase_csf_cpu_queue_read_dump_req(kctx,
2118 &event_data);
		/* This condition is not treated as an error.
		 * It is possible that the event handling thread was woken up
		 * due to a fault/error that occurred for a queue group, but
		 * the queue group was terminated by userspace before the
		 * corresponding fault data could be read by the thread.
		 */
2125 if (!dump)
2126 dev_dbg(kctx->kbdev->dev,
2127 "Neither event nor error signaled");
2128 }
2129
2130 if (copy_to_user(buf, &event_data, data_size) != 0) {
2131 dev_warn(kctx->kbdev->dev,
2132 "Failed to copy data\n");
2133 return -EFAULT;
2134 }
2135
2136 if (read_event)
2137 atomic_set(&kctx->event_count, 0);
2138
2139 return data_size;
2140 }
2141 #else /* MALI_USE_CSF */
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
2143 {
2144 struct kbase_file *const kfile = filp->private_data;
2145 struct kbase_context *const kctx =
2146 kbase_file_get_kctx_if_setup_complete(kfile);
2147 struct base_jd_event_v2 uevent;
2148 int out_count = 0;
2149
2150 if (unlikely(!kctx))
2151 return -EPERM;
2152
2153 if (count < sizeof(uevent))
2154 return -ENOBUFS;
2155
2156 memset(&uevent, 0, sizeof(uevent));
2157
2158 do {
2159 while (kbase_event_dequeue(kctx, &uevent)) {
2160 if (out_count > 0)
2161 goto out;
2162
2163 if (filp->f_flags & O_NONBLOCK)
2164 return -EAGAIN;
2165
2166 if (wait_event_interruptible(kctx->event_queue,
2167 kbase_event_pending(kctx)) != 0)
2168 return -ERESTARTSYS;
2169 }
2170 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
2171 if (out_count == 0)
2172 return -EPIPE;
2173 goto out;
2174 }
2175
2176 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
2177 return -EFAULT;
2178
2179 buf += sizeof(uevent);
2180 out_count++;
2181 count -= sizeof(uevent);
2182 } while (count >= sizeof(uevent));
2183
2184 out:
2185 return out_count * sizeof(uevent);
2186 }
2187 #endif /* MALI_USE_CSF */
2188
static __poll_t kbase_poll(struct file *filp, poll_table *wait)
2190 {
2191 struct kbase_file *const kfile = filp->private_data;
2192 struct kbase_context *const kctx =
2193 kbase_file_get_kctx_if_setup_complete(kfile);
2194
2195 if (unlikely(!kctx)) {
2196 #if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
2197 return POLLERR;
2198 #else
2199 return EPOLLERR;
2200 #endif
2201 }
2202
2203 poll_wait(filp, &kctx->event_queue, wait);
2204 if (kbase_event_pending(kctx)) {
2205 #if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
2206 return POLLIN | POLLRDNORM;
2207 #else
2208 return EPOLLIN | EPOLLRDNORM;
2209 #endif
2210 }
2211
2212 return 0;
2213 }
2214
void kbase_event_wakeup(struct kbase_context *kctx)
2216 {
2217 KBASE_DEBUG_ASSERT(kctx);
2218 dev_dbg(kctx->kbdev->dev, "Waking event queue for context %pK\n",
2219 (void *)kctx);
2220 wake_up_interruptible(&kctx->event_queue);
2221 }
2222
2223 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
2224
2225 #if MALI_USE_CSF
int kbase_event_pending(struct kbase_context *ctx)
2227 {
2228 KBASE_DEBUG_ASSERT(ctx);
2229
2230 if (unlikely(!ctx))
2231 return -EPERM;
2232
2233 return (atomic_read(&ctx->event_count) != 0) ||
2234 kbase_csf_event_error_pending(ctx) ||
2235 kbase_csf_cpu_queue_dump_needed(ctx);
2236 }
2237 #else
int kbase_event_pending(struct kbase_context *ctx)
2239 {
2240 KBASE_DEBUG_ASSERT(ctx);
2241
2242 if (unlikely(!ctx))
2243 return -EPERM;
2244
2245 return (atomic_read(&ctx->event_count) != 0) ||
2246 (atomic_read(&ctx->event_closed) != 0);
2247 }
2248 #endif
2249
2250 KBASE_EXPORT_TEST_API(kbase_event_pending);
2251
static int kbase_mmap(struct file *const filp, struct vm_area_struct *const vma)
2253 {
2254 struct kbase_file *const kfile = filp->private_data;
2255 struct kbase_context *const kctx =
2256 kbase_file_get_kctx_if_setup_complete(kfile);
2257
2258 if (unlikely(!kctx))
2259 return -EPERM;
2260
2261 return kbase_context_mmap(kctx, vma);
2262 }
2263
static int kbase_check_flags(int flags)
2265 {
2266 /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
2267 * closes the file descriptor in a child process.
2268 */
2269 if (0 == (flags & O_CLOEXEC))
2270 return -EINVAL;
2271
2272 return 0;
2273 }
2274
static unsigned long kbase_get_unmapped_area(struct file *const filp,
2276 const unsigned long addr, const unsigned long len,
2277 const unsigned long pgoff, const unsigned long flags)
2278 {
2279 struct kbase_file *const kfile = filp->private_data;
2280 struct kbase_context *const kctx =
2281 kbase_file_get_kctx_if_setup_complete(kfile);
2282
2283 if (unlikely(!kctx))
2284 return -EPERM;
2285
2286 return kbase_context_get_unmapped_area(kctx, addr, len, pgoff, flags);
2287 }
2288
2289 static const struct file_operations kbase_fops = {
2290 .owner = THIS_MODULE,
2291 .open = kbase_open,
2292 .release = kbase_release,
2293 .read = kbase_read,
2294 .poll = kbase_poll,
2295 .unlocked_ioctl = kbase_ioctl,
2296 .compat_ioctl = kbase_ioctl,
2297 .mmap = kbase_mmap,
2298 .check_flags = kbase_check_flags,
2299 .get_unmapped_area = kbase_get_unmapped_area,
2300 };
2301
2302 /**
2303 * power_policy_show - Show callback for the power_policy sysfs file.
2304 *
2305 * @dev: The device this sysfs file is for
2306 * @attr: The attributes of the sysfs file
2307 * @buf: The output buffer for the sysfs file contents
2308 *
2309 * This function is called to get the contents of the power_policy sysfs
2310 * file. This is a list of the available policies with the currently active one
2311 * surrounded by square brackets.
2312 *
2313 * Return: The number of bytes output to @buf.
2314 */
static ssize_t power_policy_show(struct device *dev, struct device_attribute *attr, char *const buf)
2316 {
2317 struct kbase_device *kbdev;
2318 const struct kbase_pm_policy *current_policy;
2319 const struct kbase_pm_policy *const *policy_list;
2320 int policy_count;
2321 int i;
2322 ssize_t ret = 0;
2323
2324 kbdev = to_kbase_device(dev);
2325
2326 if (!kbdev)
2327 return -ENODEV;
2328
2329 current_policy = kbase_pm_get_policy(kbdev);
2330
2331 policy_count = kbase_pm_list_policies(kbdev, &policy_list);
2332
2333 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
2334 if (policy_list[i] == current_policy)
2335 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
2336 else
2337 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
2338 }
2339
2340 if (ret < PAGE_SIZE - 1) {
2341 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2342 } else {
2343 buf[PAGE_SIZE - 2] = '\n';
2344 buf[PAGE_SIZE - 1] = '\0';
2345 ret = PAGE_SIZE - 1;
2346 }
2347
2348 return ret;
2349 }
2350
2351 /**
2352 * power_policy_store - Store callback for the power_policy sysfs file.
2353 *
 * @dev: The device this sysfs file is for
2355 * @attr: The attributes of the sysfs file
2356 * @buf: The value written to the sysfs file
2357 * @count: The number of bytes to write to the sysfs file
2358 *
2359 * This function is called when the power_policy sysfs file is written to.
2360 * It matches the requested policy against the available policies and if a
2361 * matching policy is found calls kbase_pm_set_policy() to change the
2362 * policy.
2363 *
2364 * Return: @count if the function succeeded. An error code on failure.
2365 */
static ssize_t power_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2367 {
2368 struct kbase_device *kbdev;
2369 const struct kbase_pm_policy *new_policy = NULL;
2370 const struct kbase_pm_policy *const *policy_list;
2371 int policy_count;
2372 int i;
2373
2374 kbdev = to_kbase_device(dev);
2375
2376 if (!kbdev)
2377 return -ENODEV;
2378
2379 policy_count = kbase_pm_list_policies(kbdev, &policy_list);
2380
2381 for (i = 0; i < policy_count; i++) {
2382 if (sysfs_streq(policy_list[i]->name, buf)) {
2383 new_policy = policy_list[i];
2384 break;
2385 }
2386 }
2387
2388 if (!new_policy) {
2389 dev_err(dev, "power_policy: policy not found\n");
2390 return -EINVAL;
2391 }
2392
2393 kbase_pm_set_policy(kbdev, new_policy);
2394
2395 return count;
2396 }
2397
2398 /*
2399 * The sysfs file power_policy.
2400 *
2401 * This is used for obtaining information about the available policies,
2402 * determining which policy is currently active, and changing the active
2403 * policy.
2404 */
2405 static DEVICE_ATTR_RW(power_policy);
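/*
 * Example usage (the sysfs path is illustrative and depends on how the Mali
 * misc device is registered on the target, e.g. /sys/class/misc/mali0/device):
 *
 *   cat power_policy                     -> "coarse_demand [always_on]"
 *   echo coarse_demand > power_policy
 *
 * The policy names are build-dependent; reading the file lists the ones
 * actually available, with the active policy in square brackets.
 */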
2406
2407 /*
2408 * core_mask_show - Show callback for the core_mask sysfs file.
2409 *
2410 * @dev: The device this sysfs file is for
2411 * @attr: The attributes of the sysfs file
2412 * @buf: The output buffer for the sysfs file contents
2413 *
2414 * This function is called to get the contents of the core_mask sysfs file.
2415 *
2416 * Return: The number of bytes output to @buf.
2417 */
static ssize_t core_mask_show(struct device *dev, struct device_attribute *attr, char * const buf)
2419 {
2420 struct kbase_device *kbdev;
2421 unsigned long flags;
2422 ssize_t ret = 0;
2423
2424 kbdev = to_kbase_device(dev);
2425
2426 if (!kbdev)
2427 return -ENODEV;
2428
2429 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2430
2431 #if MALI_USE_CSF
2432 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2433 "Current debug core mask : 0x%llX\n",
2434 kbdev->pm.debug_core_mask);
2435 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2436 "Current desired core mask : 0x%llX\n",
2437 kbase_pm_ca_get_core_mask(kbdev));
2438 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2439 "Current in use core mask : 0x%llX\n",
2440 kbdev->pm.backend.shaders_avail);
2441 #else
2442 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2443 "Current core mask (JS0) : 0x%llX\n",
2444 kbdev->pm.debug_core_mask[0]);
2445 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2446 "Current core mask (JS1) : 0x%llX\n",
2447 kbdev->pm.debug_core_mask[1]);
2448 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2449 "Current core mask (JS2) : 0x%llX\n",
2450 kbdev->pm.debug_core_mask[2]);
2451 #endif /* MALI_USE_CSF */
2452
2453 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2454 "Available core mask : 0x%llX\n",
2455 kbdev->gpu_props.props.raw_props.shader_present);
2456
2457 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2458
2459 return ret;
2460 }
2461
2462 /**
2463 * core_mask_store - Store callback for the core_mask sysfs file.
2464 *
 * @dev: The device this sysfs file is for
2466 * @attr: The attributes of the sysfs file
2467 * @buf: The value written to the sysfs file
2468 * @count: The number of bytes to write to the sysfs file
2469 *
2470 * This function is called when the core_mask sysfs file is written to.
2471 *
2472 * Return: @count if the function succeeded. An error code on failure.
2473 */
static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2475 {
2476 struct kbase_device *kbdev;
2477 #if MALI_USE_CSF
2478 u64 new_core_mask;
2479 #else
2480 u64 new_core_mask[3];
2481 u64 group0_core_mask;
2482 int i;
2483 #endif /* MALI_USE_CSF */
2484
2485 int items;
2486 ssize_t err = count;
2487 unsigned long flags;
2488 u64 shader_present;
2489
2490 kbdev = to_kbase_device(dev);
2491
2492 if (!kbdev)
2493 return -ENODEV;
2494
2495 #if MALI_USE_CSF
2496 items = sscanf(buf, "%llx", &new_core_mask);
2497
2498 if (items != 1) {
2499 dev_err(kbdev->dev,
2500 "Couldn't process core mask write operation.\n"
2501 "Use format <core_mask>\n");
2502 err = -EINVAL;
2503 goto end;
2504 }
2505 #else
2506 items = sscanf(buf, "%llx %llx %llx",
2507 &new_core_mask[0], &new_core_mask[1],
2508 &new_core_mask[2]);
2509
2510 if (items != 1 && items != 3) {
2511 dev_err(kbdev->dev, "Couldn't process core mask write operation.\n"
2512 "Use format <core_mask>\n"
2513 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
2514 err = -EINVAL;
2515 goto end;
2516 }
2517
2518 if (items == 1)
2519 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
2520 #endif
2521
2522 mutex_lock(&kbdev->pm.lock);
2523 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2524
2525 shader_present = kbdev->gpu_props.props.raw_props.shader_present;
2526
2527 #if MALI_USE_CSF
2528 if ((new_core_mask & shader_present) != new_core_mask) {
2529 dev_err(dev,
2530 "Invalid core mask 0x%llX: Includes non-existent cores (present = 0x%llX)",
2531 new_core_mask, shader_present);
2532 err = -EINVAL;
2533 goto unlock;
2534
2535 } else if (!(new_core_mask & shader_present &
2536 kbdev->pm.backend.ca_cores_enabled)) {
2537 dev_err(dev,
2538 "Invalid core mask 0x%llX: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX\n",
2539 new_core_mask,
2540 kbdev->gpu_props.props.raw_props.shader_present,
2541 kbdev->pm.backend.ca_cores_enabled);
2542 err = -EINVAL;
2543 goto unlock;
2544 }
2545
2546 if (kbdev->pm.debug_core_mask != new_core_mask)
2547 kbase_pm_set_debug_core_mask(kbdev, new_core_mask);
2548 #else
2549 group0_core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
2550
2551 for (i = 0; i < 3; ++i) {
2552 if ((new_core_mask[i] & shader_present) != new_core_mask[i]) {
2553 dev_err(dev, "Invalid core mask 0x%llX for JS %d: Includes non-existent cores (present = 0x%llX)",
2554 new_core_mask[i], i, shader_present);
2555 err = -EINVAL;
2556 goto unlock;
2557
2558 } else if (!(new_core_mask[i] & shader_present & kbdev->pm.backend.ca_cores_enabled)) {
2559 dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX\n",
2560 new_core_mask[i], i,
2561 kbdev->gpu_props.props.raw_props.shader_present,
2562 kbdev->pm.backend.ca_cores_enabled);
2563 err = -EINVAL;
2564 goto unlock;
2565
2566 } else if (!(new_core_mask[i] & group0_core_mask)) {
2567 dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with group 0 core mask 0x%llX\n",
2568 new_core_mask[i], i, group0_core_mask);
2569 err = -EINVAL;
2570 goto unlock;
2571 } else if (!(new_core_mask[i] & kbdev->gpu_props.curr_config.shader_present)) {
2572 dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with current core mask 0x%llX\n",
2573 new_core_mask[i], i, kbdev->gpu_props.curr_config.shader_present);
2574 err = -EINVAL;
2575 goto unlock;
2576 }
2577 }
2578
2579 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
2580 kbdev->pm.debug_core_mask[1] !=
2581 new_core_mask[1] ||
2582 kbdev->pm.debug_core_mask[2] !=
2583 new_core_mask[2]) {
2584
2585 kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
2586 new_core_mask[1], new_core_mask[2]);
2587 }
2588 #endif /* MALI_USE_CSF */
2589
2590 unlock:
2591 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2592 mutex_unlock(&kbdev->pm.lock);
2593 end:
2594 return err;
2595 }
2596
2597 /*
2598 * The sysfs file core_mask.
2599 *
2600 * This is used to restrict shader core availability for debugging purposes.
2601 * Reading it will show the current core mask and the mask of cores available.
2602 * Writing to it will set the current core mask.
2603 */
2604 static DEVICE_ATTR_RW(core_mask);
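/*
 * Example usage (sysfs path illustrative; the values written must be a subset
 * of the "Available core mask" reported when reading the file):
 *
 *   echo 0xf > core_mask                 # CSF: single debug core mask
 *   echo 0xf 0xf 0x3 > core_mask         # Job Manager: one mask per job slot
 *
 * On Job Manager GPUs a single value applies the same mask to all three job
 * slots, as handled in core_mask_store() above.
 */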
2605
2606 #if !MALI_USE_CSF
2607 /**
2608 * soft_job_timeout_store - Store callback for the soft_job_timeout sysfs
2609 * file.
2610 *
2611 * @dev: The device this sysfs file is for.
2612 * @attr: The attributes of the sysfs file.
2613 * @buf: The value written to the sysfs file.
2614 * @count: The number of bytes to write to the sysfs file.
2615 *
2616 * This allows setting the timeout for software jobs. Waiting soft event wait
2617 * jobs will be cancelled after this period expires, while soft fence wait jobs
2618 * will print debug information if the fence debug feature is enabled.
2619 *
2620 * This is expressed in milliseconds.
2621 *
2622 * Return: count if the function succeeded. An error code on failure.
2623 */
static ssize_t soft_job_timeout_store(struct device *dev,
2625 struct device_attribute *attr,
2626 const char *buf, size_t count)
2627 {
2628 struct kbase_device *kbdev;
2629 int soft_job_timeout_ms;
2630
2631 kbdev = to_kbase_device(dev);
2632 if (!kbdev)
2633 return -ENODEV;
2634
2635 if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
2636 (soft_job_timeout_ms <= 0))
2637 return -EINVAL;
2638
2639 atomic_set(&kbdev->js_data.soft_job_timeout_ms,
2640 soft_job_timeout_ms);
2641
2642 return count;
2643 }
2644
2645 /**
2646 * soft_job_timeout_show - Show callback for the soft_job_timeout sysfs
2647 * file.
2648 *
2649 * @dev: The device this sysfs file is for.
2650 * @attr: The attributes of the sysfs file.
2651 * @buf: The output buffer for the sysfs file contents.
2652 *
2653 * This will return the timeout for the software jobs.
2654 *
2655 * Return: The number of bytes output to buf.
2656 */
static ssize_t soft_job_timeout_show(struct device *dev,
2658 struct device_attribute *attr,
2659 char * const buf)
2660 {
2661 struct kbase_device *kbdev;
2662
2663 kbdev = to_kbase_device(dev);
2664 if (!kbdev)
2665 return -ENODEV;
2666
2667 return scnprintf(buf, PAGE_SIZE, "%i\n",
2668 atomic_read(&kbdev->js_data.soft_job_timeout_ms));
2669 }
2670
2671 static DEVICE_ATTR_RW(soft_job_timeout);
2672
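/**
 * timeout_ms_to_ticks - Convert a timeout in milliseconds to scheduling ticks.
 * @kbdev:         Kbase device holding the current scheduling period.
 * @timeout_ms:    Requested timeout in milliseconds.
 * @default_ticks: Value to use when @timeout_ms is negative.
 * @old_ticks:     Value to keep when @timeout_ms is zero.
 *
 * A positive @timeout_ms is divided by the scheduling period (clamped to a
 * minimum of one tick), a negative value selects @default_ticks, and zero
 * leaves the previous setting unchanged.
 *
 * Return: The timeout expressed in scheduling ticks.
 */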
static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
2674 int default_ticks, u32 old_ticks)
2675 {
2676 if (timeout_ms > 0) {
2677 u64 ticks = timeout_ms * 1000000ULL;
2678
2679 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2680 if (!ticks)
2681 return 1;
2682 return ticks;
2683 } else if (timeout_ms < 0) {
2684 return default_ticks;
2685 } else {
2686 return old_ticks;
2687 }
2688 }
2689
2690 /**
2691 * js_timeouts_store - Store callback for the js_timeouts sysfs file.
2692 *
 * @dev: The device this sysfs file is for
2694 * @attr: The attributes of the sysfs file
2695 * @buf: The value written to the sysfs file
2696 * @count: The number of bytes to write to the sysfs file
2697 *
 * This function is called when the js_timeouts sysfs file is written to. The
 * file contains eight values separated by whitespace. The values are basically
 * the same as the %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL,
 * %JS_HARD_STOP_TICKS_SS, %JS_HARD_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_DUMPING,
 * %JS_RESET_TICKS_SS, %JS_RESET_TICKS_CL and %JS_RESET_TICKS_DUMPING
 * configuration values (in that order), with the difference that the
 * js_timeouts values are expressed in milliseconds.
 *
 * The js_timeouts sysfs file allows the current values in use by the job
 * scheduler to be overridden. Note that a value needs to be other than 0 for
 * it to override the current job scheduler value.
2708 *
2709 * Return: @count if the function succeeded. An error code on failure.
2710 */
static ssize_t js_timeouts_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2712 {
2713 struct kbase_device *kbdev;
2714 int items;
2715 long js_soft_stop_ms;
2716 long js_soft_stop_ms_cl;
2717 long js_hard_stop_ms_ss;
2718 long js_hard_stop_ms_cl;
2719 long js_hard_stop_ms_dumping;
2720 long js_reset_ms_ss;
2721 long js_reset_ms_cl;
2722 long js_reset_ms_dumping;
2723
2724 kbdev = to_kbase_device(dev);
2725 if (!kbdev)
2726 return -ENODEV;
2727
2728 items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
2729 &js_soft_stop_ms, &js_soft_stop_ms_cl,
2730 &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
2731 &js_hard_stop_ms_dumping, &js_reset_ms_ss,
2732 &js_reset_ms_cl, &js_reset_ms_dumping);
2733
2734 if (items == 8) {
2735 struct kbasep_js_device_data *js_data = &kbdev->js_data;
2736 unsigned long flags;
2737
2738 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2739
2740 #define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
2741 js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
2742 default, js_data->ticks_name); \
2743 dev_dbg(kbdev->dev, "Overriding " #ticks_name \
2744 " with %lu ticks (%lu ms)\n", \
2745 (unsigned long)js_data->ticks_name, \
2746 ms_name); \
2747 } while (0)
2748
2749 UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
2750 DEFAULT_JS_SOFT_STOP_TICKS);
2751 UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
2752 DEFAULT_JS_SOFT_STOP_TICKS_CL);
2753 UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
2754 DEFAULT_JS_HARD_STOP_TICKS_SS);
2755 UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
2756 DEFAULT_JS_HARD_STOP_TICKS_CL);
2757 UPDATE_TIMEOUT(hard_stop_ticks_dumping,
2758 js_hard_stop_ms_dumping,
2759 DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
2760 UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
2761 DEFAULT_JS_RESET_TICKS_SS);
2762 UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
2763 DEFAULT_JS_RESET_TICKS_CL);
2764 UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
2765 DEFAULT_JS_RESET_TICKS_DUMPING);
2766
2767 kbase_js_set_timeouts(kbdev);
2768
2769 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2770
2771 return count;
2772 }
2773
2774 dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2775 "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2776 "Write 0 for no change, -1 to restore default timeout\n");
2777 return -EINVAL;
2778 }
2779
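/**
 * get_js_timeout_in_ms - Convert a tick count back into milliseconds.
 * @scheduling_period_ns: Scheduling period, in nanoseconds.
 * @ticks:                Number of scheduling ticks.
 *
 * Return: The corresponding timeout in milliseconds, as reported by
 * js_timeouts_show().
 */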
static unsigned long get_js_timeout_in_ms(
2781 u32 scheduling_period_ns,
2782 u32 ticks)
2783 {
2784 u64 ms = (u64)ticks * scheduling_period_ns;
2785
2786 do_div(ms, 1000000UL);
2787 return ms;
2788 }
2789
2790 /**
2791 * js_timeouts_show - Show callback for the js_timeouts sysfs file.
2792 *
2793 * @dev: The device this sysfs file is for
2794 * @attr: The attributes of the sysfs file
2795 * @buf: The output buffer for the sysfs file contents
2796 *
2797 * This function is called to get the contents of the js_timeouts sysfs
2798 * file. It returns the last set values written to the js_timeouts sysfs file.
2799 * If the file didn't get written yet, the values will be current setting in
2800 * use.
2801 *
2802 * Return: The number of bytes output to @buf.
2803 */
static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *attr, char * const buf)
2805 {
2806 struct kbase_device *kbdev;
2807 ssize_t ret;
2808 unsigned long js_soft_stop_ms;
2809 unsigned long js_soft_stop_ms_cl;
2810 unsigned long js_hard_stop_ms_ss;
2811 unsigned long js_hard_stop_ms_cl;
2812 unsigned long js_hard_stop_ms_dumping;
2813 unsigned long js_reset_ms_ss;
2814 unsigned long js_reset_ms_cl;
2815 unsigned long js_reset_ms_dumping;
2816 u32 scheduling_period_ns;
2817
2818 kbdev = to_kbase_device(dev);
2819 if (!kbdev)
2820 return -ENODEV;
2821
2822 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2823
2824 #define GET_TIMEOUT(name) get_js_timeout_in_ms(\
2825 scheduling_period_ns, \
2826 kbdev->js_data.name)
2827
2828 js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
2829 js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
2830 js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
2831 js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
2832 js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
2833 js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
2834 js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
2835 js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
2836
2837 #undef GET_TIMEOUT
2838
2839 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2840 js_soft_stop_ms, js_soft_stop_ms_cl,
2841 js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2842 js_hard_stop_ms_dumping, js_reset_ms_ss,
2843 js_reset_ms_cl, js_reset_ms_dumping);
2844
2845 if (ret >= PAGE_SIZE) {
2846 buf[PAGE_SIZE - 2] = '\n';
2847 buf[PAGE_SIZE - 1] = '\0';
2848 ret = PAGE_SIZE - 1;
2849 }
2850
2851 return ret;
2852 }
2853
2854 /*
2855 * The sysfs file js_timeouts.
2856 *
2857 * This is used to override the current job scheduler values for
 * JS_SOFT_STOP_TICKS
 * JS_SOFT_STOP_TICKS_CL
2860 * JS_HARD_STOP_TICKS_SS
2861 * JS_HARD_STOP_TICKS_CL
2862 * JS_HARD_STOP_TICKS_DUMPING
2863 * JS_RESET_TICKS_SS
2864 * JS_RESET_TICKS_CL
2865 * JS_RESET_TICKS_DUMPING.
2866 */
2867 static DEVICE_ATTR_RW(js_timeouts);
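/*
 * Example usage (sysfs path illustrative): override only the soft-stop
 * timeout, leave everything else untouched, then later restore its default:
 *
 *   echo 500 0 0 0 0 0 0 0 > js_timeouts
 *   echo -1 0 0 0 0 0 0 0 > js_timeouts
 *
 * All eight values are in milliseconds; 0 means "no change" and -1 restores
 * the built-in default, as described for js_timeouts_store().
 */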
2868
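/**
 * get_new_js_timeout - Rescale a tick count for a new scheduling period.
 * @old_period:               Previous scheduling period, in nanoseconds.
 * @old_ticks:                Timeout under the previous period, in ticks.
 * @new_scheduling_period_ns: New scheduling period, in nanoseconds.
 *
 * Keeps the timeout at roughly the same wall-clock duration when the
 * scheduling period changes.
 *
 * Return: The rescaled tick count, never less than one tick.
 */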
static u32 get_new_js_timeout(
2870 u32 old_period,
2871 u32 old_ticks,
2872 u32 new_scheduling_period_ns)
2873 {
2874 u64 ticks = (u64)old_period * (u64)old_ticks;
2875
2876 do_div(ticks, new_scheduling_period_ns);
2877 return ticks?ticks:1;
2878 }
2879
2880 /**
2881 * js_scheduling_period_store - Store callback for the js_scheduling_period sysfs
2882 * file
2883 * @dev: The device the sysfs file is for
2884 * @attr: The attributes of the sysfs file
2885 * @buf: The value written to the sysfs file
2886 * @count: The number of bytes to write to the sysfs file
2887 *
2888 * This function is called when the js_scheduling_period sysfs file is written
2889 * to. It checks the data written, and if valid updates the js_scheduling_period
2890 * value
2891 *
2892 * Return: @count if the function succeeded. An error code on failure.
2893 */
static ssize_t js_scheduling_period_store(struct device *dev,
2895 struct device_attribute *attr, const char *buf, size_t count)
2896 {
2897 struct kbase_device *kbdev;
2898 int ret;
2899 unsigned int js_scheduling_period;
2900 u32 new_scheduling_period_ns;
2901 u32 old_period;
2902 struct kbasep_js_device_data *js_data;
2903 unsigned long flags;
2904
2905 kbdev = to_kbase_device(dev);
2906 if (!kbdev)
2907 return -ENODEV;
2908
2909 js_data = &kbdev->js_data;
2910
2911 ret = kstrtouint(buf, 0, &js_scheduling_period);
2912 if (ret || !js_scheduling_period) {
2913 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2914 "Use format <js_scheduling_period_ms>\n");
2915 return -EINVAL;
2916 }
2917
2918 new_scheduling_period_ns = js_scheduling_period * 1000000;
2919
2920 /* Update scheduling timeouts */
2921 mutex_lock(&js_data->runpool_mutex);
2922 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2923
2924 /* If no contexts have been scheduled since js_timeouts was last written
2925 * to, the new timeouts might not have been latched yet. So check if an
2926 * update is pending and use the new values if necessary.
2927 */
2928
2929 /* Use previous 'new' scheduling period as a base if present. */
2930 old_period = js_data->scheduling_period_ns;
2931
2932 #define SET_TIMEOUT(name) \
2933 (js_data->name = get_new_js_timeout(\
2934 old_period, \
2935 kbdev->js_data.name, \
2936 new_scheduling_period_ns))
2937
2938 SET_TIMEOUT(soft_stop_ticks);
2939 SET_TIMEOUT(soft_stop_ticks_cl);
2940 SET_TIMEOUT(hard_stop_ticks_ss);
2941 SET_TIMEOUT(hard_stop_ticks_cl);
2942 SET_TIMEOUT(hard_stop_ticks_dumping);
2943 SET_TIMEOUT(gpu_reset_ticks_ss);
2944 SET_TIMEOUT(gpu_reset_ticks_cl);
2945 SET_TIMEOUT(gpu_reset_ticks_dumping);
2946
2947 #undef SET_TIMEOUT
2948
2949 js_data->scheduling_period_ns = new_scheduling_period_ns;
2950
2951 kbase_js_set_timeouts(kbdev);
2952
2953 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2954 mutex_unlock(&js_data->runpool_mutex);
2955
2956 dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2957 js_scheduling_period);
2958
2959 return count;
2960 }
2961
2962 /**
2963 * js_scheduling_period_show - Show callback for the js_scheduling_period sysfs
2964 * entry.
2965 * @dev: The device this sysfs file is for.
2966 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the sysfs file contents.
2968 *
2969 * This function is called to get the current period used for the JS scheduling
2970 * period.
2971 *
2972 * Return: The number of bytes output to @buf.
2973 */
static ssize_t js_scheduling_period_show(struct device *dev,
2975 struct device_attribute *attr, char * const buf)
2976 {
2977 struct kbase_device *kbdev;
2978 u32 period;
2979 ssize_t ret;
2980
2981 kbdev = to_kbase_device(dev);
2982 if (!kbdev)
2983 return -ENODEV;
2984
2985 period = kbdev->js_data.scheduling_period_ns;
2986
2987 ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2988 period / 1000000);
2989
2990 return ret;
2991 }
2992
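/*
 * The sysfs file js_scheduling_period.
 *
 * This is used to set the period, in milliseconds, of the JS scheduling tick.
 * Existing JS timeouts are rescaled so that they keep approximately the same
 * wall-clock duration under the new period.
 */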
2993 static DEVICE_ATTR_RW(js_scheduling_period);
2994
2995
2996 #ifdef CONFIG_MALI_BIFROST_DEBUG
static ssize_t js_softstop_always_store(struct device *dev,
2998 struct device_attribute *attr, const char *buf, size_t count)
2999 {
3000 struct kbase_device *kbdev;
3001 int ret;
3002 int softstop_always;
3003
3004 kbdev = to_kbase_device(dev);
3005 if (!kbdev)
3006 return -ENODEV;
3007
3008 ret = kstrtoint(buf, 0, &softstop_always);
3009 if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
3010 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
3011 "Use format <soft_stop_always>\n");
3012 return -EINVAL;
3013 }
3014
3015 kbdev->js_data.softstop_always = (bool) softstop_always;
3016 dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
3017 (kbdev->js_data.softstop_always) ?
3018 "Enabled" : "Disabled");
3019 return count;
3020 }
3021
static ssize_t js_softstop_always_show(struct device *dev,
3023 struct device_attribute *attr, char * const buf)
3024 {
3025 struct kbase_device *kbdev;
3026 ssize_t ret;
3027
3028 kbdev = to_kbase_device(dev);
3029 if (!kbdev)
3030 return -ENODEV;
3031
3032 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
3033
3034 if (ret >= PAGE_SIZE) {
3035 buf[PAGE_SIZE - 2] = '\n';
3036 buf[PAGE_SIZE - 1] = '\0';
3037 ret = PAGE_SIZE - 1;
3038 }
3039
3040 return ret;
3041 }
3042
3043 /*
3044 * By default, soft-stops are disabled when only a single context is present.
3045 * The ability to enable soft-stop when only a single context is present can be
3046 * used for debug and unit-testing purposes.
3047 * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
3048 */
3049 static DEVICE_ATTR_RW(js_softstop_always);
3050 #endif /* CONFIG_MALI_BIFROST_DEBUG */
3051 #endif /* !MALI_USE_CSF */
3052
3053 #ifdef CONFIG_MALI_BIFROST_DEBUG
3054 typedef void kbasep_debug_command_func(struct kbase_device *);
3055
3056 enum kbasep_debug_command_code {
3057 KBASEP_DEBUG_COMMAND_DUMPTRACE,
3058
3059 /* This must be the last enum */
3060 KBASEP_DEBUG_COMMAND_COUNT
3061 };
3062
3063 struct kbasep_debug_command {
3064 char *str;
3065 kbasep_debug_command_func *func;
3066 };
3067
static void kbasep_ktrace_dump_wrapper(struct kbase_device *kbdev)
3069 {
3070 KBASE_KTRACE_DUMP(kbdev);
3071 }
3072
3073 /* Debug commands supported by the driver */
3074 static const struct kbasep_debug_command debug_commands[] = {
3075 {
3076 .str = "dumptrace",
3077 .func = &kbasep_ktrace_dump_wrapper,
3078 }
3079 };
3080
3081 /**
3082 * debug_command_show - Show callback for the debug_command sysfs file.
3083 *
3084 * @dev: The device this sysfs file is for
3085 * @attr: The attributes of the sysfs file
3086 * @buf: The output buffer for the sysfs file contents
3087 *
3088 * This function is called to get the contents of the debug_command sysfs
3089 * file. This is a list of the available debug commands, separated by newlines.
3090 *
3091 * Return: The number of bytes output to @buf.
3092 */
static ssize_t debug_command_show(struct device *dev, struct device_attribute *attr, char * const buf)
3094 {
3095 struct kbase_device *kbdev;
3096 int i;
3097 ssize_t ret = 0;
3098
3099 kbdev = to_kbase_device(dev);
3100
3101 if (!kbdev)
3102 return -ENODEV;
3103
3104 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
3105 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
3106
3107 if (ret >= PAGE_SIZE) {
3108 buf[PAGE_SIZE - 2] = '\n';
3109 buf[PAGE_SIZE - 1] = '\0';
3110 ret = PAGE_SIZE - 1;
3111 }
3112
3113 return ret;
3114 }
3115
3116 /**
3117 * debug_command_store - Store callback for the debug_command sysfs file.
3118 *
 * @dev: The device this sysfs file is for
3120 * @attr: The attributes of the sysfs file
3121 * @buf: The value written to the sysfs file
3122 * @count: The number of bytes written to the sysfs file
3123 *
3124 * This function is called when the debug_command sysfs file is written to.
3125 * It matches the requested command against the available commands, and if
3126 * a matching command is found calls the associated function from
3127 * @debug_commands to issue the command.
3128 *
3129 * Return: @count if the function succeeded. An error code on failure.
3130 */
static ssize_t debug_command_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3132 {
3133 struct kbase_device *kbdev;
3134 int i;
3135
3136 kbdev = to_kbase_device(dev);
3137
3138 if (!kbdev)
3139 return -ENODEV;
3140
3141 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
3142 if (sysfs_streq(debug_commands[i].str, buf)) {
3143 debug_commands[i].func(kbdev);
3144 return count;
3145 }
3146 }
3147
3148 /* Debug Command not found */
3149 dev_err(dev, "debug_command: command not known\n");
3150 return -EINVAL;
3151 }
3152
3153 /* The sysfs file debug_command.
3154 *
3155 * This is used to issue general debug commands to the device driver.
3156 * Reading it will produce a list of debug commands, separated by newlines.
3157 * Writing to it with one of those commands will issue said command.
3158 */
3159 static DEVICE_ATTR_RW(debug_command);
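/*
 * Example usage (sysfs path illustrative):
 *
 *   cat debug_command                 -> "dumptrace"
 *   echo dumptrace > debug_command
 *
 * Writing "dumptrace" invokes KBASE_KTRACE_DUMP() for the device via
 * kbasep_ktrace_dump_wrapper().
 */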
3160 #endif /* CONFIG_MALI_BIFROST_DEBUG */
3161
3162 /**
3163 * gpuinfo_show - Show callback for the gpuinfo sysfs entry.
3164 * @dev: The device this sysfs file is for.
3165 * @attr: The attributes of the sysfs file.
3166 * @buf: The output buffer to receive the GPU information.
3167 *
3168 * This function is called to get a description of the present Mali
3169 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
3170 * number of cores, the hardware version and the raw product id. For
3171 * example
3172 *
3173 * Mali-T60x MP4 r0p0 0x6956
3174 *
3175 * Return: The number of bytes output to @buf.
3176 */
static ssize_t gpuinfo_show(struct device *dev,
3178 struct device_attribute *attr, char *buf)
3179 {
3180 static const struct gpu_product_id_name {
3181 unsigned int id;
3182 char *name;
3183 } gpu_product_id_names[] = {
3184 { .id = GPU_ID2_PRODUCT_TMIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3185 .name = "Mali-G71" },
3186 { .id = GPU_ID2_PRODUCT_THEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3187 .name = "Mali-G72" },
3188 { .id = GPU_ID2_PRODUCT_TSIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3189 .name = "Mali-G51" },
3190 { .id = GPU_ID2_PRODUCT_TNOX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3191 .name = "Mali-G76" },
3192 { .id = GPU_ID2_PRODUCT_TDVX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3193 .name = "Mali-G31" },
3194 { .id = GPU_ID2_PRODUCT_TGOX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3195 .name = "Mali-G52" },
3196 { .id = GPU_ID2_PRODUCT_TTRX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3197 .name = "Mali-G77" },
3198 { .id = GPU_ID2_PRODUCT_TBEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3199 .name = "Mali-G78" },
3200 { .id = GPU_ID2_PRODUCT_TBAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3201 .name = "Mali-G78AE" },
3202 { .id = GPU_ID2_PRODUCT_LBEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3203 .name = "Mali-G68" },
3204 { .id = GPU_ID2_PRODUCT_TNAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3205 .name = "Mali-G57" },
3206 { .id = GPU_ID2_PRODUCT_TODX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3207 .name = "Mali-G710" },
3208 { .id = GPU_ID2_PRODUCT_LODX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3209 .name = "Mali-G610" },
3210 { .id = GPU_ID2_PRODUCT_TGRX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3211 .name = "Mali-G510" },
3212 { .id = GPU_ID2_PRODUCT_TVAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3213 .name = "Mali-G310" },
3214 { .id = GPU_ID2_PRODUCT_TTIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3215 .name = "Mali-TTIX" },
3216 { .id = GPU_ID2_PRODUCT_LTIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3217 .name = "Mali-LTIX" },
3218 };
3219 const char *product_name = "(Unknown Mali GPU)";
3220 struct kbase_device *kbdev;
3221 u32 gpu_id;
3222 unsigned int product_id, product_id_mask;
3223 unsigned int i;
3224 struct kbase_gpu_props *gpu_props;
3225
3226 kbdev = to_kbase_device(dev);
3227 if (!kbdev)
3228 return -ENODEV;
3229
3230 gpu_props = &kbdev->gpu_props;
3231 gpu_id = gpu_props->props.raw_props.gpu_id;
3232 product_id = gpu_id >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3233 product_id_mask = GPU_ID2_PRODUCT_MODEL >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3234
3235 for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
3236 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
3237
3238 if ((p->id & product_id_mask) ==
3239 (product_id & product_id_mask)) {
3240 product_name = p->name;
3241 break;
3242 }
3243 }
3244
3245 #if MALI_USE_CSF
3246 if ((product_id & product_id_mask) ==
3247 ((GPU_ID2_PRODUCT_TTUX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT) & product_id_mask)) {
3248 const bool rt_supported =
3249 GPU_FEATURES_RAY_TRACING_GET(gpu_props->props.raw_props.gpu_features);
3250 const u8 nr_cores = gpu_props->num_cores;
3251
		 * Mali-G715-Immortalis if 10 < number of cores with ray tracing supported.
		 * Mali-G715 if 10 < number of cores without ray tracing supported.
		 * Mali-G715 if 7 <= number of cores <= 10 regardless of ray tracing.
		 * Mali-G615 if number of cores < 7.
3256 */
3257 if ((nr_cores > 10) && rt_supported)
3258 product_name = "Mali-G715-Immortalis";
3259 else if (nr_cores >= 7)
3260 product_name = "Mali-G715";
3261
3262 if (nr_cores < 7) {
3263 dev_warn(kbdev->dev, "nr_cores(%u) GPU ID must be G615", nr_cores);
3264 product_name = "Mali-G615";
3265 } else
3266 dev_dbg(kbdev->dev, "GPU ID_Name: %s, nr_cores(%u)\n", product_name,
3267 nr_cores);
3268 }
3269 #endif /* MALI_USE_CSF */
3270
3271 return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n", product_name,
3272 kbdev->gpu_props.num_cores,
3273 (gpu_id & GPU_ID_VERSION_MAJOR) >> KBASE_GPU_ID_VERSION_MAJOR_SHIFT,
3274 (gpu_id & GPU_ID_VERSION_MINOR) >> KBASE_GPU_ID_VERSION_MINOR_SHIFT,
3275 product_id);
3276 }
3277 static DEVICE_ATTR_RO(gpuinfo);
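/*
 * Reading gpuinfo produces a single line of the form
 *
 *   <product name> <core count> cores r<major>p<minor> 0x<product id>
 *
 * where the fields come from the scnprintf() call in gpuinfo_show() above.
 */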
3278
3279 /**
3280 * dvfs_period_store - Store callback for the dvfs_period sysfs file.
 * @dev: The device this sysfs file is for
3282 * @attr: The attributes of the sysfs file
3283 * @buf: The value written to the sysfs file
3284 * @count: The number of bytes written to the sysfs file
3285 *
3286 * This function is called when the dvfs_period sysfs file is written to. It
3287 * checks the data written, and if valid updates the DVFS period variable,
3288 *
3289 * Return: @count if the function succeeded. An error code on failure.
3290 */
static ssize_t dvfs_period_store(struct device *dev,
3292 struct device_attribute *attr, const char *buf, size_t count)
3293 {
3294 struct kbase_device *kbdev;
3295 int ret;
3296 int dvfs_period;
3297
3298 kbdev = to_kbase_device(dev);
3299 if (!kbdev)
3300 return -ENODEV;
3301
3302 ret = kstrtoint(buf, 0, &dvfs_period);
3303 if (ret || dvfs_period <= 0) {
3304 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
3305 "Use format <dvfs_period_ms>\n");
3306 return -EINVAL;
3307 }
3308
3309 kbdev->pm.dvfs_period = dvfs_period;
3310 dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
3311
3312 return count;
3313 }
3314
3315 /**
3316 * dvfs_period_show - Show callback for the dvfs_period sysfs entry.
3317 * @dev: The device this sysfs file is for.
3318 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the sysfs file contents.
3320 *
3321 * This function is called to get the current period used for the DVFS sample
3322 * timer.
3323 *
3324 * Return: The number of bytes output to @buf.
3325 */
static ssize_t dvfs_period_show(struct device *dev,
3327 struct device_attribute *attr, char * const buf)
3328 {
3329 struct kbase_device *kbdev;
3330 ssize_t ret;
3331
3332 kbdev = to_kbase_device(dev);
3333 if (!kbdev)
3334 return -ENODEV;
3335
3336 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
3337
3338 return ret;
3339 }
3340
3341 static DEVICE_ATTR_RW(dvfs_period);
3342
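/**
 * kbase_pm_lowest_gpu_freq_init - Determine the lowest GPU frequency to
 *                                 assume when scaling timeouts.
 * @kbdev: Kbase device.
 *
 * If OPPs are available, the lowest OPP frequency is used when it is below
 * the reference value DEFAULT_REF_TIMEOUT_FREQ_KHZ; otherwise the reference
 * value is kept. The result, in kHz, is stored in kbdev->lowest_gpu_freq_khz.
 *
 * Return: 0 on completion.
 */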
int kbase_pm_lowest_gpu_freq_init(struct kbase_device *kbdev)
3344 {
3345 	/* Uses the default reference frequency defined by the macro below */
3346 u64 lowest_freq_khz = DEFAULT_REF_TIMEOUT_FREQ_KHZ;
3347
3348 /* Only check lowest frequency in cases when OPPs are used and
3349 * present in the device tree.
3350 */
3351 #ifdef CONFIG_PM_OPP
3352 struct dev_pm_opp *opp_ptr;
3353 unsigned long found_freq = 0;
3354
3355 /* find lowest frequency OPP */
3356 opp_ptr = dev_pm_opp_find_freq_ceil(kbdev->dev, &found_freq);
3357 if (IS_ERR(opp_ptr)) {
3358 dev_err(kbdev->dev, "No OPPs found in device tree! Scaling timeouts using %llu kHz",
3359 (unsigned long long)lowest_freq_khz);
3360 } else {
3361 #if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
3362 dev_pm_opp_put(opp_ptr); /* decrease OPP refcount */
3363 #endif
3364 /* convert found frequency to KHz */
3365 found_freq /= 1000;
3366
3367 /* If lowest frequency in OPP table is still higher
3368 * than the reference, then keep the reference frequency
3369 		 * as the one to use for scaling.
3370 */
3371 if (found_freq < lowest_freq_khz)
3372 lowest_freq_khz = found_freq;
3373 }
3374 #else
3375 dev_err(kbdev->dev, "No operating-points-v2 node or operating-points property in DT");
3376 #endif
3377
3378 kbdev->lowest_gpu_freq_khz = lowest_freq_khz;
3379 dev_dbg(kbdev->dev, "Lowest frequency identified is %llu kHz", kbdev->lowest_gpu_freq_khz);
3380 return 0;
3381 }
3382
3383 /**
3384 * pm_poweroff_store - Store callback for the pm_poweroff sysfs file.
3385  * @dev:   The device this sysfs file is for
3386 * @attr: The attributes of the sysfs file
3387 * @buf: The value written to the sysfs file
3388 * @count: The number of bytes written to the sysfs file
3389 *
3390 * This function is called when the pm_poweroff sysfs file is written to.
3391 *
3392 * This file contains three values separated by whitespace. The values
3393 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
3394 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
3395 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
3396 * ticks before the GPU is powered off), in that order.
3397 *
3398 * Return: @count if the function succeeded. An error code on failure.
3399 */
3400 static ssize_t pm_poweroff_store(struct device *dev,
3401 struct device_attribute *attr, const char *buf, size_t count)
3402 {
3403 struct kbase_device *kbdev;
3404 struct kbasep_pm_tick_timer_state *stt;
3405 int items;
3406 u64 gpu_poweroff_time;
3407 unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
3408 unsigned long flags;
3409
3410 kbdev = to_kbase_device(dev);
3411 if (!kbdev)
3412 return -ENODEV;
3413
3414 items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
3415 &poweroff_shader_ticks,
3416 &poweroff_gpu_ticks);
3417 if (items != 3) {
3418 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
3419 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
3420 return -EINVAL;
3421 }
3422
3423 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3424
3425 stt = &kbdev->pm.backend.shader_tick_timer;
3426 stt->configured_interval = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
3427 stt->default_ticks = poweroff_shader_ticks;
3428 stt->configured_ticks = stt->default_ticks;
3429
3430 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3431
3432 if (poweroff_gpu_ticks != 0)
3433 dev_warn(kbdev->dev, "Separate GPU poweroff delay no longer supported.\n");
3434
3435 return count;
3436 }
3437
3438 /**
3439 * pm_poweroff_show - Show callback for the pm_poweroff sysfs entry.
3440 * @dev: The device this sysfs file is for.
3441 * @attr: The attributes of the sysfs file.
3442  * @buf:  The output buffer to receive the power-off timer settings.
3443  *
3444  * This function is called to get the current power-off timer period and the
3445  * number of ticks before an idle shader is powered off.
3446 *
3447 * Return: The number of bytes output to @buf.
3448 */
3449 static ssize_t pm_poweroff_show(struct device *dev,
3450 struct device_attribute *attr, char * const buf)
3451 {
3452 struct kbase_device *kbdev;
3453 struct kbasep_pm_tick_timer_state *stt;
3454 ssize_t ret;
3455 unsigned long flags;
3456
3457 kbdev = to_kbase_device(dev);
3458 if (!kbdev)
3459 return -ENODEV;
3460
3461 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3462
3463 stt = &kbdev->pm.backend.shader_tick_timer;
3464 ret = scnprintf(buf, PAGE_SIZE, "%llu %u 0\n",
3465 ktime_to_ns(stt->configured_interval),
3466 stt->default_ticks);
3467
3468 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3469
3470 return ret;
3471 }
3472
3473 static DEVICE_ATTR_RW(pm_poweroff);
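
/* Usage sketch for the pm_poweroff attribute (illustrative values):
 *
 *   echo "400000 2 0" > pm_poweroff
 *
 * sets the shader power-off timer period to 400000 ns with 2 ticks before an
 * idle shader is powered off. The third field must still be supplied, but a
 * non-zero value only triggers the "no longer supported" warning above.
 */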
3474
3475 /**
3476 * reset_timeout_store - Store callback for the reset_timeout sysfs file.
3477  * @dev:   The device this sysfs file is for
3478 * @attr: The attributes of the sysfs file
3479 * @buf: The value written to the sysfs file
3480 * @count: The number of bytes written to the sysfs file
3481 *
3482 * This function is called when the reset_timeout sysfs file is written to. It
3483 * checks the data written, and if valid updates the reset timeout.
3484 *
3485 * Return: @count if the function succeeded. An error code on failure.
3486 */
3487 static ssize_t reset_timeout_store(struct device *dev,
3488 struct device_attribute *attr, const char *buf, size_t count)
3489 {
3490 struct kbase_device *kbdev;
3491 int ret;
3492 int reset_timeout;
3493
3494 kbdev = to_kbase_device(dev);
3495 if (!kbdev)
3496 return -ENODEV;
3497
3498 ret = kstrtoint(buf, 0, &reset_timeout);
3499 if (ret || reset_timeout <= 0) {
3500 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
3501 "Use format <reset_timeout_ms>\n");
3502 return -EINVAL;
3503 }
3504
3505 kbdev->reset_timeout_ms = reset_timeout;
3506 dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
3507
3508 return count;
3509 }
3510
3511 /**
3512 * reset_timeout_show - Show callback for the reset_timeout sysfs entry.
3513 * @dev: The device this sysfs file is for.
3514 * @attr: The attributes of the sysfs file.
3515  * @buf:  The output buffer to receive the current reset timeout.
3516 *
3517 * This function is called to get the current reset timeout.
3518 *
3519 * Return: The number of bytes output to @buf.
3520 */
3521 static ssize_t reset_timeout_show(struct device *dev,
3522 struct device_attribute *attr, char * const buf)
3523 {
3524 struct kbase_device *kbdev;
3525 ssize_t ret;
3526
3527 kbdev = to_kbase_device(dev);
3528 if (!kbdev)
3529 return -ENODEV;
3530
3531 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
3532
3533 return ret;
3534 }
3535
3536 static DEVICE_ATTR_RW(reset_timeout);
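
/* Usage sketch: "echo 800 > reset_timeout" sets the GPU reset timeout to
 * 800 ms (illustrative value); reading the file returns the configured value.
 */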
3537
3538 static ssize_t mem_pool_size_show(struct device *dev,
3539 struct device_attribute *attr, char * const buf)
3540 {
3541 struct kbase_device *const kbdev = to_kbase_device(dev);
3542
3543 if (!kbdev)
3544 return -ENODEV;
3545
3546 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3547 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3548 kbase_mem_pool_debugfs_size);
3549 }
3550
3551 static ssize_t mem_pool_size_store(struct device *dev,
3552 struct device_attribute *attr, const char *buf, size_t count)
3553 {
3554 struct kbase_device *const kbdev = to_kbase_device(dev);
3555 int err;
3556
3557 if (!kbdev)
3558 return -ENODEV;
3559
3560 err = kbase_debugfs_helper_set_attr_from_string(buf,
3561 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3562 kbase_mem_pool_debugfs_trim);
3563
3564 return err ? err : count;
3565 }
3566
3567 static DEVICE_ATTR_RW(mem_pool_size);
3568
3569 static ssize_t mem_pool_max_size_show(struct device *dev,
3570 struct device_attribute *attr, char * const buf)
3571 {
3572 struct kbase_device *const kbdev = to_kbase_device(dev);
3573
3574 if (!kbdev)
3575 return -ENODEV;
3576
3577 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3578 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3579 kbase_mem_pool_debugfs_max_size);
3580 }
3581
3582 static ssize_t mem_pool_max_size_store(struct device *dev,
3583 struct device_attribute *attr, const char *buf, size_t count)
3584 {
3585 struct kbase_device *const kbdev = to_kbase_device(dev);
3586 int err;
3587
3588 if (!kbdev)
3589 return -ENODEV;
3590
3591 err = kbase_debugfs_helper_set_attr_from_string(buf,
3592 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3593 kbase_mem_pool_debugfs_set_max_size);
3594
3595 return err ? err : count;
3596 }
3597
3598 static DEVICE_ATTR_RW(mem_pool_max_size);
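
/* Both mem_pool_size and mem_pool_max_size operate on all
 * MEMORY_GROUP_MANAGER_NR_GROUPS pools of small pages at once: reads are
 * formatted by kbase_debugfs_helper_get_attr_to_string() and writes are
 * parsed by kbase_debugfs_helper_set_attr_from_string(), so a write may
 * carry one value per memory group.
 */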
3599
3600 /**
3601 * lp_mem_pool_size_show - Show size of the large memory pages pool.
3602 * @dev: The device this sysfs file is for.
3603 * @attr: The attributes of the sysfs file.
3604 * @buf: The output buffer to receive the pool size.
3605 *
3606 * This function is called to get the number of large memory pages which currently populate the kbdev pool.
3607 *
3608 * Return: The number of bytes output to @buf.
3609 */
3610 static ssize_t lp_mem_pool_size_show(struct device *dev,
3611 struct device_attribute *attr, char * const buf)
3612 {
3613 struct kbase_device *const kbdev = to_kbase_device(dev);
3614
3615 if (!kbdev)
3616 return -ENODEV;
3617
3618 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3619 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3620 kbase_mem_pool_debugfs_size);
3621 }
3622
3623 /**
3624 * lp_mem_pool_size_store - Set size of the large memory pages pool.
3625 * @dev: The device this sysfs file is for.
3626 * @attr: The attributes of the sysfs file.
3627 * @buf: The value written to the sysfs file.
3628 * @count: The number of bytes written to the sysfs file.
3629 *
3630 * This function is called to set the number of large memory pages which should populate the kbdev pool.
3631 * This may cause existing pages to be removed from the pool, or new pages to be created and then added to the pool.
3632 *
3633 * Return: @count if the function succeeded. An error code on failure.
3634 */
3635 static ssize_t lp_mem_pool_size_store(struct device *dev,
3636 struct device_attribute *attr, const char *buf, size_t count)
3637 {
3638 struct kbase_device *const kbdev = to_kbase_device(dev);
3639 int err;
3640
3641 if (!kbdev)
3642 return -ENODEV;
3643
3644 err = kbase_debugfs_helper_set_attr_from_string(buf,
3645 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3646 kbase_mem_pool_debugfs_trim);
3647
3648 return err ? err : count;
3649 }
3650
3651 static DEVICE_ATTR_RW(lp_mem_pool_size);
3652
3653 /**
3654 * lp_mem_pool_max_size_show - Show maximum size of the large memory pages pool.
3655 * @dev: The device this sysfs file is for.
3656 * @attr: The attributes of the sysfs file.
3657 * @buf: The output buffer to receive the pool size.
3658 *
3659 * This function is called to get the maximum number of large memory pages that the kbdev pool can possibly contain.
3660 *
3661 * Return: The number of bytes output to @buf.
3662 */
3663 static ssize_t lp_mem_pool_max_size_show(struct device *dev,
3664 struct device_attribute *attr, char * const buf)
3665 {
3666 struct kbase_device *const kbdev = to_kbase_device(dev);
3667
3668 if (!kbdev)
3669 return -ENODEV;
3670
3671 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3672 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3673 kbase_mem_pool_debugfs_max_size);
3674 }
3675
3676 /**
3677 * lp_mem_pool_max_size_store - Set maximum size of the large memory pages pool.
3678 * @dev: The device this sysfs file is for.
3679 * @attr: The attributes of the sysfs file.
3680 * @buf: The value written to the sysfs file.
3681 * @count: The number of bytes written to the sysfs file.
3682 *
3683 * This function is called to set the maximum number of large memory pages that the kbdev pool can possibly contain.
3684 *
3685 * Return: @count if the function succeeded. An error code on failure.
3686 */
3687 static ssize_t lp_mem_pool_max_size_store(struct device *dev,
3688 struct device_attribute *attr, const char *buf, size_t count)
3689 {
3690 struct kbase_device *const kbdev = to_kbase_device(dev);
3691 int err;
3692
3693 if (!kbdev)
3694 return -ENODEV;
3695
3696 err = kbase_debugfs_helper_set_attr_from_string(buf,
3697 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3698 kbase_mem_pool_debugfs_set_max_size);
3699
3700 return err ? err : count;
3701 }
3702
3703 static DEVICE_ATTR_RW(lp_mem_pool_max_size);
3704
3705 /**
3706 * show_simplified_mem_pool_max_size - Show the maximum size for the memory
3707 * pool 0 of small (4KiB) pages.
3708 * @dev: The device this sysfs file is for.
3709 * @attr: The attributes of the sysfs file.
3710 * @buf: The output buffer to receive the max size.
3711 *
3712 * This function is called to get the maximum size for the memory pool 0 of
3713  * small (4KiB) pages. It is assumed that the maximum size value is the same
3714  * for all the pools.
3715 *
3716 * Return: The number of bytes output to @buf.
3717 */
3718 static ssize_t show_simplified_mem_pool_max_size(struct device *dev,
3719 struct device_attribute *attr, char * const buf)
3720 {
3721 struct kbase_device *const kbdev = to_kbase_device(dev);
3722
3723 if (!kbdev)
3724 return -ENODEV;
3725
3726 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3727 kbdev->mem_pools.small, 1, kbase_mem_pool_debugfs_max_size);
3728 }
3729
3730 /**
3731 * set_simplified_mem_pool_max_size - Set the same maximum size for all the
3732 * memory pools of small (4KiB) pages.
3733  * @dev:   The device this sysfs file is for
3734 * @attr: The attributes of the sysfs file
3735 * @buf: The value written to the sysfs file
3736 * @count: The number of bytes written to the sysfs file
3737 *
3738 * This function is called to set the same maximum size for all the memory
3739 * pools of small (4KiB) pages.
3740 *
3741  * Return: @count if the function succeeded. An error code on failure.
3742 */
3743 static ssize_t set_simplified_mem_pool_max_size(struct device *dev,
3744 struct device_attribute *attr, const char *buf, size_t count)
3745 {
3746 struct kbase_device *const kbdev = to_kbase_device(dev);
3747 unsigned long new_size;
3748 int gid;
3749 int err;
3750
3751 if (!kbdev)
3752 return -ENODEV;
3753
3754 err = kstrtoul(buf, 0, &new_size);
3755 if (err)
3756 return -EINVAL;
3757
3758 for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid)
3759 kbase_mem_pool_debugfs_set_max_size(
3760 kbdev->mem_pools.small, gid, (size_t)new_size);
3761
3762 return count;
3763 }
3764
3765 static DEVICE_ATTR(max_size, 0600, show_simplified_mem_pool_max_size,
3766 set_simplified_mem_pool_max_size);
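
/* Note: the simplified max_size attribute is asymmetric by design. Reads
 * report only pool 0 of the small-page pools (the maximum is assumed to be
 * identical across groups), while a write applies the same limit to every
 * memory group via kbase_mem_pool_debugfs_set_max_size().
 */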
3767
3768 /**
3769 * show_simplified_lp_mem_pool_max_size - Show the maximum size for the memory
3770 * pool 0 of large (2MiB) pages.
3771 * @dev: The device this sysfs file is for.
3772 * @attr: The attributes of the sysfs file.
3773 * @buf: The output buffer to receive the total current pool size.
3774 *
3775 * This function is called to get the maximum size for the memory pool 0 of
3776  * large (2MiB) pages. It is assumed that the maximum size value is the same
3777  * for all the pools.
3778 *
3779 * Return: The number of bytes output to @buf.
3780 */
3781 static ssize_t show_simplified_lp_mem_pool_max_size(struct device *dev,
3782 struct device_attribute *attr, char * const buf)
3783 {
3784 struct kbase_device *const kbdev = to_kbase_device(dev);
3785
3786 if (!kbdev)
3787 return -ENODEV;
3788
3789 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3790 kbdev->mem_pools.large, 1, kbase_mem_pool_debugfs_max_size);
3791 }
3792
3793 /**
3794 * set_simplified_lp_mem_pool_max_size - Set the same maximum size for all the
3795 * memory pools of large (2MiB) pages.
3796  * @dev:   The device this sysfs file is for
3797 * @attr: The attributes of the sysfs file
3798 * @buf: The value written to the sysfs file
3799 * @count: The number of bytes written to the sysfs file
3800 *
3801 * This function is called to set the same maximum size for all the memory
3802 * pools of large (2MiB) pages.
3803 *
3804  * Return: @count if the function succeeded. An error code on failure.
3805 */
3806 static ssize_t set_simplified_lp_mem_pool_max_size(struct device *dev,
3807 struct device_attribute *attr, const char *buf, size_t count)
3808 {
3809 struct kbase_device *const kbdev = to_kbase_device(dev);
3810 unsigned long new_size;
3811 int gid;
3812 int err;
3813
3814 if (!kbdev)
3815 return -ENODEV;
3816
3817 err = kstrtoul(buf, 0, &new_size);
3818 if (err)
3819 return -EINVAL;
3820
3821 for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid)
3822 kbase_mem_pool_debugfs_set_max_size(
3823 kbdev->mem_pools.large, gid, (size_t)new_size);
3824
3825 return count;
3826 }
3827
3828 static DEVICE_ATTR(lp_max_size, 0600, show_simplified_lp_mem_pool_max_size,
3829 set_simplified_lp_mem_pool_max_size);
3830
3831 /**
3832 * show_simplified_ctx_default_max_size - Show the default maximum size for the
3833 * memory pool 0 of small (4KiB) pages.
3834 * @dev: The device this sysfs file is for.
3835 * @attr: The attributes of the sysfs file.
3836 * @buf: The output buffer to receive the pool size.
3837 *
3838 * This function is called to get the default ctx maximum size for the memory
3839  * pool 0 of small (4KiB) pages. It is assumed that the maximum size value is
3840  * the same for all the pools. In terms of bytes, the maximum size for the pool
3841  * of large (2MiB) pages will be the same as that of the small (4KiB) page pool.
3842 *
3843 * Return: The number of bytes output to @buf.
3844 */
3845 static ssize_t show_simplified_ctx_default_max_size(struct device *dev,
3846 struct device_attribute *attr, char * const buf)
3847 {
3848 struct kbase_device *kbdev = to_kbase_device(dev);
3849 size_t max_size;
3850
3851 if (!kbdev)
3852 return -ENODEV;
3853
3854 max_size = kbase_mem_pool_config_debugfs_max_size(
3855 kbdev->mem_pool_defaults.small, 0);
3856
3857 return scnprintf(buf, PAGE_SIZE, "%zu\n", max_size);
3858 }
3859
3860 /**
3861 * set_simplified_ctx_default_max_size - Set the same default maximum size for
3862 * all the pools created for new
3863 * contexts. This covers the pool of
3864 * large pages as well and its max size
3865 * will be same as max size of the pool
3866 * of small pages in terms of bytes.
3867 * @dev: The device this sysfs file is for.
3868 * @attr: The attributes of the sysfs file.
3869 * @buf: The value written to the sysfs file.
3870 * @count: The number of bytes written to the sysfs file.
3871 *
3872 * This function is called to set the same maximum size for all pools created
3873 * for new contexts.
3874 *
3875 * Return: @count if the function succeeded. An error code on failure.
3876 */
3877 static ssize_t set_simplified_ctx_default_max_size(struct device *dev,
3878 struct device_attribute *attr, const char *buf, size_t count)
3879 {
3880 struct kbase_device *kbdev;
3881 unsigned long new_size;
3882 int err;
3883
3884 kbdev = to_kbase_device(dev);
3885 if (!kbdev)
3886 return -ENODEV;
3887
3888 err = kstrtoul(buf, 0, &new_size);
3889 if (err)
3890 return -EINVAL;
3891
3892 kbase_mem_pool_group_config_set_max_size(
3893 &kbdev->mem_pool_defaults, (size_t)new_size);
3894
3895 return count;
3896 }
3897
3898 static DEVICE_ATTR(ctx_default_max_size, 0600,
3899 show_simplified_ctx_default_max_size,
3900 set_simplified_ctx_default_max_size);
3901
3902 #if !MALI_USE_CSF
3903 /**
3904 * js_ctx_scheduling_mode_show - Show callback for js_ctx_scheduling_mode sysfs
3905 * entry.
3906 * @dev: The device this sysfs file is for.
3907 * @attr: The attributes of the sysfs file.
3908 * @buf: The output buffer to receive the context scheduling mode information.
3909 *
3910 * This function is called to get the context scheduling mode being used by JS.
3911 *
3912 * Return: The number of bytes output to @buf.
3913 */
3914 static ssize_t js_ctx_scheduling_mode_show(struct device *dev,
3915 struct device_attribute *attr, char * const buf)
3916 {
3917 struct kbase_device *kbdev;
3918
3919 kbdev = to_kbase_device(dev);
3920 if (!kbdev)
3921 return -ENODEV;
3922
3923 return scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->js_ctx_scheduling_mode);
3924 }
3925
3926 /**
3927 * js_ctx_scheduling_mode_store - Set callback for js_ctx_scheduling_mode sysfs
3928 * entry.
3929 * @dev: The device this sysfs file is for.
3930 * @attr: The attributes of the sysfs file.
3931 * @buf: The value written to the sysfs file.
3932 * @count: The number of bytes written to the sysfs file.
3933 *
3934 * This function is called when the js_ctx_scheduling_mode sysfs file is written
3935 * to. It checks the data written, and if valid updates the ctx scheduling mode
3936  * being used by JS.
3937 *
3938 * Return: @count if the function succeeded. An error code on failure.
3939 */
3940 static ssize_t js_ctx_scheduling_mode_store(struct device *dev,
3941 struct device_attribute *attr, const char *buf, size_t count)
3942 {
3943 struct kbase_context *kctx;
3944 u32 new_js_ctx_scheduling_mode;
3945 struct kbase_device *kbdev;
3946 unsigned long flags;
3947 int ret;
3948
3949 kbdev = to_kbase_device(dev);
3950 if (!kbdev)
3951 return -ENODEV;
3952
3953 ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
3954 if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
3955 dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode"
3956 " write operation.\n"
3957 "Use format <js_ctx_scheduling_mode>\n");
3958 return -EINVAL;
3959 }
3960
3961 if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode)
3962 return count;
3963
3964 mutex_lock(&kbdev->kctx_list_lock);
3965 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3966
3967 /* Update the context priority mode */
3968 kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
3969
3970 /* Adjust priority of all the contexts as per the new mode */
3971 list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link)
3972 kbase_js_update_ctx_priority(kctx);
3973
3974 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3975 mutex_unlock(&kbdev->kctx_list_lock);
3976
3977 dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n", new_js_ctx_scheduling_mode);
3978
3979 return count;
3980 }
3981
3982 static DEVICE_ATTR_RW(js_ctx_scheduling_mode);
3983
3984 /* Number of entries in serialize_jobs_settings[] */
3985 #define NR_SERIALIZE_JOBS_SETTINGS 5
3986 /* Maximum string length in serialize_jobs_settings[].name */
3987 #define MAX_SERIALIZE_JOBS_NAME_LEN 16
3988
3989 static struct
3990 {
3991 char *name;
3992 u8 setting;
3993 } serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
3994 {"none", 0},
3995 {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
3996 {"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
3997 {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
3998 {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
3999 KBASE_SERIALIZE_RESET}
4000 };
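
/* For example, writing "full" to the serialize_jobs file selects
 * KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT, serializing job
 * submission both within and across slots, while "full-reset" additionally
 * sets KBASE_SERIALIZE_RESET.
 */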
4001
4002 /**
4003 * update_serialize_jobs_setting - Update the serialization setting for the
4004 * submission of GPU jobs.
4005 *
4006 * @kbdev: An instance of the GPU platform device, allocated from the probe
4007 * method of the driver.
4008 * @buf: Buffer containing the value written to the sysfs/debugfs file.
4009 * @count: The number of bytes to write to the sysfs/debugfs file.
4010 *
4011 * This function is called when the serialize_jobs sysfs/debugfs file is
4012 * written to. It matches the requested setting against the available settings
4013 * and if a matching setting is found updates kbdev->serialize_jobs.
4014 *
4015 * Return: @count if the function succeeded. An error code on failure.
4016 */
4017 static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev,
4018 const char *buf, size_t count)
4019 {
4020 int i;
4021 bool valid = false;
4022
4023 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
4024 if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
4025 kbdev->serialize_jobs =
4026 serialize_jobs_settings[i].setting;
4027 valid = true;
4028 break;
4029 }
4030 }
4031
4032 if (!valid) {
4033 dev_err(kbdev->dev, "serialize_jobs: invalid setting");
4034 return -EINVAL;
4035 }
4036
4037 return count;
4038 }
4039
4040 #if IS_ENABLED(CONFIG_DEBUG_FS)
4041 /**
4042 * kbasep_serialize_jobs_seq_debugfs_show - Show callback for the serialize_jobs
4043 * debugfs file
4044 * @sfile: seq_file pointer
4045 * @data: Private callback data
4046 *
4047 * This function is called to get the contents of the serialize_jobs debugfs
4048 * file. This is a list of the available settings with the currently active one
4049 * surrounded by square brackets.
4050 *
4051 * Return: 0 on success, or an error code on error
4052 */
4053 static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile,
4054 void *data)
4055 {
4056 struct kbase_device *kbdev = sfile->private;
4057 int i;
4058
4059 CSTD_UNUSED(data);
4060
4061 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
4062 if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
4063 seq_printf(sfile, "[%s] ",
4064 serialize_jobs_settings[i].name);
4065 else
4066 seq_printf(sfile, "%s ",
4067 serialize_jobs_settings[i].name);
4068 }
4069
4070 seq_puts(sfile, "\n");
4071
4072 return 0;
4073 }
4074
4075 /**
4076 * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
4077 * debugfs file.
4078 * @file: File pointer
4079 * @ubuf: User buffer containing data to store
4080 * @count: Number of bytes in user buffer
4081 * @ppos: File position
4082 *
4083 * This function is called when the serialize_jobs debugfs file is written to.
4084 * It matches the requested setting against the available settings and if a
4085 * matching setting is found updates kbdev->serialize_jobs.
4086 *
4087 * Return: @count if the function succeeded. An error code on failure.
4088 */
4089 static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
4090 const char __user *ubuf, size_t count, loff_t *ppos)
4091 {
4092 struct seq_file *s = file->private_data;
4093 struct kbase_device *kbdev = s->private;
4094 char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
4095
4096 CSTD_UNUSED(ppos);
4097
4098 count = min_t(size_t, sizeof(buf) - 1, count);
4099 if (copy_from_user(buf, ubuf, count))
4100 return -EFAULT;
4101
4102 buf[count] = 0;
4103
4104 return update_serialize_jobs_setting(kbdev, buf, count);
4105 }
4106
4107 /**
4108 * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
4109 * debugfs file
4110 * @in: inode pointer
4111 * @file: file pointer
4112 *
4113 * Return: Zero on success, error code on failure
4114 */
4115 static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
4116 struct file *file)
4117 {
4118 return single_open(file, kbasep_serialize_jobs_seq_debugfs_show,
4119 in->i_private);
4120 }
4121
4122 static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
4123 .owner = THIS_MODULE,
4124 .open = kbasep_serialize_jobs_debugfs_open,
4125 .read = seq_read,
4126 .write = kbasep_serialize_jobs_debugfs_write,
4127 .llseek = seq_lseek,
4128 .release = single_release,
4129 };
4130
4131 #endif /* CONFIG_DEBUG_FS */
4132
4133 /**
4134 * show_serialize_jobs_sysfs - Show callback for serialize_jobs sysfs file.
4135 *
4136 * @dev: The device this sysfs file is for
4137 * @attr: The attributes of the sysfs file
4138 * @buf: The output buffer for the sysfs file contents
4139 *
4140 * This function is called to get the contents of the serialize_jobs sysfs
4141 * file. This is a list of the available settings with the currently active
4142 * one surrounded by square brackets.
4143 *
4144 * Return: The number of bytes output to @buf.
4145 */
4146 static ssize_t show_serialize_jobs_sysfs(struct device *dev,
4147 struct device_attribute *attr,
4148 char *buf)
4149 {
4150 struct kbase_device *kbdev = to_kbase_device(dev);
4151 ssize_t ret = 0;
4152 int i;
4153
4154 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
4155 if (kbdev->serialize_jobs ==
4156 serialize_jobs_settings[i].setting)
4157 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s]",
4158 serialize_jobs_settings[i].name);
4159 else
4160 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ",
4161 serialize_jobs_settings[i].name);
4162 }
4163
4164 if (ret < PAGE_SIZE - 1) {
4165 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
4166 } else {
4167 buf[PAGE_SIZE - 2] = '\n';
4168 buf[PAGE_SIZE - 1] = '\0';
4169 ret = PAGE_SIZE - 1;
4170 }
4171
4172 return ret;
4173 }
4174
4175 /**
4176 * store_serialize_jobs_sysfs - Store callback for serialize_jobs sysfs file.
4177 *
4178 * @dev: The device this sysfs file is for
4179 * @attr: The attributes of the sysfs file
4180 * @buf: The value written to the sysfs file
4181 * @count: The number of bytes to write to the sysfs file
4182 *
4183 * This function is called when the serialize_jobs sysfs file is written to.
4184 * It matches the requested setting against the available settings and if a
4185 * matching setting is found updates kbdev->serialize_jobs.
4186 *
4187 * Return: @count if the function succeeded. An error code on failure.
4188 */
4189 static ssize_t store_serialize_jobs_sysfs(struct device *dev,
4190 struct device_attribute *attr,
4191 const char *buf, size_t count)
4192 {
4193 return update_serialize_jobs_setting(to_kbase_device(dev), buf, count);
4194 }
4195
4196 static DEVICE_ATTR(serialize_jobs, 0600, show_serialize_jobs_sysfs,
4197 store_serialize_jobs_sysfs);
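
/* Usage sketch: "cat serialize_jobs" lists the available settings with the
 * active one in square brackets, and e.g. "echo inter-slot > serialize_jobs"
 * switches the setting. The same interface is also exposed through debugfs
 * when CONFIG_DEBUG_FS is enabled (see above).
 */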
4198 #endif /* !MALI_USE_CSF */
4199
4200 static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
4201 {
4202 struct kbase_device *kbdev = container_of(data, struct kbase_device,
4203 protected_mode_hwcnt_disable_work);
4204 spinlock_t *backend_lock;
4205 unsigned long flags;
4206
4207 bool do_disable;
4208
4209 #if MALI_USE_CSF
4210 backend_lock = &kbdev->csf.scheduler.interrupt_lock;
4211 #else
4212 backend_lock = &kbdev->hwaccess_lock;
4213 #endif
4214
4215 spin_lock_irqsave(backend_lock, flags);
4216 do_disable = !kbdev->protected_mode_hwcnt_desired &&
4217 !kbdev->protected_mode_hwcnt_disabled;
4218 spin_unlock_irqrestore(backend_lock, flags);
4219
4220 if (!do_disable)
4221 return;
4222
4223 kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
4224
4225 spin_lock_irqsave(backend_lock, flags);
4226 do_disable = !kbdev->protected_mode_hwcnt_desired &&
4227 !kbdev->protected_mode_hwcnt_disabled;
4228
4229 if (do_disable) {
4230 /* Protected mode state did not change while we were doing the
4231 * disable, so commit the work we just performed and continue
4232 * the state machine.
4233 */
4234 kbdev->protected_mode_hwcnt_disabled = true;
4235 #if !MALI_USE_CSF
4236 kbase_backend_slot_update(kbdev);
4237 #endif /* !MALI_USE_CSF */
4238 } else {
4239 /* Protected mode state was updated while we were doing the
4240 * disable, so we need to undo the disable we just performed.
4241 */
4242 kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
4243 }
4244
4245 spin_unlock_irqrestore(backend_lock, flags);
4246 }
4247
4248 #ifndef PLATFORM_PROTECTED_CALLBACKS
4249 static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
4250 {
4251 struct kbase_device *kbdev = pdev->data;
4252
4253 return kbase_pm_protected_mode_enable(kbdev);
4254 }
4255
4256 static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
4257 {
4258 struct kbase_device *kbdev = pdev->data;
4259
4260 return kbase_pm_protected_mode_disable(kbdev);
4261 }
4262
4263 static const struct protected_mode_ops kbasep_native_protected_ops = {
4264 .protected_mode_enable = kbasep_protected_mode_enable,
4265 .protected_mode_disable = kbasep_protected_mode_disable
4266 };
4267
4268 #define PLATFORM_PROTECTED_CALLBACKS (&kbasep_native_protected_ops)
4269 #endif /* PLATFORM_PROTECTED_CALLBACKS */
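
/* Platforms may supply their own protected-mode handling by defining
 * PLATFORM_PROTECTED_CALLBACKS (a pointer to a struct protected_mode_ops)
 * before this point; otherwise the native enable/disable callbacks above are
 * used.
 */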
4270
4271 int kbase_protected_mode_init(struct kbase_device *kbdev)
4272 {
4273 /* Use native protected ops */
4274 kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
4275 GFP_KERNEL);
4276 if (!kbdev->protected_dev)
4277 return -ENOMEM;
4278 kbdev->protected_dev->data = kbdev;
4279 kbdev->protected_ops = PLATFORM_PROTECTED_CALLBACKS;
4280 INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
4281 kbasep_protected_mode_hwcnt_disable_worker);
4282 kbdev->protected_mode_hwcnt_desired = true;
4283 kbdev->protected_mode_hwcnt_disabled = false;
4284 return 0;
4285 }
4286
4287 void kbase_protected_mode_term(struct kbase_device *kbdev)
4288 {
4289 cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
4290 kfree(kbdev->protected_dev);
4291 }
4292
4293 #if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
4294 static int kbase_common_reg_map(struct kbase_device *kbdev)
4295 {
4296 return 0;
4297 }
4298 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
4299 {
4300 }
4301 #else /* !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */
4302 static int kbase_common_reg_map(struct kbase_device *kbdev)
4303 {
4304 int err = 0;
4305
4306 if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
4307 dev_err(kbdev->dev, "Register window unavailable\n");
4308 err = -EIO;
4309 goto out_region;
4310 }
4311
4312 kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
4313 if (!kbdev->reg) {
4314 dev_err(kbdev->dev, "Can't remap register window\n");
4315 err = -EINVAL;
4316 goto out_ioremap;
4317 }
4318
4319 return err;
4320
4321 out_ioremap:
4322 release_mem_region(kbdev->reg_start, kbdev->reg_size);
4323 out_region:
4324 return err;
4325 }
4326
4327 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
4328 {
4329 if (kbdev->reg) {
4330 iounmap(kbdev->reg);
4331 release_mem_region(kbdev->reg_start, kbdev->reg_size);
4332 kbdev->reg = NULL;
4333 kbdev->reg_start = 0;
4334 kbdev->reg_size = 0;
4335 }
4336 }
4337 #endif /* !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */
4338
4339 int registers_map(struct kbase_device * const kbdev)
4340 {
4341 /* the first memory resource is the physical address of the GPU
4342 * registers.
4343 */
4344 struct platform_device *pdev = to_platform_device(kbdev->dev);
4345 struct resource *reg_res;
4346 int err;
4347
4348 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4349 if (!reg_res) {
4350 dev_err(kbdev->dev, "Invalid register resource\n");
4351 return -ENOENT;
4352 }
4353
4354 kbdev->reg_start = reg_res->start;
4355 kbdev->reg_size = resource_size(reg_res);
4356
4357 #if MALI_USE_CSF
4358 if (kbdev->reg_size <
4359 (CSF_HW_DOORBELL_PAGE_OFFSET +
4360 CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE)) {
4361 dev_err(kbdev->dev, "Insufficient register space, will override to the required size\n");
4362 kbdev->reg_size = CSF_HW_DOORBELL_PAGE_OFFSET +
4363 CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE;
4364 }
4365 #endif
4366
4367 err = kbase_common_reg_map(kbdev);
4368 if (err) {
4369 dev_err(kbdev->dev, "Failed to map registers\n");
4370 return err;
4371 }
4372
4373 return 0;
4374 }
4375
4376 void registers_unmap(struct kbase_device *kbdev)
4377 {
4378 kbase_common_reg_unmap(kbdev);
4379 }
4380
4381 #if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
4382
4383 static bool kbase_is_pm_enabled(const struct device_node *gpu_node)
4384 {
4385 const struct device_node *power_model_node;
4386 const void *cooling_cells_node;
4387 const void *operating_point_node;
4388 bool is_pm_enable = false;
4389
4390 power_model_node = of_get_child_by_name(gpu_node,
4391 "power_model");
4392 if (power_model_node)
4393 is_pm_enable = true;
4394
4395 cooling_cells_node = of_get_property(gpu_node,
4396 "#cooling-cells", NULL);
4397 if (cooling_cells_node)
4398 is_pm_enable = true;
4399
4400 operating_point_node = of_get_property(gpu_node,
4401 "operating-points", NULL);
4402 if (operating_point_node)
4403 is_pm_enable = true;
4404
4405 return is_pm_enable;
4406 }
4407
4408 static bool kbase_is_pv_enabled(const struct device_node *gpu_node)
4409 {
4410 const void *arbiter_if_node;
4411
4412 arbiter_if_node = of_get_property(gpu_node,
4413 "arbiter_if", NULL);
4414
4415 return arbiter_if_node ? true : false;
4416 }
4417
4418 static bool kbase_is_full_coherency_enabled(const struct device_node *gpu_node)
4419 {
4420 const void *coherency_dts;
4421 u32 coherency;
4422
4423 coherency_dts = of_get_property(gpu_node,
4424 "system-coherency",
4425 NULL);
4426 if (coherency_dts) {
4427 coherency = be32_to_cpup(coherency_dts);
4428 if (coherency == COHERENCY_ACE)
4429 return true;
4430 }
4431 return false;
4432 }
4433
4434 #endif /* CONFIG_MALI_ARBITER_SUPPORT && CONFIG_OF */
4435
4436 int kbase_device_pm_init(struct kbase_device *kbdev)
4437 {
4438 int err = 0;
4439
4440 #if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
4441
4442 u32 gpu_id;
4443 u32 product_id;
4444 u32 gpu_model_id;
4445
4446 if (kbase_is_pv_enabled(kbdev->dev->of_node)) {
4447 dev_info(kbdev->dev, "Arbitration interface enabled\n");
4448 if (kbase_is_pm_enabled(kbdev->dev->of_node)) {
4449 /* Arbitration AND power management invalid */
4450 dev_err(kbdev->dev, "Invalid combination of arbitration AND power management\n");
4451 return -EPERM;
4452 }
4453 if (kbase_is_full_coherency_enabled(kbdev->dev->of_node)) {
4454 /* Arbitration AND full coherency invalid */
4455 dev_err(kbdev->dev, "Invalid combination of arbitration AND full coherency\n");
4456 return -EPERM;
4457 }
4458 err = kbase_arbiter_pm_early_init(kbdev);
4459 if (err == 0) {
4460 /* Check if Arbitration is running on
4461 * supported GPU platform
4462 */
4463 kbase_pm_register_access_enable(kbdev);
4464 gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
4465 kbase_pm_register_access_disable(kbdev);
4466 product_id =
4467 KBASE_UBFX32(gpu_id, KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT, 16);
4468 gpu_model_id = GPU_ID2_MODEL_MATCH_VALUE(product_id);
4469
4470 if (gpu_model_id != GPU_ID2_PRODUCT_TGOX
4471 && gpu_model_id != GPU_ID2_PRODUCT_TNOX
4472 && gpu_model_id != GPU_ID2_PRODUCT_TBAX) {
4473 kbase_arbiter_pm_early_term(kbdev);
4474 dev_err(kbdev->dev, "GPU platform not suitable for arbitration\n");
4475 return -EPERM;
4476 }
4477 }
4478 } else {
4479 kbdev->arb.arb_if = NULL;
4480 kbdev->arb.arb_dev = NULL;
4481 err = power_control_init(kbdev);
4482 }
4483 #else
4484 err = power_control_init(kbdev);
4485 #endif /* CONFIG_MALI_ARBITER_SUPPORT && CONFIG_OF */
4486 return err;
4487 }
4488
4489 void kbase_device_pm_term(struct kbase_device *kbdev)
4490 {
4491 #ifdef CONFIG_MALI_ARBITER_SUPPORT
4492 #if IS_ENABLED(CONFIG_OF)
4493 if (kbase_is_pv_enabled(kbdev->dev->of_node))
4494 kbase_arbiter_pm_early_term(kbdev);
4495 else
4496 power_control_term(kbdev);
4497 #endif /* CONFIG_OF */
4498 #else
4499 power_control_term(kbdev);
4500 #endif
4501 }
4502
4503 int power_control_init(struct kbase_device *kbdev)
4504 {
4505 #ifndef CONFIG_OF
4506 /* Power control initialization requires at least the capability to get
4507 * regulators and clocks from the device tree, as well as parsing
4508 * arrays of unsigned integer values.
4509 *
4510 * The whole initialization process shall simply be skipped if the
4511 * minimum capability is not available.
4512 */
4513 return 0;
4514 #else
4515 struct platform_device *pdev;
4516 int err = 0;
4517 unsigned int i;
4518 #if defined(CONFIG_REGULATOR)
4519 static const char * const regulator_names[] = {
4520 "mali", "mem"
4521 };
4522 #endif /* CONFIG_REGULATOR */
4523
4524 if (!kbdev)
4525 return -ENODEV;
4526
4527 pdev = to_platform_device(kbdev->dev);
4528
4529 #if defined(CONFIG_REGULATOR)
4530 /* Since the error code EPROBE_DEFER causes the entire probing
4531 * procedure to be restarted from scratch at a later time,
4532 * all regulators will be released before returning.
4533 *
4534 * Any other error is ignored and the driver will continue
4535 * operating with a partial initialization of regulators.
4536 */
4537 for (i = 0; i < ARRAY_SIZE(regulator_names); i++) {
4538 kbdev->regulators[i] = regulator_get_optional(kbdev->dev,
4539 regulator_names[i]);
4540 if (IS_ERR(kbdev->regulators[i])) {
4541 err = PTR_ERR(kbdev->regulators[i]);
4542 kbdev->regulators[i] = NULL;
4543 break;
4544 }
4545 }
4546 if (err == -EPROBE_DEFER) {
4547 while (i > 0)
4548 regulator_put(kbdev->regulators[--i]);
4549 return err;
4550 }
4551
4552 kbdev->nr_regulators = i;
4553 dev_dbg(&pdev->dev, "Regulators probed: %u\n", kbdev->nr_regulators);
4554 #endif
4555
4556 /* Having more clocks than regulators is acceptable, while the
4557 * opposite shall not happen.
4558 *
4559 * Since the error code EPROBE_DEFER causes the entire probing
4560 * procedure to be restarted from scratch at a later time,
4561 * all clocks and regulators will be released before returning.
4562 *
4563 * Any other error is ignored and the driver will continue
4564 * operating with a partial initialization of clocks.
4565 */
4566 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4567 kbdev->clocks[i] = of_clk_get(kbdev->dev->of_node, i);
4568 if (IS_ERR(kbdev->clocks[i])) {
4569 err = PTR_ERR(kbdev->clocks[i]);
4570 kbdev->clocks[i] = NULL;
4571 break;
4572 }
4573
4574 err = clk_prepare(kbdev->clocks[i]);
4575 if (err) {
4576 dev_err(kbdev->dev,
4577 "Failed to prepare and enable clock (%d)\n",
4578 err);
4579 clk_put(kbdev->clocks[i]);
4580 break;
4581 }
4582 }
4583 if (err == -EPROBE_DEFER) {
4584 while (i > 0) {
4585 clk_disable_unprepare(kbdev->clocks[--i]);
4586 clk_put(kbdev->clocks[i]);
4587 }
4588 goto clocks_probe_defer;
4589 }
4590
4591 kbdev->nr_clocks = i;
4592 dev_dbg(&pdev->dev, "Clocks probed: %u\n", kbdev->nr_clocks);
4593
4594 /* Any error in parsing the OPP table from the device file
4595 * shall be ignored. The fact that the table may be absent or wrong
4596 * on the device tree of the platform shouldn't prevent the driver
4597 * from completing its initialization.
4598 */
4599 #if defined(CONFIG_PM_OPP)
4600 #if defined(CONFIG_REGULATOR)
4601 #if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
4602 if (kbdev->nr_regulators > 0) {
4603 kbdev->token = dev_pm_opp_set_regulators(kbdev->dev, regulator_names);
4604
4605 if (kbdev->token < 0) {
4606 err = kbdev->token;
4607 goto regulators_probe_defer;
4608 }
4609
4610 }
4611 #elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
4612 if (kbdev->nr_regulators > 0) {
4613 kbdev->opp_table =
4614 dev_pm_opp_set_regulators(kbdev->dev, regulator_names,
4615 kbdev->nr_regulators);
4616 if (IS_ERR(kbdev->opp_table)) {
4617 dev_err(kbdev->dev, "Failed to set regulators\n");
4618 return 0;
4619 }
4620 kbdev->opp_table =
4621 dev_pm_opp_register_set_opp_helper(kbdev->dev,
4622 kbase_devfreq_opp_helper);
4623 if (IS_ERR(kbdev->opp_table)) {
4624 dev_pm_opp_put_regulators(kbdev->opp_table);
4625 kbdev->opp_table = NULL;
4626 dev_err(kbdev->dev, "Failed to set opp helper\n");
4627 return 0;
4628 }
4629 }
4630 #endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
4631 #endif /* CONFIG_REGULATOR */
4632
4633 #ifdef CONFIG_ARCH_ROCKCHIP
4634 err = kbase_platform_rk_init_opp_table(kbdev);
4635 if (err)
4636 dev_err(kbdev->dev, "Failed to init_opp_table (%d)\n", err);
4637 #else
4638 err = dev_pm_opp_of_add_table(kbdev->dev);
4639 CSTD_UNUSED(err);
4640 #endif
4641 #endif /* CONFIG_PM_OPP */
4642 return 0;
4643
4644 #if defined(CONFIG_PM_OPP) && \
4645 ((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) && defined(CONFIG_REGULATOR))
4646 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4647 if (kbdev->clocks[i]) {
4648 if (__clk_is_enabled(kbdev->clocks[i]))
4649 clk_disable_unprepare(kbdev->clocks[i]);
4650 clk_put(kbdev->clocks[i]);
4651 kbdev->clocks[i] = NULL;
4652 } else
4653 break;
4654 }
4655 #endif
4656
4657 clocks_probe_defer:
4658 #if defined(CONFIG_REGULATOR)
4659 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++)
4660 regulator_put(kbdev->regulators[i]);
4661 #endif
4662 return err;
4663 #endif /* CONFIG_OF */
4664 }
4665
4666 void power_control_term(struct kbase_device *kbdev)
4667 {
4668 unsigned int i;
4669
4670 #if defined(CONFIG_PM_OPP)
4671 dev_pm_opp_of_remove_table(kbdev->dev);
4672 #if defined(CONFIG_REGULATOR)
4673 #if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
4674 if (kbdev->token > -EPERM) {
4675 dev_pm_opp_unregister_set_opp_helper(kbdev->opp_table);
4676 dev_pm_opp_put_regulators(kbdev->token);
4677 }
4678 #elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
4679 if (!IS_ERR_OR_NULL(kbdev->opp_table))
4680 dev_pm_opp_put_regulators(kbdev->opp_table);
4681 #endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
4682 #endif /* CONFIG_REGULATOR */
4683 #endif /* CONFIG_PM_OPP */
4684
4685 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4686 if (kbdev->clocks[i]) {
4687 clk_unprepare(kbdev->clocks[i]);
4688 clk_put(kbdev->clocks[i]);
4689 kbdev->clocks[i] = NULL;
4690 } else
4691 break;
4692 }
4693
4694 #if defined(CONFIG_OF) && defined(CONFIG_REGULATOR)
4695 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4696 if (kbdev->regulators[i]) {
4697 regulator_put(kbdev->regulators[i]);
4698 kbdev->regulators[i] = NULL;
4699 }
4700 }
4701 #endif
4702 }
4703
4704 #if IS_ENABLED(CONFIG_DEBUG_FS)
4705
4706 static void trigger_reset(struct kbase_device *kbdev)
4707 {
4708 kbase_pm_context_active(kbdev);
4709 if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE))
4710 kbase_reset_gpu(kbdev);
4711 kbase_pm_context_idle(kbdev);
4712 }
4713
4714 #define MAKE_QUIRK_ACCESSORS(type) \
4715 static int type##_quirks_set(void *data, u64 val) \
4716 { \
4717 struct kbase_device *kbdev; \
4718 kbdev = (struct kbase_device *)data; \
4719 kbdev->hw_quirks_##type = (u32)val; \
4720 trigger_reset(kbdev); \
4721 return 0; \
4722 } \
4723 \
4724 static int type##_quirks_get(void *data, u64 *val) \
4725 { \
4726 struct kbase_device *kbdev; \
4727 kbdev = (struct kbase_device *)data; \
4728 *val = kbdev->hw_quirks_##type; \
4729 return 0; \
4730 } \
4731 DEFINE_DEBUGFS_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get, \
4732 type##_quirks_set, "%llu\n")
4733
4734 MAKE_QUIRK_ACCESSORS(sc);
4735 MAKE_QUIRK_ACCESSORS(tiler);
4736 MAKE_QUIRK_ACCESSORS(mmu);
4737 MAKE_QUIRK_ACCESSORS(gpu);
4738
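
/* Each MAKE_QUIRK_ACCESSORS(type) invocation above expands to a
 * <type>_quirks_set()/<type>_quirks_get() pair operating on
 * kbdev->hw_quirks_<type>, plus a fops_<type>_quirks attribute. For example,
 * writing to the quirks_sc debugfs file updates hw_quirks_sc and triggers a
 * GPU reset so the new value takes effect.
 */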
4739 /**
4740 * kbase_device_debugfs_reset_write() - Reset the GPU
4741 *
4742 * @data: Pointer to the Kbase device.
4743 * @wait_for_reset: Value written to the file.
4744 *
4745 * This function will perform the GPU reset, and if the value written to
4746 * the file is 1 it will also wait for the reset to complete.
4747 *
4748 * Return: 0 in case of no error otherwise a negative value.
4749 */
4750 static int kbase_device_debugfs_reset_write(void *data, u64 wait_for_reset)
4751 {
4752 struct kbase_device *kbdev = data;
4753
4754 trigger_reset(kbdev);
4755
4756 if (wait_for_reset == 1)
4757 return kbase_reset_gpu_wait(kbdev);
4758
4759 return 0;
4760 }
4761
4762 DEFINE_DEBUGFS_ATTRIBUTE(fops_trigger_reset, NULL, &kbase_device_debugfs_reset_write, "%llu\n");
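
/* Usage sketch (the debugfs path is typically /sys/kernel/debug/<devname>/reset,
 * e.g. mali0): "echo 0 > reset" triggers an asynchronous GPU reset, while
 * "echo 1 > reset" also waits for the reset to complete before returning.
 */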
4763
4764 /**
4765 * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
4766 * @file: File object to read is for
4767 * @buf: User buffer to populate with data
4768 * @len: Length of user buffer
4769 * @ppos: Offset within file object
4770 *
4771 * Retrieves the current status of protected debug mode
4772 * (0 = disabled, 1 = enabled)
4773 *
4774 * Return: Number of bytes added to user buffer
4775 */
4776 static ssize_t debugfs_protected_debug_mode_read(struct file *file,
4777 char __user *buf, size_t len, loff_t *ppos)
4778 {
4779 struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
4780 u32 gpu_status;
4781 ssize_t ret_val;
4782
4783 kbase_pm_context_active(kbdev);
4784 gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
4785 kbase_pm_context_idle(kbdev);
4786
4787 if (gpu_status & GPU_DBGEN)
4788 ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
4789 else
4790 ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
4791
4792 return ret_val;
4793 }
4794
4795 /*
4796 * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops
4797 *
4798 * Contains the file operations for the "protected_debug_mode" debugfs file
4799 */
4800 static const struct file_operations fops_protected_debug_mode = {
4801 .owner = THIS_MODULE,
4802 .open = simple_open,
4803 .read = debugfs_protected_debug_mode_read,
4804 .llseek = default_llseek,
4805 };
4806
4807 static int kbase_device_debugfs_mem_pool_max_size_show(struct seq_file *sfile,
4808 void *data)
4809 {
4810 CSTD_UNUSED(data);
4811 return kbase_debugfs_helper_seq_read(sfile,
4812 MEMORY_GROUP_MANAGER_NR_GROUPS,
4813 kbase_mem_pool_config_debugfs_max_size);
4814 }
4815
4816 static ssize_t kbase_device_debugfs_mem_pool_max_size_write(struct file *file,
4817 const char __user *ubuf, size_t count, loff_t *ppos)
4818 {
4819 int err = 0;
4820
4821 CSTD_UNUSED(ppos);
4822 err = kbase_debugfs_helper_seq_write(file, ubuf, count,
4823 MEMORY_GROUP_MANAGER_NR_GROUPS,
4824 kbase_mem_pool_config_debugfs_set_max_size);
4825
4826 return err ? err : count;
4827 }
4828
4829 static int kbase_device_debugfs_mem_pool_max_size_open(struct inode *in,
4830 struct file *file)
4831 {
4832 return single_open(file, kbase_device_debugfs_mem_pool_max_size_show,
4833 in->i_private);
4834 }
4835
4836 static const struct file_operations
4837 kbase_device_debugfs_mem_pool_max_size_fops = {
4838 .owner = THIS_MODULE,
4839 .open = kbase_device_debugfs_mem_pool_max_size_open,
4840 .read = seq_read,
4841 .write = kbase_device_debugfs_mem_pool_max_size_write,
4842 .llseek = seq_lseek,
4843 .release = single_release,
4844 };
4845
4846 /**
4847 * debugfs_ctx_defaults_init - Create the default configuration of new contexts in debugfs
4848 * @kbdev: An instance of the GPU platform device, allocated from the probe method of the driver.
4849 * Return: A pointer to the last dentry that it tried to create, whether successful or not.
4850 * Could be NULL or encode another error value.
4851 */
4852 static struct dentry *debugfs_ctx_defaults_init(struct kbase_device *const kbdev)
4853 {
4854 /* prevent unprivileged use of debug file system
4855 	 * on old kernel versions
4856 */
4857 const mode_t mode = 0644;
4858 struct dentry *dentry = debugfs_create_dir("defaults", kbdev->debugfs_ctx_directory);
4859 struct dentry *debugfs_ctx_defaults_directory = dentry;
4860
4861 if (IS_ERR_OR_NULL(dentry)) {
4862 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
4863 return dentry;
4864 }
4865
4866 debugfs_create_bool("infinite_cache", mode,
4867 debugfs_ctx_defaults_directory,
4868 &kbdev->infinite_cache_active_default);
4869
4870 dentry = debugfs_create_file("mem_pool_max_size", mode, debugfs_ctx_defaults_directory,
4871 &kbdev->mem_pool_defaults.small,
4872 &kbase_device_debugfs_mem_pool_max_size_fops);
4873 if (IS_ERR_OR_NULL(dentry)) {
4874 dev_err(kbdev->dev, "Unable to create mem_pool_max_size debugfs entry\n");
4875 return dentry;
4876 }
4877
4878 dentry = debugfs_create_file("lp_mem_pool_max_size", mode, debugfs_ctx_defaults_directory,
4879 &kbdev->mem_pool_defaults.large,
4880 &kbase_device_debugfs_mem_pool_max_size_fops);
4881 if (IS_ERR_OR_NULL(dentry))
4882 dev_err(kbdev->dev, "Unable to create lp_mem_pool_max_size debugfs entry\n");
4883
4884 return dentry;
4885 }
4886
4887 /**
4888 * init_debugfs - Create device-wide debugfs directories and files for the Mali driver
4889 * @kbdev: An instance of the GPU platform device, allocated from the probe method of the driver.
4890 * Return: A pointer to the last dentry that it tried to create, whether successful or not.
4891 * Could be NULL or encode another error value.
4892 */
4893 static struct dentry *init_debugfs(struct kbase_device *kbdev)
4894 {
4895 struct dentry *dentry = debugfs_create_dir(kbdev->devname, NULL);
4896
4897 kbdev->mali_debugfs_directory = dentry;
4898 if (IS_ERR_OR_NULL(dentry)) {
4899 dev_err(kbdev->dev,
4900 "Couldn't create mali debugfs directory: %s\n",
4901 kbdev->devname);
4902 return dentry;
4903 }
4904
4905 dentry = debugfs_create_dir("ctx", kbdev->mali_debugfs_directory);
4906 kbdev->debugfs_ctx_directory = dentry;
4907 if (IS_ERR_OR_NULL(dentry)) {
4908 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
4909 return dentry;
4910 }
4911
4912 dentry = debugfs_create_dir("instrumentation", kbdev->mali_debugfs_directory);
4913 kbdev->debugfs_instr_directory = dentry;
4914 if (IS_ERR_OR_NULL(dentry)) {
4915 dev_err(kbdev->dev, "Couldn't create mali debugfs instrumentation directory\n");
4916 return dentry;
4917 }
4918
4919 kbasep_regs_history_debugfs_init(kbdev);
4920
4921 #if MALI_USE_CSF
4922 kbase_debug_csf_fault_debugfs_init(kbdev);
4923 #else /* MALI_USE_CSF */
4924 kbase_debug_job_fault_debugfs_init(kbdev);
4925 #endif /* !MALI_USE_CSF */
4926
4927 kbasep_gpu_memory_debugfs_init(kbdev);
4928 kbase_as_fault_debugfs_init(kbdev);
4929 #ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
4930 kbase_instr_backend_debugfs_init(kbdev);
4931 #endif
4932 kbase_pbha_debugfs_init(kbdev);
4933
4934 /* fops_* variables created by invocations of macro
4935 * MAKE_QUIRK_ACCESSORS() above.
4936 */
4937 dentry = debugfs_create_file("quirks_sc", 0644,
4938 kbdev->mali_debugfs_directory, kbdev,
4939 &fops_sc_quirks);
4940 if (IS_ERR_OR_NULL(dentry)) {
4941 dev_err(kbdev->dev, "Unable to create quirks_sc debugfs entry\n");
4942 return dentry;
4943 }
4944
4945 dentry = debugfs_create_file("quirks_tiler", 0644,
4946 kbdev->mali_debugfs_directory, kbdev,
4947 &fops_tiler_quirks);
4948 if (IS_ERR_OR_NULL(dentry)) {
4949 dev_err(kbdev->dev, "Unable to create quirks_tiler debugfs entry\n");
4950 return dentry;
4951 }
4952
4953 dentry = debugfs_create_file("quirks_mmu", 0644,
4954 kbdev->mali_debugfs_directory, kbdev,
4955 &fops_mmu_quirks);
4956 if (IS_ERR_OR_NULL(dentry)) {
4957 dev_err(kbdev->dev, "Unable to create quirks_mmu debugfs entry\n");
4958 return dentry;
4959 }
4960
4961 dentry = debugfs_create_file("quirks_gpu", 0644, kbdev->mali_debugfs_directory,
4962 kbdev, &fops_gpu_quirks);
4963 if (IS_ERR_OR_NULL(dentry)) {
4964 dev_err(kbdev->dev, "Unable to create quirks_gpu debugfs entry\n");
4965 return dentry;
4966 }
4967
4968 dentry = debugfs_ctx_defaults_init(kbdev);
4969 if (IS_ERR_OR_NULL(dentry))
4970 return dentry;
4971
4972 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
4973 dentry = debugfs_create_file("protected_debug_mode", 0444,
4974 kbdev->mali_debugfs_directory, kbdev,
4975 &fops_protected_debug_mode);
4976 if (IS_ERR_OR_NULL(dentry)) {
4977 dev_err(kbdev->dev, "Unable to create protected_debug_mode debugfs entry\n");
4978 return dentry;
4979 }
4980 }
4981
4982 dentry = debugfs_create_file("reset", 0644,
4983 kbdev->mali_debugfs_directory, kbdev,
4984 &fops_trigger_reset);
4985 if (IS_ERR_OR_NULL(dentry)) {
4986 dev_err(kbdev->dev, "Unable to create reset debugfs entry\n");
4987 return dentry;
4988 }
4989
4990 kbase_ktrace_debugfs_init(kbdev);
4991
4992 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
4993 #if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
4994 if (kbdev->devfreq && !kbdev->model_data &&
4995 !kbdev->dfc_power.dyn_power_coeff)
4996 kbase_ipa_debugfs_init(kbdev);
4997 #endif /* CONFIG_DEVFREQ_THERMAL */
4998 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
4999
5000 #if !MALI_USE_CSF
5001 dentry = debugfs_create_file("serialize_jobs", 0644,
5002 kbdev->mali_debugfs_directory, kbdev,
5003 &kbasep_serialize_jobs_debugfs_fops);
5004 if (IS_ERR_OR_NULL(dentry)) {
5005 dev_err(kbdev->dev, "Unable to create serialize_jobs debugfs entry\n");
5006 return dentry;
5007 }
5008 kbase_timeline_io_debugfs_init(kbdev);
5009 #endif
5010 kbase_dvfs_status_debugfs_init(kbdev);
5011
5012
5013 return dentry;
5014 }
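
/*
 * The debugfs tree built above is rooted at <debugfs>/<devname> (typically
 * /sys/kernel/debug/mali0) and contains, among others, the quirks_* files,
 * "reset", the "ctx/defaults" directory and the "instrumentation" directory.
 * The exact set of entries depends on the build configuration (CSF vs JM,
 * devfreq/IPA, CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS, ...).
 */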
5015
5016 int kbase_device_debugfs_init(struct kbase_device *kbdev)
5017 {
5018 struct dentry *dentry = init_debugfs(kbdev);
5019
5020 if (IS_ERR_OR_NULL(dentry)) {
5021 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
5022 return IS_ERR(dentry) ? PTR_ERR(dentry) : -ENOMEM;
5023 }
5024 return 0;
5025 }
5026
5027 void kbase_device_debugfs_term(struct kbase_device *kbdev)
5028 {
5029 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
5030 }
5031 #endif /* CONFIG_DEBUG_FS */
5032
5033 int kbase_device_coherency_init(struct kbase_device *kbdev)
5034 {
5035 #if IS_ENABLED(CONFIG_OF)
5036 u32 supported_coherency_bitmap =
5037 kbdev->gpu_props.props.raw_props.coherency_mode;
5038 const void *coherency_override_dts;
5039 bool dma_coherent;
5040 u32 override_coherency, gpu_id;
5041 unsigned int prod_id;
5042
5043 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
5044 gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
5045 prod_id = gpu_id >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
5046
5047 /* Only for tMIx:
5048 * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
5049 * documented for tMIx, so force the correct value here.
5050 */
5051 if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
5052 GPU_ID2_PRODUCT_TMIX)
5053 if (supported_coherency_bitmap ==
5054 COHERENCY_FEATURE_BIT(COHERENCY_ACE))
5055 supported_coherency_bitmap |=
5056 COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
5057
5058 #endif /* CONFIG_OF */
5059
5060 kbdev->system_coherency = COHERENCY_NONE;
5061
5062 /* device tree may override the coherency */
5063 #if IS_ENABLED(CONFIG_OF)
5064 /* treat "dma-coherency" as a synonym for ACE-lite */
5065 dma_coherent = of_dma_is_coherent(kbdev->dev->of_node);
5066 coherency_override_dts = of_get_property(kbdev->dev->of_node,
5067 "system-coherency",
5068 NULL);
5069 if (coherency_override_dts || dma_coherent) {
5070 if (coherency_override_dts) {
5071 override_coherency = be32_to_cpup(coherency_override_dts);
5072 if (dma_coherent && override_coherency != COHERENCY_ACE_LITE) {
5073 dev_err(kbdev->dev,
5074 "system-coherency needs to be 0 when dma-coherent is set\n");
5075 return -EINVAL;
5076 }
5077 } else {
5078 /* dma-coherent set and system-coherency not specified */
5079 override_coherency = COHERENCY_ACE_LITE;
5080 }
5081
5082 #if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
5083 /* ACE coherency mode is not supported by Driver on CSF GPUs.
5084 * Return an error to signal the invalid device tree configuration.
5085 */
5086 if (override_coherency == COHERENCY_ACE) {
5087 dev_err(kbdev->dev,
5088 "ACE coherency not supported, wrong DT configuration");
5089 return -EINVAL;
5090 }
5091 #endif
5092
5093 if ((override_coherency <= COHERENCY_NONE) &&
5094 (supported_coherency_bitmap &
5095 COHERENCY_FEATURE_BIT(override_coherency))) {
5096
5097 kbdev->system_coherency = override_coherency;
5098
5099 dev_info(kbdev->dev,
5100 "Using coherency mode %u set from dtb",
5101 override_coherency);
5102 } else
5103 dev_warn(kbdev->dev,
5104 "Ignoring unsupported coherency mode %u set from dtb",
5105 override_coherency);
5106 }
5107
5108 #endif /* CONFIG_OF */
5109
5110 kbdev->gpu_props.props.raw_props.coherency_mode =
5111 kbdev->system_coherency;
5112
5113 return 0;
5114 }
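
/*
 * Illustrative device tree fragment for the coherency handling above (node
 * name, unit address and values are examples only, not taken from a real
 * platform):
 *
 *   gpu: gpu@2d000000 {
 *           compatible = "arm,mali-bifrost";
 *           // Either mark the device as DMA coherent, which is treated
 *           // as ACE-Lite...
 *           dma-coherent;
 *           // ...or request a mode explicitly; with dma-coherent set it
 *           // must be 0 (ACE-Lite).
 *           system-coherency = <0>;
 *   };
 */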
5115
5116
5117 #if MALI_USE_CSF
5118 /**
5119 * csg_scheduling_period_store - Store callback for the csg_scheduling_period
5120 * sysfs file.
5121 * @dev: The device this sysfs file is for
5122 * @attr: The attributes of the sysfs file
5123 * @buf: The value written to the sysfs file
5124 * @count: The number of bytes written to the sysfs file
5125 *
5126 * This function is called when the csg_scheduling_period sysfs file is written
5127 * to. It checks the data written and, if valid, updates the CSG scheduling period.
5128 *
5129 * Return: @count if the function succeeded. An error code on failure.
5130 */
5131 static ssize_t csg_scheduling_period_store(struct device *dev,
5132 struct device_attribute *attr,
5133 const char *buf, size_t count)
5134 {
5135 struct kbase_device *kbdev;
5136 int ret;
5137 unsigned int csg_scheduling_period;
5138
5139 kbdev = to_kbase_device(dev);
5140 if (!kbdev)
5141 return -ENODEV;
5142
5143 ret = kstrtouint(buf, 0, &csg_scheduling_period);
5144 if (ret || csg_scheduling_period == 0) {
5145 dev_err(kbdev->dev,
5146 "Couldn't process csg_scheduling_period write operation.\n"
5147 "Use format 'csg_scheduling_period_ms', and csg_scheduling_period_ms > 0\n");
5148 return -EINVAL;
5149 }
5150
5151 kbase_csf_scheduler_lock(kbdev);
5152 kbdev->csf.scheduler.csg_scheduling_period_ms = csg_scheduling_period;
5153 dev_dbg(kbdev->dev, "CSG scheduling period: %ums\n",
5154 csg_scheduling_period);
5155 kbase_csf_scheduler_unlock(kbdev);
5156
5157 return count;
5158 }
5159
5160 /**
5161 * csg_scheduling_period_show - Show callback for the csg_scheduling_period
5162 * sysfs entry.
5163 * @dev: The device this sysfs file is for.
5164 * @attr: The attributes of the sysfs file.
5165 * @buf: The output buffer for the current CSG scheduling period.
5166 *
5167 * This function is called to get the current CSG scheduling period.
5168 *
5169 * Return: The number of bytes output to @buf.
5170 */
5171 static ssize_t csg_scheduling_period_show(struct device *dev,
5172 struct device_attribute *attr,
5173 char *const buf)
5174 {
5175 struct kbase_device *kbdev;
5176 ssize_t ret;
5177
5178 kbdev = to_kbase_device(dev);
5179 if (!kbdev)
5180 return -ENODEV;
5181
5182 ret = scnprintf(buf, PAGE_SIZE, "%u\n",
5183 kbdev->csf.scheduler.csg_scheduling_period_ms);
5184
5185 return ret;
5186 }
5187
5188 static DEVICE_ATTR_RW(csg_scheduling_period);
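
/*
 * Illustrative use of the attribute from user space (the sysfs path depends
 * on how the GPU platform device is named on the target):
 *
 *   cat /sys/devices/platform/<gpu>/csg_scheduling_period
 *   echo 50 > /sys/devices/platform/<gpu>/csg_scheduling_period
 *
 * The value is interpreted as milliseconds and must be non-zero.
 */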
5189
5190 /**
5191 * fw_timeout_store - Store callback for the fw_timeout sysfs file.
5192 * @dev: The device this sysfs file is for
5193 * @attr: The attributes of the sysfs file
5194 * @buf: The value written to the sysfs file
5195 * @count: The number of bytes written to the sysfs file
5196 *
5197 * This function is called when the fw_timeout sysfs file is written to. It
5198 * checks the data written and, if valid, updates the firmware timeout.
5199 *
5200 * Return: @count if the function succeeded. An error code on failure.
5201 */
5202 static ssize_t fw_timeout_store(struct device *dev,
5203 struct device_attribute *attr, const char *buf,
5204 size_t count)
5205 {
5206 struct kbase_device *kbdev;
5207 int ret;
5208 unsigned int fw_timeout;
5209
5210 kbdev = to_kbase_device(dev);
5211 if (!kbdev)
5212 return -ENODEV;
5213
5214 ret = kstrtouint(buf, 0, &fw_timeout);
5215 if (ret || fw_timeout == 0) {
5216 dev_err(kbdev->dev,
5217 "Couldn't process fw_timeout write operation.\n"
5218 "Use format 'fw_timeout_ms', and fw_timeout_ms > 0\n"
5219 "Default fw_timeout: %u",
5220 kbase_get_timeout_ms(kbdev, CSF_FIRMWARE_PING_TIMEOUT));
5221 return -EINVAL;
5222 }
5223
5224 kbase_csf_scheduler_lock(kbdev);
5225 kbdev->csf.fw_timeout_ms = fw_timeout;
5226 kbase_csf_scheduler_unlock(kbdev);
5227 dev_dbg(kbdev->dev, "Firmware timeout: %ums\n", fw_timeout);
5228
5229 return count;
5230 }
5231
5232 /**
5233 * fw_timeout_show - Show callback for the firmware timeout sysfs entry.
5234 * @dev: The device this sysfs file is for.
5235 * @attr: The attributes of the sysfs file.
5236 * @buf: The output buffer for the current firmware timeout.
5237 *
5238 * This function is called to get the current firmware timeout.
5239 *
5240 * Return: The number of bytes output to @buf.
5241 */
5242 static ssize_t fw_timeout_show(struct device *dev,
5243 struct device_attribute *attr, char *const buf)
5244 {
5245 struct kbase_device *kbdev;
5246 ssize_t ret;
5247
5248 kbdev = to_kbase_device(dev);
5249 if (!kbdev)
5250 return -ENODEV;
5251
5252 ret = scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->csf.fw_timeout_ms);
5253
5254 return ret;
5255 }
5256
5257 static DEVICE_ATTR_RW(fw_timeout);
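
/*
 * Illustrative use (path is platform dependent):
 *
 *   echo 3000 > /sys/devices/platform/<gpu>/fw_timeout
 *
 * The value is in milliseconds and must be non-zero; the default reported on
 * a bad write is derived from the CSF_FIRMWARE_PING_TIMEOUT timeout.
 */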
5258
5259 /**
5260 * idle_hysteresis_time_store - Store callback for CSF idle_hysteresis_time
5261 * sysfs file.
5262 * @dev: The device this sysfs file is for
5263 * @attr: The attributes of the sysfs file
5264 * @buf: The value written to the sysfs file
5265 * @count: The number of bytes written to the sysfs file
5266 *
5267 * This function is called when the idle_hysteresis_time sysfs file is
5268 * written to.
5269 *
5270 * This file contains the value of the idle hysteresis duration.
5271 *
5272 * Return: @count if the function succeeded. An error code on failure.
5273 */
5274 static ssize_t idle_hysteresis_time_store(struct device *dev,
5275 struct device_attribute *attr, const char *buf, size_t count)
5276 {
5277 struct kbase_device *kbdev;
5278 u32 dur = 0;
5279
5280 kbdev = to_kbase_device(dev);
5281 if (!kbdev)
5282 return -ENODEV;
5283
5284 if (kstrtou32(buf, 0, &dur)) {
5285 dev_err(kbdev->dev, "Couldn't process idle_hysteresis_time write operation.\n"
5286 "Use format <idle_hysteresis_time>\n");
5287 return -EINVAL;
5288 }
5289
5290 kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, dur);
5291
5292 return count;
5293 }
5294
5295 /**
5296 * idle_hysteresis_time_show - Show callback for CSF idle_hysteresis_time
5297 * sysfs entry.
5298 * @dev: The device this sysfs file is for.
5299 * @attr: The attributes of the sysfs file.
5300 * @buf: The output buffer for the current idle hysteresis duration.
5301 *
5302 * This function is called to get the current idle hysteresis duration in ms.
5303 *
5304 * Return: The number of bytes output to @buf.
5305 */
5306 static ssize_t idle_hysteresis_time_show(struct device *dev,
5307 struct device_attribute *attr, char * const buf)
5308 {
5309 struct kbase_device *kbdev;
5310 ssize_t ret;
5311 u32 dur;
5312
5313 kbdev = to_kbase_device(dev);
5314 if (!kbdev)
5315 return -ENODEV;
5316
5317 dur = kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev);
5318 ret = scnprintf(buf, PAGE_SIZE, "%u\n", dur);
5319
5320 return ret;
5321 }
5322
5323 static DEVICE_ATTR_RW(idle_hysteresis_time);
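
/*
 * Illustrative use (path is platform dependent):
 *
 *   cat /sys/devices/platform/<gpu>/idle_hysteresis_time
 *   echo 10000 > /sys/devices/platform/<gpu>/idle_hysteresis_time
 *
 * The written value is passed straight to
 * kbase_csf_firmware_set_gpu_idle_hysteresis_time(); per the kernel-doc
 * above, reads report the duration in milliseconds.
 */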
5324
5325 /**
5326 * mcu_shader_pwroff_timeout_show - Get the MCU shader Core power-off time value.
5327 *
5328 * @dev: The device this sysfs file is for.
5329 * @attr: The attributes of the sysfs file.
5330 * @buf: The output buffer for the sysfs file contents
5331 *
5332 * Get the internally recorded MCU shader Core power-off (nominal) timeout value.
5333 * The value is expressed in microseconds.
5334 *
5335 * Return: The number of bytes output to @buf if the
5336 * function succeeded. A negative value on failure.
5337 */
5338 static ssize_t mcu_shader_pwroff_timeout_show(struct device *dev, struct device_attribute *attr,
5339 char *const buf)
5340 {
5341 struct kbase_device *kbdev = dev_get_drvdata(dev);
5342 u32 pwroff;
5343
5344 if (!kbdev)
5345 return -ENODEV;
5346
5347 pwroff = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev);
5348 return scnprintf(buf, PAGE_SIZE, "%u\n", pwroff);
5349 }
5350
5351 /**
5352 * mcu_shader_pwroff_timeout_store - Set the MCU shader core power-off time value.
5353 *
5354 * @dev: The device this sysfs file is for
5355 * @attr: The attributes of the sysfs file
5356 * @buf: The value written to the sysfs file
5357 * @count: The number of bytes written to the sysfs file
5358 *
5359 * Sets the duration (in microseconds) used to configure the MCU shader core
5360 * power-off timer, which applies when the shader cores' power transitions
5361 * are delegated to the MCU (normal operational mode).
5362 *
5363 * Return: @count if the function succeeded. An error code on failure.
5364 */
5365 static ssize_t mcu_shader_pwroff_timeout_store(struct device *dev, struct device_attribute *attr,
5366 const char *buf, size_t count)
5367 {
5368 struct kbase_device *kbdev = dev_get_drvdata(dev);
5369 u32 dur;
5370
5371 if (!kbdev)
5372 return -ENODEV;
5373
5374 if (kstrtouint(buf, 0, &dur))
5375 return -EINVAL;
5376
5377 kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, dur);
5378
5379 return count;
5380 }
5381
5382 static DEVICE_ATTR_RW(mcu_shader_pwroff_timeout);
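
/*
 * Illustrative use (path is platform dependent):
 *
 *   echo 500 > /sys/devices/platform/<gpu>/mcu_shader_pwroff_timeout
 *
 * The value is in microseconds and configures the MCU-managed shader core
 * power-off timer described in the kernel-doc above.
 */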
5383
5384 #endif /* MALI_USE_CSF */
5385
5386 static struct attribute *kbase_scheduling_attrs[] = {
5387 #if !MALI_USE_CSF
5388 &dev_attr_serialize_jobs.attr,
5389 #endif /* !MALI_USE_CSF */
5390 NULL
5391 };
5392
5393 static struct attribute *kbase_attrs[] = {
5394 #ifdef CONFIG_MALI_BIFROST_DEBUG
5395 &dev_attr_debug_command.attr,
5396 #if !MALI_USE_CSF
5397 &dev_attr_js_softstop_always.attr,
5398 #endif /* !MALI_USE_CSF */
5399 #endif
5400 #if !MALI_USE_CSF
5401 &dev_attr_js_timeouts.attr,
5402 &dev_attr_soft_job_timeout.attr,
5403 #endif /* !MALI_USE_CSF */
5404 &dev_attr_gpuinfo.attr,
5405 &dev_attr_dvfs_period.attr,
5406 &dev_attr_pm_poweroff.attr,
5407 &dev_attr_reset_timeout.attr,
5408 #if !MALI_USE_CSF
5409 &dev_attr_js_scheduling_period.attr,
5410 #else
5411 &dev_attr_csg_scheduling_period.attr,
5412 &dev_attr_fw_timeout.attr,
5413 &dev_attr_idle_hysteresis_time.attr,
5414 &dev_attr_mcu_shader_pwroff_timeout.attr,
5415 #endif /* !MALI_USE_CSF */
5416 &dev_attr_power_policy.attr,
5417 &dev_attr_core_mask.attr,
5418 &dev_attr_mem_pool_size.attr,
5419 &dev_attr_mem_pool_max_size.attr,
5420 &dev_attr_lp_mem_pool_size.attr,
5421 &dev_attr_lp_mem_pool_max_size.attr,
5422 #if !MALI_USE_CSF
5423 &dev_attr_js_ctx_scheduling_mode.attr,
5424 #endif /* !MALI_USE_CSF */
5425 NULL
5426 };
5427
5428 static struct attribute *kbase_mempool_attrs[] = {
5429 &dev_attr_max_size.attr,
5430 &dev_attr_lp_max_size.attr,
5431 &dev_attr_ctx_default_max_size.attr,
5432 NULL
5433 };
5434
5435 #define SYSFS_SCHEDULING_GROUP "scheduling"
5436 static const struct attribute_group kbase_scheduling_attr_group = {
5437 .name = SYSFS_SCHEDULING_GROUP,
5438 .attrs = kbase_scheduling_attrs,
5439 };
5440
5441 #define SYSFS_MEMPOOL_GROUP "mempool"
5442 static const struct attribute_group kbase_mempool_attr_group = {
5443 .name = SYSFS_MEMPOOL_GROUP,
5444 .attrs = kbase_mempool_attrs,
5445 };
5446
5447 static const struct attribute_group kbase_attr_group = {
5448 .attrs = kbase_attrs,
5449 };
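
/*
 * Resulting sysfs layout under the GPU device directory (illustrative, the
 * exact attribute set depends on the build configuration):
 *
 *   <device>/power_policy, core_mask, reset_timeout, ...   (kbase_attr_group)
 *   <device>/scheduling/serialize_jobs                     (JM builds only)
 *   <device>/mempool/{max_size,lp_max_size,ctx_default_max_size}
 */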
5450
5451 int kbase_sysfs_init(struct kbase_device *kbdev)
5452 {
5453 int err = 0;
5454
5455 kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
5456 kbdev->mdev.name = kbdev->devname;
5457 kbdev->mdev.fops = &kbase_fops;
5458 kbdev->mdev.parent = get_device(kbdev->dev);
5459 kbdev->mdev.mode = 0666;
5460
5461 err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
5462 if (err)
5463 return err;
5464
5465 err = sysfs_create_group(&kbdev->dev->kobj,
5466 &kbase_scheduling_attr_group);
5467 if (err) {
5468 dev_err(kbdev->dev, "Creation of %s sysfs group failed",
5469 SYSFS_SCHEDULING_GROUP);
5470 sysfs_remove_group(&kbdev->dev->kobj,
5471 &kbase_attr_group);
5472 return err;
5473 }
5474
5475 err = sysfs_create_group(&kbdev->dev->kobj,
5476 &kbase_mempool_attr_group);
5477 if (err) {
5478 dev_err(kbdev->dev, "Creation of %s sysfs group failed",
5479 SYSFS_MEMPOOL_GROUP);
5480 sysfs_remove_group(&kbdev->dev->kobj,
5481 &kbase_scheduling_attr_group);
5482 sysfs_remove_group(&kbdev->dev->kobj,
5483 &kbase_attr_group);
5484 }
5485
5486 return err;
5487 }
5488
5489 void kbase_sysfs_term(struct kbase_device *kbdev)
5490 {
5491 sysfs_remove_group(&kbdev->dev->kobj, &kbase_mempool_attr_group);
5492 sysfs_remove_group(&kbdev->dev->kobj, &kbase_scheduling_attr_group);
5493 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
5494 put_device(kbdev->dev);
5495 }
5496
5497 static int kbase_platform_device_remove(struct platform_device *pdev)
5498 {
5499 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
5500
5501 if (!kbdev)
5502 return -ENODEV;
5503
5504 kbase_device_term(kbdev);
5505 dev_set_drvdata(kbdev->dev, NULL);
5506 kbase_device_free(kbdev);
5507
5508 return 0;
5509 }
5510
5511 void kbase_backend_devfreq_term(struct kbase_device *kbdev)
5512 {
5513 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
5514 if (kbdev->devfreq)
5515 kbase_devfreq_term(kbdev);
5516 #endif
5517 }
5518
5519 int kbase_backend_devfreq_init(struct kbase_device *kbdev)
5520 {
5521 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
5522 /* Devfreq uses hardware counters, so it must be initialized after them. */
5523 int err = kbase_devfreq_init(kbdev);
5524
5525 if (err)
5526 dev_err(kbdev->dev, "Continuing without devfreq\n");
5527 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
5528 return 0;
5529 }
5530
5531 static int kbase_platform_device_probe(struct platform_device *pdev)
5532 {
5533 struct kbase_device *kbdev;
5534 int err = 0;
5535
5536 mali_kbase_print_cs_experimental();
5537
5538 kbdev = kbase_device_alloc();
5539 if (!kbdev) {
5540 dev_err(&pdev->dev, "Allocate device failed\n");
5541 return -ENOMEM;
5542 }
5543
5544 kbdev->dev = &pdev->dev;
5545
5546 #if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
5547 kbdev->token = -EPERM;
5548 #endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
5549
5550 dev_set_drvdata(kbdev->dev, kbdev);
5551 #if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
5552 mutex_lock(&kbase_probe_mutex);
5553 #endif
5554 err = kbase_device_init(kbdev);
5555
5556 if (err) {
5557 if (err == -EPROBE_DEFER)
5558 dev_info(kbdev->dev,
5559 "Device initialization Deferred\n");
5560 else
5561 dev_err(kbdev->dev, "Device initialization failed\n");
5562
5563 dev_set_drvdata(kbdev->dev, NULL);
5564 kbase_device_free(kbdev);
5565 #if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
5566 mutex_unlock(&kbase_probe_mutex);
5567 #endif
5568 } else {
5569 dev_info(kbdev->dev,
5570 "Probed as %s\n", dev_name(kbdev->mdev.this_device));
5571 kbase_increment_device_id();
5572 #if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
5573 mutex_unlock(&kbase_probe_mutex);
5574 #endif
5575 #ifdef CONFIG_MALI_ARBITER_SUPPORT
5576 mutex_lock(&kbdev->pm.lock);
5577 kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_INITIALIZED_EVT);
5578 mutex_unlock(&kbdev->pm.lock);
5579 #endif
5580 }
5581
5582 return err;
5583 }
5584
5585 #undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
5586
5587 /**
5588 * kbase_device_suspend - Suspend callback from the OS.
5589 *
5590 * @dev: The device to suspend
5591 *
5592 * This is called by Linux when the device should suspend.
5593 *
5594 * Return: A standard Linux error code on failure, 0 otherwise.
5595 */
5596 static int kbase_device_suspend(struct device *dev)
5597 {
5598 struct kbase_device *kbdev = to_kbase_device(dev);
5599
5600 if (!kbdev)
5601 return -ENODEV;
5602
5603 if (kbase_pm_suspend(kbdev)) {
5604 dev_warn(kbdev->dev, "Abort suspend as GPU suspension failed");
5605 return -EBUSY;
5606 }
5607
5608 #ifdef CONFIG_MALI_BIFROST_DVFS
5609 kbase_pm_metrics_stop(kbdev);
5610 #endif
5611
5612 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
5613 dev_dbg(dev, "Callback %s\n", __func__);
5614 if (kbdev->devfreq) {
5615 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
5616 flush_workqueue(kbdev->devfreq_queue.workq);
5617 }
5618 #endif
5619
5620 #ifdef CONFIG_ARCH_ROCKCHIP
5621 kbase_platform_rk_enable_regulator(kbdev);
5622 #endif
5623
5624 #ifdef KBASE_PM_RUNTIME
5625 if (kbdev->is_runtime_resumed) {
5626 if (kbdev->pm.backend.callback_power_runtime_off)
5627 kbdev->pm.backend.callback_power_runtime_off(kbdev);
5628 }
5629 #endif /* KBASE_PM_RUNTIME */
5630
5631 return 0;
5632 }
5633
5634 /**
5635 * kbase_device_resume - Resume callback from the OS.
5636 *
5637 * @dev: The device to resume
5638 *
5639 * This is called by Linux when the device should resume from suspension.
5640 *
5641 * Return: A standard Linux error code
5642 */
5643 static int kbase_device_resume(struct device *dev)
5644 {
5645 struct kbase_device *kbdev = to_kbase_device(dev);
5646
5647 if (!kbdev)
5648 return -ENODEV;
5649
5650 #ifdef KBASE_PM_RUNTIME
5651 if (kbdev->is_runtime_resumed) {
5652 if (kbdev->pm.backend.callback_power_runtime_on)
5653 kbdev->pm.backend.callback_power_runtime_on(kbdev);
5654 }
5655 #endif /* KBASE_PM_RUNTIME */
5656
5657 kbase_pm_resume(kbdev);
5658
5659 #ifdef CONFIG_MALI_BIFROST_DVFS
5660 kbase_pm_metrics_start(kbdev);
5661 #endif
5662
5663 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
5664 dev_dbg(dev, "Callback %s\n", __func__);
5665 if (kbdev->devfreq)
5666 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
5667 #endif
5668
5669 #if !MALI_USE_CSF
5670 kbase_enable_quick_reset(kbdev);
5671 #endif
5672
5673 return 0;
5674 }
5675
5676 /**
5677 * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
5678 *
5679 * @dev: The device to suspend
5680 *
5681 * This is called by Linux when the device should prepare for a condition in
5682 * which it will not be able to communicate with the CPU(s) and RAM due to
5683 * power management.
5684 *
5685 * Return: A standard Linux error code
5686 */
5687 #ifdef KBASE_PM_RUNTIME
5688 static int kbase_device_runtime_suspend(struct device *dev)
5689 {
5690 struct kbase_device *kbdev = to_kbase_device(dev);
5691 int ret = 0;
5692
5693 if (!kbdev)
5694 return -ENODEV;
5695
5696 dev_dbg(dev, "Callback %s\n", __func__);
5697 KBASE_KTRACE_ADD(kbdev, PM_RUNTIME_SUSPEND_CALLBACK, NULL, 0);
5698
5699 #if MALI_USE_CSF
5700 ret = kbase_pm_handle_runtime_suspend(kbdev);
5701 if (ret)
5702 return ret;
5703 #endif
5704
5705 #ifdef CONFIG_MALI_BIFROST_DVFS
5706 kbase_pm_metrics_stop(kbdev);
5707 #endif
5708
5709 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
5710 if (kbdev->devfreq)
5711 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
5712 #endif
5713
5714 if (kbdev->pm.backend.callback_power_runtime_off) {
5715 kbdev->pm.backend.callback_power_runtime_off(kbdev);
5716 kbdev->is_runtime_resumed = false;
5717 dev_dbg(dev, "runtime suspend\n");
5718 }
5719 return ret;
5720 }
5721 #endif /* KBASE_PM_RUNTIME */
5722
5723 /**
5724 * kbase_device_runtime_resume - Runtime resume callback from the OS.
5725 *
5726 * @dev: The device to suspend
5727 *
5728 * This is called by Linux when the device should go into a fully active state.
5729 *
5730 * Return: A standard Linux error code
5731 */
5732
5733 #ifdef KBASE_PM_RUNTIME
5734 static int kbase_device_runtime_resume(struct device *dev)
5735 {
5736 int ret = 0;
5737 struct kbase_device *kbdev = to_kbase_device(dev);
5738
5739 if (!kbdev)
5740 return -ENODEV;
5741
5742 dev_dbg(dev, "Callback %s\n", __func__);
5743 // KBASE_KTRACE_ADD(kbdev, PM_RUNTIME_RESUME_CALLBACK, NULL, 0);
5744 if (kbdev->pm.backend.callback_power_runtime_on) {
5745 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
5746 kbdev->is_runtime_resumed = true;
5747 dev_dbg(dev, "runtime resume\n");
5748 }
5749
5750 #ifdef CONFIG_MALI_BIFROST_DVFS
5751 kbase_pm_metrics_start(kbdev);
5752 #endif
5753
5754 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
5755 if (kbdev->devfreq)
5756 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
5757 #endif
5758
5759 return ret;
5760 }
5761 #endif /* KBASE_PM_RUNTIME */
5762
5763
5764 #ifdef KBASE_PM_RUNTIME
5765 /**
5766 * kbase_device_runtime_idle - Runtime idle callback from the OS.
5767 * @dev: The device to suspend
5768 *
5769 * This is called by Linux when the device appears to be inactive and it might
5770 * be placed into a low power state.
5771 *
5772 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
5773 * otherwise a standard Linux error code
5774 */
5775 static int kbase_device_runtime_idle(struct device *dev)
5776 {
5777 struct kbase_device *kbdev = to_kbase_device(dev);
5778
5779 if (!kbdev)
5780 return -ENODEV;
5781
5782 dev_dbg(dev, "Callback %s\n", __func__);
5783 /* Use platform specific implementation if it exists. */
5784 if (kbdev->pm.backend.callback_power_runtime_idle)
5785 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
5786
5787 /* Just need to update the device's last busy mark. Kernel will respect
5788 * the autosuspend delay and so won't suspend the device immediately.
5789 */
5790 pm_runtime_mark_last_busy(kbdev->dev);
5791 return 0;
5792 }
5793 #endif /* KBASE_PM_RUNTIME */
5794
5795 /* The power management operations for the platform driver.
5796 */
5797 static const struct dev_pm_ops kbase_pm_ops = {
5798 .suspend = kbase_device_suspend,
5799 .resume = kbase_device_resume,
5800 #ifdef KBASE_PM_RUNTIME
5801 .runtime_suspend = kbase_device_runtime_suspend,
5802 .runtime_resume = kbase_device_runtime_resume,
5803 .runtime_idle = kbase_device_runtime_idle,
5804 #endif /* KBASE_PM_RUNTIME */
5805 };
5806
5807 #if IS_ENABLED(CONFIG_OF)
5808 static const struct of_device_id kbase_dt_ids[] = { { .compatible = "arm,malit6xx" },
5809 { .compatible = "arm,mali-midgard" },
5810 { .compatible = "arm,mali-bifrost" },
5811 { .compatible = "arm,mali-valhall" },
5812 { /* sentinel */ } };
5813 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
5814 #endif
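
/*
 * Matching against the compatible strings above only happens when CONFIG_OF
 * is enabled; of_match_ptr() evaluates to NULL otherwise, so the driver can
 * still bind to a platform device registered from board code.
 */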
5815
5816 static struct platform_driver kbase_platform_driver = {
5817 .probe = kbase_platform_device_probe,
5818 .remove = kbase_platform_device_remove,
5819 .driver = {
5820 .name = kbase_drv_name,
5821 .pm = &kbase_pm_ops,
5822 .of_match_table = of_match_ptr(kbase_dt_ids),
5823 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
5824 },
5825 };
5826
5827 #if (KERNEL_VERSION(5, 3, 0) > LINUX_VERSION_CODE) && IS_ENABLED(CONFIG_OF)
5828 module_platform_driver(kbase_platform_driver);
5829 #else
5830 static int __init kbase_driver_init(void)
5831 {
5832 int ret;
5833
5834 #if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
5835 mutex_init(&kbase_probe_mutex);
5836 #endif
5837
5838 #ifndef CONFIG_OF
5839 ret = kbase_platform_register();
5840 if (ret)
5841 return ret;
5842 #endif
5843 ret = platform_driver_register(&kbase_platform_driver);
5844 #ifndef CONFIG_OF
5845 if (ret) {
5846 kbase_platform_unregister();
5847 return ret;
5848 }
5849 #endif
5850
5851 return ret;
5852 }
5853
5854 static void __exit kbase_driver_exit(void)
5855 {
5856 platform_driver_unregister(&kbase_platform_driver);
5857 #ifndef CONFIG_OF
5858 kbase_platform_unregister();
5859 #endif
5860 }
5861
5862 module_init(kbase_driver_init);
5863 module_exit(kbase_driver_exit);
5864 #endif
5865 MODULE_LICENSE("GPL");
5866 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
5867 __stringify(BASE_UK_VERSION_MAJOR) "." \
5868 __stringify(BASE_UK_VERSION_MINOR) ")");
5869 MODULE_SOFTDEP("pre: memory_group_manager");
5870 MODULE_INFO(import_ns, "DMA_BUF");
5871
5872 #define CREATE_TRACE_POINTS
5873 /* Create the trace points (otherwise we just get code to call a tracepoint) */
5874 #include "mali_linux_trace.h"
5875
5876 #ifdef CONFIG_MALI_BIFROST_GATOR_SUPPORT
5877 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
5878 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
5879 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
5880 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
5881
5882 void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value)
5883 {
5884 trace_mali_pm_status(dev_id, event, value);
5885 }
5886
5887 void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx, u8 atom_id)
5888 {
5889 trace_mali_job_slots_event(dev_id, event,
5890 (kctx != NULL ? kctx->tgid : 0),
5891 (kctx != NULL ? kctx->pid : 0),
5892 atom_id);
5893 }
5894
5895 void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value)
5896 {
5897 trace_mali_page_fault_insert_pages(dev_id, event, value);
5898 }
5899
5900 void kbase_trace_mali_total_alloc_pages_change(u32 dev_id, long long event)
5901 {
5902 trace_mali_total_alloc_pages_change(dev_id, event);
5903 }
5904 #endif /* CONFIG_MALI_BIFROST_GATOR_SUPPORT */
5905