/*
 * (C) COPYRIGHT RockChip Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 */

/* #define ENABLE_DEBUG_LOG */
#include "custom_log.h"

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_config.h>
#include <backend/gpu/mali_kbase_devfreq.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_pm_defs.h>

#if MALI_USE_CSF
#include <asm/arch_timer.h>
#endif

#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/nvmem-consumer.h>
#include <linux/regmap.h>
#include <linux/soc/rockchip/pvtm.h>
#include <linux/thermal.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/rockchip_system_monitor.h>

#include "mali_kbase_config_platform.h"
#include "mali_kbase_rk.h"

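/*
 * Rate (200 MHz) requested on the scmi clock in rk_pm_callback_runtime_off()
 * before the GPU power domain is powered down.
 */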
#define POWER_DOWN_FREQ	200000000

/**
 * @file mali_kbase_config_rk.c
 * Concrete implementation of platform_config_of_rk.
 *
 * mali_device_driver consists of two parts:
 *      .DP : platform_dependent_part_in_mdd :
 *		the platform-dependent part,
 *		with sources under <mdd_src_dir>/platform/<platform_name>/.
 *		Inside mali_device_driver it is referred to as
 *		platform_dependent_part,
 *		also known as platform_specific_code.
 *      .DP : common_parts_in_mdd :
 *		the common part implemented by ARM,
 *		with sources under <mdd_src_dir>/.
 *		Inside mali_device_driver it is referred to as common_parts.
 */

/*---------------------------------------------------------------------------*/
#ifndef CONFIG_MALI_BIFROST_DEVFREQ
static inline void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
					     struct kbasep_pm_metrics *last,
					     struct kbasep_pm_metrics *diff)
{
}
#endif

#ifdef CONFIG_REGULATOR
static int rk_pm_enable_regulator(struct kbase_device *kbdev);
static void rk_pm_disable_regulator(struct kbase_device *kbdev);
#else
static inline int rk_pm_enable_regulator(struct kbase_device *kbdev)
{
	return 0;
}

static inline void rk_pm_disable_regulator(struct kbase_device *kbdev)
{
}
#endif

static int rk_pm_enable_clk(struct kbase_device *kbdev);

static void rk_pm_disable_clk(struct kbase_device *kbdev);

static int kbase_platform_rk_create_sysfs_files(struct device *dev);

static void kbase_platform_rk_remove_sysfs_files(struct device *dev);

/*---------------------------------------------------------------------------*/
static void rk_pm_power_off_delay_work(struct work_struct *work)
{
	struct rk_context *platform =
		container_of(to_delayed_work(work), struct rk_context, work);
	struct kbase_device *kbdev = platform->kbdev;

	mutex_lock(&platform->lock);

	if (!platform->is_powered) {
		D("mali_dev is already powered off.");
		mutex_unlock(&platform->lock);
		return;
	}

	rockchip_monitor_volt_adjust_lock(kbdev->mdev_info);
	if (pm_runtime_enabled(kbdev->dev)) {
		D("to put_sync_suspend mali_dev.");
		pm_runtime_put_sync_suspend(kbdev->dev);
	}
	rockchip_monitor_volt_adjust_unlock(kbdev->mdev_info);

	rk_pm_disable_clk(kbdev);

	if (pm_runtime_suspended(kbdev->dev)) {
		rk_pm_disable_regulator(kbdev);
		platform->is_regulator_on = false;
	}

	platform->is_powered = false;
	wake_unlock(&platform->wake_lock);

	mutex_unlock(&platform->lock);
}

static int kbase_platform_rk_init(struct kbase_device *kbdev)
{
	int ret = 0;
	struct rk_context *platform;

	platform = kzalloc(sizeof(*platform), GFP_KERNEL);
	if (!platform) {
		E("err.");
		return -ENOMEM;
	}

	platform->is_powered = false;
	platform->kbdev = kbdev;

	platform->delay_ms = 200;
	if (of_property_read_u32(kbdev->dev->of_node, "power-off-delay-ms",
				 &platform->delay_ms))
		W("power-off-delay-ms not available.");

	platform->power_off_wq = create_freezable_workqueue("gpu_power_off_wq");
	if (!platform->power_off_wq) {
		E("couldn't create workqueue");
		ret = -ENOMEM;
		goto err_wq;
	}
	INIT_DEFERRABLE_WORK(&platform->work, rk_pm_power_off_delay_work);

	wake_lock_init(&platform->wake_lock, WAKE_LOCK_SUSPEND, "gpu");

	platform->utilisation_period = DEFAULT_UTILISATION_PERIOD_IN_MS;

	ret = kbase_platform_rk_create_sysfs_files(kbdev->dev);
	if (ret) {
		E("fail to create sysfs_files. ret = %d.", ret);
		goto err_sysfs_files;
	}

	kbdev->platform_context = (void *)platform;
	pm_runtime_enable(kbdev->dev);

	mutex_init(&platform->lock);

	return 0;

err_sysfs_files:
	wake_lock_destroy(&platform->wake_lock);
	destroy_workqueue(platform->power_off_wq);
err_wq:
	return ret;
}
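/*
 * Illustrative device-tree fragment for the optional property read above
 * (node name and unit address are placeholders, not taken from a real
 * board file):
 *
 *	gpu: gpu@fb000000 {
 *		power-off-delay-ms = <200>;
 *	};
 */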

static void kbase_platform_rk_term(struct kbase_device *kbdev)
{
	struct rk_context *platform =
		(struct rk_context *)kbdev->platform_context;

	pm_runtime_disable(kbdev->dev);
	kbdev->platform_context = NULL;

	if (platform) {
		cancel_delayed_work_sync(&platform->work);
		wake_lock_destroy(&platform->wake_lock);
		destroy_workqueue(platform->power_off_wq);
		platform->is_powered = false;
		platform->kbdev = NULL;
		kfree(platform);
	}
	kbase_platform_rk_remove_sysfs_files(kbdev->dev);
}

struct kbase_platform_funcs_conf platform_funcs = {
	.platform_init_func = &kbase_platform_rk_init,
	.platform_term_func = &kbase_platform_rk_term,
};

/*---------------------------------------------------------------------------*/

static int rk_pm_callback_runtime_on(struct kbase_device *kbdev)
{
	struct rockchip_opp_info *opp_info = &kbdev->opp_info;
	int ret = 0;

	if (!kbdev->current_nominal_freq)
		return 0;

	ret = clk_bulk_prepare_enable(opp_info->num_clks, opp_info->clks);
	if (ret) {
		dev_err(kbdev->dev, "failed to enable opp clks\n");
		return ret;
	}
	if (opp_info->data && opp_info->data->set_read_margin)
		opp_info->data->set_read_margin(kbdev->dev, opp_info,
						opp_info->target_rm);
	if (opp_info->scmi_clk) {
		if (clk_set_rate(opp_info->scmi_clk,
				 kbdev->current_nominal_freq))
			dev_err(kbdev->dev, "failed to restore clk rate\n");
	}
	clk_bulk_disable_unprepare(opp_info->num_clks, opp_info->clks);

	return 0;
}

static void rk_pm_callback_runtime_off(struct kbase_device *kbdev)
{
	struct rockchip_opp_info *opp_info = &kbdev->opp_info;

	if (opp_info->scmi_clk) {
		if (clk_set_rate(opp_info->scmi_clk, POWER_DOWN_FREQ))
			dev_err(kbdev->dev, "failed to set power down rate\n");
	}
	opp_info->current_rm = UINT_MAX;
}
static int rk_pm_callback_power_on(struct kbase_device *kbdev)
{
	int ret = 1; /* Assume GPU has been powered off */
	int err = 0;
	struct rk_context *platform = get_rk_context(kbdev);

	cancel_delayed_work_sync(&platform->work);

	mutex_lock(&platform->lock);

	if (platform->is_powered) {
		D("mali_device is already powered.");
		ret = 0;
		goto out;
	}

	/* we must enable vdd_gpu before pd_gpu_in_chip. */
	if (!platform->is_regulator_on) {
		err = rk_pm_enable_regulator(kbdev);
		if (err) {
			E("fail to enable regulator, err : %d.", err);
			ret = err;
			goto out;
		}
		platform->is_regulator_on = true;
	}

	err = rk_pm_enable_clk(kbdev);
	if (err) {
		E("failed to enable clk: %d", err);
		ret = err;
		goto out;
	}

	rockchip_monitor_volt_adjust_lock(kbdev->mdev_info);
	/* If runtime_pm of mali_dev is enabled, then ... */
	if (pm_runtime_enabled(kbdev->dev)) {
		D("to resume mali_dev syncly.");
		/* Powering on pd_in_chip is done in the runtime_pm_callbacks
		 * of the pm_domain.
		 */
		err = pm_runtime_get_sync(kbdev->dev);
		if (err < 0) {
			E("failed to runtime resume device: %d.", err);
			ret = err;
			/* balance the volt-adjust lock taken above. */
			rockchip_monitor_volt_adjust_unlock(kbdev->mdev_info);
			goto out;
		} else if (err == 1) { /* runtime_pm_status is still active */
			D("chip has NOT been powered off, no need to re-init.");
			ret = 0;
		}
	}
	rockchip_monitor_volt_adjust_unlock(kbdev->mdev_info);

	platform->is_powered = true;
	wake_lock(&platform->wake_lock);

out:
	mutex_unlock(&platform->lock);
	return ret;
}

static void rk_pm_callback_power_off(struct kbase_device *kbdev)
{
	struct rk_context *platform = get_rk_context(kbdev);

	D("enter");

	queue_delayed_work(platform->power_off_wq, &platform->work,
			   msecs_to_jiffies(platform->delay_ms));
}

static int rk_kbase_device_runtime_init(struct kbase_device *kbdev)
{
	return 0;
}

static void rk_kbase_device_runtime_disable(struct kbase_device *kbdev)
{
}

struct kbase_pm_callback_conf pm_callbacks = {
	.power_on_callback = rk_pm_callback_power_on,
	.power_off_callback = rk_pm_callback_power_off,
#ifdef CONFIG_PM
	.power_runtime_init_callback = rk_kbase_device_runtime_init,
	.power_runtime_term_callback = rk_kbase_device_runtime_disable,
	.power_runtime_on_callback = rk_pm_callback_runtime_on,
	.power_runtime_off_callback = rk_pm_callback_runtime_off,
#else				/* CONFIG_PM */
	.power_runtime_init_callback = NULL,
	.power_runtime_term_callback = NULL,
	.power_runtime_on_callback = NULL,
	.power_runtime_off_callback = NULL,
#endif				/* CONFIG_PM */
};
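/*
 * Note: platform_funcs and pm_callbacks are typically exported to the kbase
 * common code via the PLATFORM_FUNCS and POWER_MANAGEMENT_CALLBACKS macros
 * in mali_kbase_config_platform.h; this file only provides the definitions.
 * (Assumption based on the usual kbase platform-glue layout, not verified
 * against this tree's header.)
 */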

/*---------------------------------------------------------------------------*/

#ifdef CONFIG_REGULATOR
static int rk_pm_enable_regulator(struct kbase_device *kbdev)
{
	int ret = 0;
	unsigned int i;

	for (i = 0; i < kbdev->nr_regulators; i++) {
		struct regulator *regulator = kbdev->regulators[i];

		if (!regulator) {
			W("no mali regulator control, no need to enable.");
			goto EXIT;
		}

		D("to enable regulator.");
		ret = regulator_enable(regulator);
		if (ret) {
			E("fail to enable regulator, ret : %d.", ret);
			goto EXIT;
		}
	}

EXIT:
	return ret;
}

static void rk_pm_disable_regulator(struct kbase_device *kbdev)
{
	unsigned int i;

	for (i = 0; i < kbdev->nr_regulators; i++) {
		struct regulator *regulator = kbdev->regulators[i];

		if (!regulator) {
			W("no mali regulator control, no need to disable.");
			return;
		}

		D("to disable regulator.");
		regulator_disable(regulator);
	}
}
#endif

static int rk_pm_enable_clk(struct kbase_device *kbdev)
{
	int err = 0;
	unsigned int i;

	for (i = 0; i < kbdev->nr_clocks; i++) {
		struct clk *clock = kbdev->clocks[i];

		if (!clock) {
			W("no mali clock control, no need to enable.");
		} else {
			D("to enable clk.");
			err = clk_enable(clock);
			if (err)
				E("failed to enable clk: %d.", err);
		}
	}

	return err;
}

static void rk_pm_disable_clk(struct kbase_device *kbdev)
{
	unsigned int i;

	for (i = 0; i < kbdev->nr_clocks; i++) {
		struct clk *clock = kbdev->clocks[i];

		if (!clock) {
			W("no mali clock control, no need to disable.");
		} else {
			D("to disable clk.");
			clk_disable(clock);
		}
	}
}

/*---------------------------------------------------------------------------*/

static ssize_t utilisation_period_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct rk_context *platform = get_rk_context(kbdev);
	ssize_t ret = 0;

	ret += snprintf(buf, PAGE_SIZE, "%u\n", platform->utilisation_period);

	return ret;
}

static ssize_t utilisation_period_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct rk_context *platform = get_rk_context(kbdev);
	int ret = 0;

	ret = kstrtouint(buf, 0, &platform->utilisation_period);
	if (ret) {
		E("invalid input period : %s.", buf);
		return ret;
	}
	D("set utilisation_period to '%d'.", platform->utilisation_period);

	return count;
}

static ssize_t utilisation_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct rk_context *platform = get_rk_context(kbdev);
	ssize_t ret = 0;
	unsigned long period_in_us = platform->utilisation_period * 1000;
	u32 utilisation;
	struct kbasep_pm_metrics metrics_when_start;
	struct kbasep_pm_metrics metrics_diff = {}; /* between start and end. */
	u32 total_time = 0;
	u32 busy_time = 0;

	/* get current metrics data. */
	kbase_pm_get_dvfs_metrics(kbdev, &metrics_when_start, &metrics_diff);
	/* sleep for 'period_in_us'. */
	usleep_range(period_in_us, period_in_us + 100);
	/* get metrics data between start and end. */
	kbase_pm_get_dvfs_metrics(kbdev, &metrics_when_start, &metrics_diff);

	total_time = metrics_diff.time_busy + metrics_diff.time_idle;
	busy_time = metrics_diff.time_busy;
	D("total_time : %u, busy_time : %u.", total_time, busy_time);

	/* guard against an empty sample window to avoid dividing by zero. */
	if (!total_time)
		utilisation = 0;
	else
		utilisation = busy_time * 100 / total_time;
	ret += snprintf(buf, PAGE_SIZE, "%u\n", utilisation);

	return ret;
}

static DEVICE_ATTR_RW(utilisation_period);
static DEVICE_ATTR_RO(utilisation);
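/*
 * Illustrative usage from a shell (the sysfs directory depends on where the
 * GPU device node sits; the path below is only an example):
 *
 *	# echo 500 > /sys/devices/platform/fb000000.gpu/utilisation_period
 *	# cat /sys/devices/platform/fb000000.gpu/utilisation
 *
 * Reading 'utilisation' samples the DVFS metrics, sleeps for
 * utilisation_period milliseconds, then reports GPU busy time as a
 * percentage of the sample window.
 */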

static int kbase_platform_rk_create_sysfs_files(struct device *dev)
{
	int ret = 0;

	ret = device_create_file(dev, &dev_attr_utilisation_period);
	if (ret) {
		E("fail to create sysfs file 'utilisation_period'.");
		goto out;
	}

	ret = device_create_file(dev, &dev_attr_utilisation);
	if (ret) {
		E("fail to create sysfs file 'utilisation'.");
		goto remove_utilisation_period;
	}

	return 0;

remove_utilisation_period:
	device_remove_file(dev, &dev_attr_utilisation_period);
out:
	return ret;
}

static void kbase_platform_rk_remove_sysfs_files(struct device *dev)
{
	device_remove_file(dev, &dev_attr_utilisation_period);
	device_remove_file(dev, &dev_attr_utilisation);
}

static int rk3588_gpu_get_soc_info(struct device *dev, struct device_node *np,
			       int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (!bin)
		return 0;

	if (of_property_match_string(np, "nvmem-cell-names",
				     "specification_serial_number") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np,
						  "specification_serial_number",
						  &value);
		if (ret) {
			dev_err(dev,
				"Failed to get specification_serial_number\n");
			return ret;
		}
		/* RK3588M */
		if (value == 0xd)
			*bin = 1;
		/* RK3588J */
		else if (value == 0xa)
			*bin = 2;
	}
	if (*bin < 0)
		*bin = 0;
	dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

static int rk3588_gpu_set_soc_info(struct device *dev, struct device_node *np,
			       int bin, int process, int volt_sel)
{
	struct opp_table *opp_table;
	u32 supported_hw[2];

	if (volt_sel < 0)
		return 0;
	if (bin < 0)
		bin = 0;

	if (!of_property_read_bool(np, "rockchip,supported-hw"))
		return 0;

	/* SoC Version */
	supported_hw[0] = BIT(bin);
	/* Speed Grade */
	supported_hw[1] = BIT(volt_sel);
	opp_table = dev_pm_opp_set_supported_hw(dev, supported_hw, 2);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "failed to set supported opp\n");
		return PTR_ERR(opp_table);
	}

	return 0;
}

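/*
 * rk3588_gpu_set_read_margin() programs the SRAM read margin that matches
 * the requested GPU operating point.  Bits [4:2] (mask 0x1c) of the GRF
 * registers at offsets 0x24 and 0x28 are updated with a read-modify-write;
 * the meaning of those registers is inferred from the code below, not from
 * a published TRM description.
 */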
static int rk3588_gpu_set_read_margin(struct device *dev,
				      struct rockchip_opp_info *opp_info,
				      u32 rm)
{
	int ret = 0;
	u32 val;

	if (!opp_info->grf || !opp_info->volt_rm_tbl)
		return 0;
	if (rm == opp_info->current_rm || rm == UINT_MAX)
		return 0;

	dev_dbg(dev, "set rm to %d\n", rm);

	ret = regmap_read(opp_info->grf, 0x24, &val);
	if (ret < 0) {
		dev_err(dev, "failed to get rm from 0x24\n");
		return ret;
	}
	val &= ~0x1c;
	regmap_write(opp_info->grf, 0x24, val | (rm << 2));

	ret = regmap_read(opp_info->grf, 0x28, &val);
	if (ret < 0) {
		dev_err(dev, "failed to get rm from 0x28\n");
		return ret;
	}
	val &= ~0x1c;
	regmap_write(opp_info->grf, 0x28, val | (rm << 2));

	opp_info->current_rm = rm;

	return 0;
}

static const struct rockchip_opp_data rk3588_gpu_opp_data = {
	.get_soc_info = rk3588_gpu_get_soc_info,
	.set_soc_info = rk3588_gpu_set_soc_info,
	.set_read_margin = rk3588_gpu_set_read_margin,
};

static const struct of_device_id rockchip_mali_of_match[] = {
	{
		.compatible = "rockchip,rk3588",
		.data = (void *)&rk3588_gpu_opp_data,
	},
	{},
};

int kbase_platform_rk_init_opp_table(struct kbase_device *kbdev)
{
	rockchip_get_opp_data(rockchip_mali_of_match, &kbdev->opp_info);

	return rockchip_init_opp_table(kbdev->dev, &kbdev->opp_info,
				       "gpu_leakage", "mali");
}

int kbase_platform_rk_enable_regulator(struct kbase_device *kbdev)
{
	struct rk_context *platform = get_rk_context(kbdev);
	int err = 0;

	if (!platform->is_regulator_on) {
		err = rk_pm_enable_regulator(kbdev);
		if (err) {
			E("fail to enable regulator, err : %d.", err);
			return err;
		}
		platform->is_regulator_on = true;
	}

	return 0;
}

/*---------------------------------------------------------------------------*/

static void *enumerate_gpu_clk(struct kbase_device *kbdev,
		unsigned int index)
{
	if (index >= kbdev->nr_clocks)
		return NULL;

	return kbdev->clocks[index];
}

static unsigned long get_gpu_clk_rate(struct kbase_device *kbdev,
		void *gpu_clk_handle)
{
	return clk_get_rate((struct clk *)gpu_clk_handle);
}

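/*
 * The kbase common code passes a struct kbase_gpu_clk_notifier_data whose
 * gpu_clk_handle member is expected to alias the clk member of
 * struct clk_notifier_data, so the same notifier block can be registered
 * directly with the clock framework; the compile-time asserts below enforce
 * that layout assumption.
 */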
static int gpu_clk_notifier_register(struct kbase_device *kbdev,
		void *gpu_clk_handle, struct notifier_block *nb)
{
	compiletime_assert(offsetof(struct clk_notifier_data, clk) ==
		offsetof(struct kbase_gpu_clk_notifier_data, gpu_clk_handle),
		"mismatch in the offset of clk member");

	compiletime_assert(sizeof(((struct clk_notifier_data *)0)->clk) ==
	     sizeof(((struct kbase_gpu_clk_notifier_data *)0)->gpu_clk_handle),
	     "mismatch in the size of clk member");

	return clk_notifier_register((struct clk *)gpu_clk_handle, nb);
}

static void gpu_clk_notifier_unregister(struct kbase_device *kbdev,
		void *gpu_clk_handle, struct notifier_block *nb)
{
	clk_notifier_unregister((struct clk *)gpu_clk_handle, nb);
}

struct kbase_clk_rate_trace_op_conf clk_rate_trace_ops = {
	.get_gpu_clk_rate = get_gpu_clk_rate,
	.enumerate_gpu_clk = enumerate_gpu_clk,
	.gpu_clk_notifier_register = gpu_clk_notifier_register,
	.gpu_clk_notifier_unregister = gpu_clk_notifier_unregister,
};
693