xref: /OK3568_Linux_fs/kernel/drivers/devfreq/rockchip_dmc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Rockchip Generic dmc support.
4  *
5  * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
6  * Author: Finley Xiao <finley.xiao@rock-chips.com>
7  */
8 
9 #include <dt-bindings/clock/rockchip-ddr.h>
10 #include <dt-bindings/soc/rockchip-system-status.h>
11 #include <drm/drm_modeset_lock.h>
12 #include <linux/arm-smccc.h>
13 #include <linux/clk.h>
14 #include <linux/cpu.h>
15 #include <linux/cpufreq.h>
16 #include <linux/delay.h>
17 #include <linux/devfreq.h>
18 #include <linux/devfreq_cooling.h>
19 #include <linux/devfreq-event.h>
20 #include <linux/input.h>
21 #include <linux/interrupt.h>
22 #include <linux/irq.h>
23 #include <linux/mfd/syscon.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_irq.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_opp.h>
29 #include <linux/pm_qos.h>
30 #include <linux/regmap.h>
31 #include <linux/regulator/consumer.h>
32 #include <linux/rockchip/rockchip_sip.h>
33 #include <linux/rwsem.h>
34 #include <linux/slab.h>
35 #include <linux/string.h>
36 #include <linux/suspend.h>
37 #include <linux/thermal.h>
38 
39 #include <soc/rockchip/pm_domains.h>
40 #include <soc/rockchip/rkfb_dmc.h>
41 #include <soc/rockchip/rockchip_dmc.h>
42 #include <soc/rockchip/rockchip_sip.h>
43 #include <soc/rockchip/rockchip_system_monitor.h>
44 #include <soc/rockchip/rockchip-system-status.h>
45 #include <soc/rockchip/rockchip_opp_select.h>
46 #include <uapi/drm/drm_mode.h>
47 
48 #include "governor.h"
49 #include "rockchip_dmc_timing.h"
50 #include "../clk/rockchip/clk.h"
51 #include "../gpu/drm/rockchip/rockchip_drm_drv.h"
52 #include "../opp/opp.h"
53 
54 #define system_status_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, \
55 						  status_nb)
56 #define reboot_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, \
57 					   reboot_nb)
58 #define boost_to_dmcfreq(work) container_of(work, struct rockchip_dmcfreq, \
59 					    boost_work)
60 #define input_hd_to_dmcfreq(hd) container_of(hd, struct rockchip_dmcfreq, \
61 					     input_handler)
62 
63 #define VIDEO_1080P_SIZE	(1920 * 1080)
64 #define DTS_PAR_OFFSET		(4096)
65 
66 #define FALLBACK_STATIC_TEMPERATURE 55000
67 
68 struct dmc_freq_table {
69 	unsigned long freq;
70 	struct dev_pm_opp_supply supplies[2];
71 };
72 
73 struct share_params {
74 	u32 hz;
75 	u32 lcdc_type;
76 	u32 vop;
77 	u32 vop_dclk_mode;
78 	u32 sr_idle_en;
79 	u32 addr_mcu_el3;
80 	/*
81 	 * 1: need to wait flag1
82 	 * 0: never wait flag1
83 	 */
84 	u32 wait_flag1;
85 	/*
86 	 * 1: need to wait flag0
87 	 * 0: never wait flag0
88 	 */
89 	u32 wait_flag0;
90 	u32 complt_hwirq;
91 	u32 update_drv_odt_cfg;
92 	u32 update_deskew_cfg;
93 
94 	u32 freq_count;
95 	u32 freq_info_mhz[6];
96 	u32 wait_mode;
97 	u32 vop_scan_line_time_ns;
98 	/* if need, add parameter after */
99 };
100 
101 static struct share_params *ddr_psci_param;
102 
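/*
 * Editor's note: ddr_psci_param points into shared memory returned by
 * sip_smc_request_share_mem() in the *_dmc_init() helpers below. A minimal
 * sketch of the layout, as an assumption drawn from how those helpers use
 * it: the first 4 KB page holds struct share_params, and the dts timing
 * data starts DTS_PAR_OFFSET bytes in.
 */
#if 0
/* Hypothetical illustration only, not built into the driver: */
u32 *page = (u32 *)ddr_psci_param;		/* interface parameters */
u32 *dts_timing = page + DTS_PAR_OFFSET / 4;	/* dts timing area */
#endif
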
103 struct rockchip_dmcfreq_ondemand_data {
104 	unsigned int upthreshold;
105 	unsigned int downdifferential;
106 };
107 
108 struct rockchip_dmcfreq {
109 	struct device *dev;
110 	struct dmcfreq_common_info info;
111 	struct rockchip_dmcfreq_ondemand_data ondemand_data;
112 	struct clk *dmc_clk;
113 	struct devfreq_event_dev **edev;
114 	struct mutex lock; /* serializes access to video_info_list */
115 	struct dram_timing *timing;
116 	struct regulator *vdd_center;
117 	struct regulator *mem_reg;
118 	struct notifier_block status_nb;
119 	struct notifier_block panic_nb;
120 	struct list_head video_info_list;
121 	struct freq_map_table *cpu_bw_tbl;
122 	struct work_struct boost_work;
123 	struct input_handler input_handler;
124 	struct monitor_dev_info *mdev_info;
125 	struct share_params *set_rate_params;
126 
127 	unsigned long *nocp_bw;
128 	unsigned long rate;
129 	unsigned long volt, mem_volt;
130 	unsigned long sleep_volt, sleep_mem_volt;
131 	unsigned long auto_min_rate;
132 	unsigned long status_rate;
133 	unsigned long normal_rate;
134 	unsigned long video_1080p_rate;
135 	unsigned long video_4k_rate;
136 	unsigned long video_4k_10b_rate;
137 	unsigned long video_4k_60p_rate;
138 	unsigned long video_svep_rate;
139 	unsigned long performance_rate;
140 	unsigned long hdmi_rate;
141 	unsigned long hdmirx_rate;
142 	unsigned long idle_rate;
143 	unsigned long suspend_rate;
144 	unsigned long reboot_rate;
145 	unsigned long boost_rate;
146 	unsigned long fixed_rate;
147 	unsigned long low_power_rate;
148 
149 	unsigned long freq_count;
150 	unsigned long freq_info_rate[6];
151 	unsigned long rate_low;
152 	unsigned long rate_mid_low;
153 	unsigned long rate_mid_high;
154 	unsigned long rate_high;
155 
156 	unsigned int min_cpu_freq;
157 	unsigned int system_status_en;
158 	unsigned int refresh;
159 	int edev_count;
160 	int dfi_id;
161 	int nocp_cpu_id;
162 	int regulator_count;
163 
164 	bool is_fixed;
165 	bool is_set_rate_direct;
166 
167 	struct thermal_cooling_device *devfreq_cooling;
168 	u32 static_coefficient;
169 	s32 ts[4];
170 	struct thermal_zone_device *ddr_tz;
171 
172 	unsigned int touchboostpulse_duration_val;
173 	u64 touchboostpulse_endtime;
174 
175 	int (*set_auto_self_refresh)(u32 en);
176 };
177 
178 static struct pm_qos_request pm_qos;
179 
180 static int rockchip_dmcfreq_opp_helper(struct dev_pm_set_opp_data *data);
181 
182 static struct monitor_dev_profile dmc_mdevp = {
183 	.type = MONITOR_TYPE_DEV,
184 	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
185 	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
186 	.update_volt = rockchip_monitor_check_rate_volt,
187 	.set_opp = rockchip_dmcfreq_opp_helper,
188 };
189 
190 static inline unsigned long is_dualview(unsigned long status)
191 {
192 	return (status & SYS_STATUS_LCDC0) && (status & SYS_STATUS_LCDC1);
193 }
194 
195 static inline unsigned long is_isp(unsigned long status)
196 {
197 	return (status & SYS_STATUS_ISP) ||
198 	       (status & SYS_STATUS_CIF0) ||
199 	       (status & SYS_STATUS_CIF1);
200 }
201 
202 /*
203  * function: pack the de-skew settings into px30_ddr_dts_config_timing;
204  *           px30_ddr_dts_config_timing is passed to the trusted firmware
205  *           and used directly to program the registers.
206  * input: de_skew
207  * output: tim
208  */
209 static void px30_de_skew_set_2_reg(struct rk3328_ddr_de_skew_setting *de_skew,
210 				   struct px30_ddr_dts_config_timing *tim)
211 {
212 	u32 n;
213 	u32 offset;
214 	u32 shift;
215 
216 	memset_io(tim->ca_skew, 0, sizeof(tim->ca_skew));
217 	memset_io(tim->cs0_skew, 0, sizeof(tim->cs0_skew));
218 	memset_io(tim->cs1_skew, 0, sizeof(tim->cs1_skew));
219 
220 	/* CA de-skew */
221 	for (n = 0; n < ARRAY_SIZE(de_skew->ca_de_skew); n++) {
222 		offset = n / 2;
223 		shift = n % 2;
224 		/* 0 => 4; 1 => 0 */
225 		shift = (shift == 0) ? 4 : 0;
226 		tim->ca_skew[offset] &= ~(0xf << shift);
227 		tim->ca_skew[offset] |= (de_skew->ca_de_skew[n] << shift);
228 	}
229 
230 	/* CS0 data de-skew */
231 	for (n = 0; n < ARRAY_SIZE(de_skew->cs0_de_skew); n++) {
232 		offset = ((n / 21) * 11) + ((n % 21) / 2);
233 		shift = ((n % 21) % 2);
234 		if ((n % 21) == 20)
235 			shift = 0;
236 		else
237 			/* 0 => 4; 1 => 0 */
238 			shift = (shift == 0) ? 4 : 0;
239 		tim->cs0_skew[offset] &= ~(0xf << shift);
240 		tim->cs0_skew[offset] |= (de_skew->cs0_de_skew[n] << shift);
241 	}
242 
243 	/* CS1 data de-skew */
244 	for (n = 0; n < ARRAY_SIZE(de_skew->cs1_de_skew); n++) {
245 		offset = ((n / 21) * 11) + ((n % 21) / 2);
246 		shift = ((n % 21) % 2);
247 		if ((n % 21) == 20)
248 			shift = 0;
249 		else
250 			/* 0 => 4; 1 => 0 */
251 			shift = (shift == 0) ? 4 : 0;
252 		tim->cs1_skew[offset] &= ~(0xf << shift);
253 		tim->cs1_skew[offset] |= (de_skew->cs1_de_skew[n] << shift);
254 	}
255 }
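
/*
 * Editor's worked example of the packing above (an illustration, not
 * vendor documentation): each 4-bit de-skew value shares a 32-bit slot
 * with its neighbour. For CA index n = 4, offset = 2 and the value lands
 * in the high nibble (shift 4) of ca_skew[2]; for n = 5 it lands in the
 * low nibble (shift 0) of the same slot.
 */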
256 
257 /*
258  * function: pack the de-skew settings into rk3328_ddr_dts_config_timing;
259  *           rk3328_ddr_dts_config_timing is passed to the trusted firmware
260  *           and used directly to program the registers.
261  * input: de_skew
262  * output: tim
263  */
264 static void
265 rk3328_de_skew_setting_2_register(struct rk3328_ddr_de_skew_setting *de_skew,
266 				  struct rk3328_ddr_dts_config_timing *tim)
267 {
268 	u32 n;
269 	u32 offset;
270 	u32 shift;
271 
272 	memset_io(tim->ca_skew, 0, sizeof(tim->ca_skew));
273 	memset_io(tim->cs0_skew, 0, sizeof(tim->cs0_skew));
274 	memset_io(tim->cs1_skew, 0, sizeof(tim->cs1_skew));
275 
276 	/* CA de-skew */
277 	for (n = 0; n < ARRAY_SIZE(de_skew->ca_de_skew); n++) {
278 		offset = n / 2;
279 		shift = n % 2;
280 		/* 0 => 4; 1 => 0 */
281 		shift = (shift == 0) ? 4 : 0;
282 		tim->ca_skew[offset] &= ~(0xf << shift);
283 		tim->ca_skew[offset] |= (de_skew->ca_de_skew[n] << shift);
284 	}
285 
286 	/* CS0 data de-skew */
287 	for (n = 0; n < ARRAY_SIZE(de_skew->cs0_de_skew); n++) {
288 		offset = ((n / 21) * 11) + ((n % 21) / 2);
289 		shift = ((n % 21) % 2);
290 		if ((n % 21) == 20)
291 			shift = 0;
292 		else
293 			/* 0 => 4; 1 => 0 */
294 			shift = (shift == 0) ? 4 : 0;
295 		tim->cs0_skew[offset] &= ~(0xf << shift);
296 		tim->cs0_skew[offset] |= (de_skew->cs0_de_skew[n] << shift);
297 	}
298 
299 	/* CS1 data de-skew */
300 	for (n = 0; n < ARRAY_SIZE(de_skew->cs1_de_skew); n++) {
301 		offset = ((n / 21) * 11) + ((n % 21) / 2);
302 		shift = ((n % 21) % 2);
303 		if ((n % 21) == 20)
304 			shift = 0;
305 		else
306 			/* 0 => 4; 1 => 0 */
307 			shift = (shift == 0) ? 4 : 0;
308 		tim->cs1_skew[offset] &= ~(0xf << shift);
309 		tim->cs1_skew[offset] |= (de_skew->cs1_de_skew[n] << shift);
310 	}
311 }
312 
313 static int rk_drm_get_lcdc_type(void)
314 {
315 	u32 lcdc_type = rockchip_drm_get_sub_dev_type();
316 
317 	switch (lcdc_type) {
318 	case DRM_MODE_CONNECTOR_DPI:
319 	case DRM_MODE_CONNECTOR_LVDS:
320 		lcdc_type = SCREEN_LVDS;
321 		break;
322 	case DRM_MODE_CONNECTOR_DisplayPort:
323 		lcdc_type = SCREEN_DP;
324 		break;
325 	case DRM_MODE_CONNECTOR_HDMIA:
326 	case DRM_MODE_CONNECTOR_HDMIB:
327 		lcdc_type = SCREEN_HDMI;
328 		break;
329 	case DRM_MODE_CONNECTOR_TV:
330 		lcdc_type = SCREEN_TVOUT;
331 		break;
332 	case DRM_MODE_CONNECTOR_eDP:
333 		lcdc_type = SCREEN_EDP;
334 		break;
335 	case DRM_MODE_CONNECTOR_DSI:
336 		lcdc_type = SCREEN_MIPI;
337 		break;
338 	default:
339 		lcdc_type = SCREEN_NULL;
340 		break;
341 	}
342 
343 	return lcdc_type;
344 }
345 
346 static int rockchip_ddr_set_rate(unsigned long target_rate)
347 {
348 	struct arm_smccc_res res;
349 
350 	ddr_psci_param->hz = target_rate;
351 	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
352 	ddr_psci_param->vop_scan_line_time_ns = rockchip_drm_get_scan_line_time_ns();
353 	ddr_psci_param->wait_flag1 = 1;
354 	ddr_psci_param->wait_flag0 = 1;
355 
356 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
357 			   ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE);
358 
359 	if ((int)res.a1 == SIP_RET_SET_RATE_TIMEOUT)
360 		rockchip_dmcfreq_wait_complete();
361 
362 	return res.a0;
363 }
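
/*
 * Editor's note: a return of SIP_RET_SET_RATE_TIMEOUT in a1 means the
 * firmware finishes the rate switch asynchronously; the driver then
 * blocks in rockchip_dmcfreq_wait_complete() until the completion IRQ
 * (wait_complete_irq()/wait_dcf_complete_irq() below) fires or the wait
 * times out.
 */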
364 
365 static int rockchip_dmcfreq_set_volt(struct device *dev, struct regulator *reg,
366 				     struct dev_pm_opp_supply *supply,
367 				     char *reg_name)
368 {
369 	int ret;
370 
371 	dev_dbg(dev, "%s: %s voltages (mV): %lu %lu %lu\n", __func__, reg_name,
372 		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
373 	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
374 					    supply->u_volt, INT_MAX);
375 	if (ret)
376 		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
377 			__func__, supply->u_volt_min, supply->u_volt,
378 			supply->u_volt_max, ret);
379 
380 	return ret;
381 }
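
/*
 * Editor's sketch of the intended call order (hypothetical usage; the
 * variable names mirror rockchip_dmcfreq_opp_helper() below, where the
 * real sequencing lives): when scaling up, the "mem" supply is raised
 * before "vdd".
 */
#if 0
ret = rockchip_dmcfreq_set_volt(dev, mem_reg, new_supply_mem, "mem");
if (!ret)
	ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd, "vdd");
#endif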
382 
383 static int rockchip_dmcfreq_opp_helper(struct dev_pm_set_opp_data *data)
384 {
385 	struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
386 	struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
387 	struct regulator *vdd_reg = data->regulators[0];
388 	struct dev_pm_opp_supply *old_supply_mem;
389 	struct dev_pm_opp_supply *new_supply_mem;
390 	struct regulator *mem_reg;
391 	struct device *dev = data->dev;
392 	struct clk *clk = data->clk;
393 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
394 	struct cpufreq_policy *policy;
395 	unsigned long old_freq = data->old_opp.rate;
396 	unsigned long freq = data->new_opp.rate;
397 	unsigned int reg_count = data->regulator_count;
398 	bool is_cpufreq_changed = false;
399 	unsigned int cpu_cur, cpufreq_cur;
400 	int ret = 0;
401 
402 	if (reg_count > 1) {
403 		old_supply_mem = &data->old_opp.supplies[1];
404 		new_supply_mem = &data->new_opp.supplies[1];
405 		mem_reg = data->regulators[1];
406 	}
407 
408 	/*
409 	 * We need to prevent cpu hotplug from happening while a dmc freq rate
410 	 * change is happening.
411 	 *
412 	 * Do this before taking the policy rwsem to avoid deadlocks between the
413 	 * mutex that is locked/unlocked in cpu_hotplug_disable/enable. And it
414 	 * can also avoid deadlocks between the mutex that is locked/unlocked
415 	 * in cpus_read_lock/unlock (such as store_scaling_max_freq()).
416 	 */
417 	cpus_read_lock();
418 
419 	if (dmcfreq->min_cpu_freq) {
420 		/*
421 		 * Go to specified cpufreq and block other cpufreq changes since
422 		 * set_rate needs to complete during vblank.
423 		 */
424 		cpu_cur = raw_smp_processor_id();
425 		policy = cpufreq_cpu_get(cpu_cur);
426 		if (!policy) {
427 			dev_err(dev, "cpu%d policy NULL\n", cpu_cur);
428 			ret = -EINVAL;
429 			goto cpufreq;
430 		}
431 		down_write(&policy->rwsem);
432 		cpufreq_cur = cpufreq_quick_get(cpu_cur);
433 
434 		/* If we're thermally throttled, don't change the cpufreq. */
435 		if (cpufreq_cur < dmcfreq->min_cpu_freq) {
436 			if (policy->max >= dmcfreq->min_cpu_freq) {
437 				__cpufreq_driver_target(policy,
438 							dmcfreq->min_cpu_freq,
439 							CPUFREQ_RELATION_L);
440 				is_cpufreq_changed = true;
441 			} else {
442 				dev_dbg(dev,
443 					"CPU may be too slow for DMC (%d kHz)\n",
444 					policy->max);
445 			}
446 		}
447 	}
448 
449 	/* Scaling up? Scale voltage before frequency */
450 	if (freq >= old_freq) {
451 		if (reg_count > 1) {
452 			ret = rockchip_dmcfreq_set_volt(dev, mem_reg,
453 							new_supply_mem, "mem");
454 			if (ret)
455 				goto restore_voltage;
456 		}
457 		ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd,
458 						"vdd");
459 		if (ret)
460 			goto restore_voltage;
461 		if (freq == old_freq)
462 			goto out;
463 	}
464 
465 	/*
466 	 * Writer in rwsem may block readers even during its waiting in queue,
467 	 * and this may lead to a deadlock when the code path takes read sem
468 	 * twice (e.g. one in vop_lock() and another in rockchip_pmu_lock()).
469 	 * As a (suboptimal) workaround, let writer to spin until it gets the
470 	 * lock.
471 	 */
472 	while (!rockchip_dmcfreq_write_trylock())
473 		cond_resched();
474 	dev_dbg(dev, "%lu Hz --> %lu Hz\n", old_freq, freq);
475 
476 	if (dmcfreq->set_rate_params) {
477 		dmcfreq->set_rate_params->lcdc_type = rk_drm_get_lcdc_type();
478 		dmcfreq->set_rate_params->wait_flag1 = 1;
479 		dmcfreq->set_rate_params->wait_flag0 = 1;
480 	}
481 
482 	if (dmcfreq->is_set_rate_direct)
483 		ret = rockchip_ddr_set_rate(freq);
484 	else
485 		ret = clk_set_rate(clk, freq);
486 
487 	rockchip_dmcfreq_write_unlock();
488 	if (ret) {
489 		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
490 			ret);
491 		goto restore_voltage;
492 	}
493 
494 	/*
495 	 * Check the dpll rate.
496 	 * There are only two possible results:
497 	 * 1. DDR frequency scaling failed: we still get the old rate.
498 	 * 2. DDR frequency scaling succeeded: we get the rate we set.
499 	 */
500 	dmcfreq->rate = clk_get_rate(clk);
501 
502 	/* If we got the wrong rate, restore the old voltage. */
503 	if (dmcfreq->rate != freq) {
504 		dev_err(dev, "Got wrong frequency: requested %lu, current %lu\n",
505 			freq, dmcfreq->rate);
506 		ret = -EINVAL;
507 		goto restore_voltage;
508 	}
509 
510 	/* Scaling down? Scale voltage after frequency */
511 	if (freq < old_freq) {
512 		ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd,
513 						"vdd");
514 		if (ret)
515 			goto restore_freq;
516 		if (reg_count > 1) {
517 			ret = rockchip_dmcfreq_set_volt(dev, mem_reg,
518 							new_supply_mem, "mem");
519 			if (ret)
520 				goto restore_freq;
521 		}
522 	}
523 	dmcfreq->volt = new_supply_vdd->u_volt;
524 	if (reg_count > 1)
525 		dmcfreq->mem_volt = new_supply_mem->u_volt;
526 
527 	goto out;
528 
529 restore_freq:
530 	if (dmcfreq->is_set_rate_direct)
531 		ret = rockchip_ddr_set_rate(freq);
532 	else
533 		ret = clk_set_rate(clk, freq);
534 	if (ret)
535 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
536 			__func__, old_freq);
537 restore_voltage:
538 	if (reg_count > 1 && old_supply_mem->u_volt)
539 		rockchip_dmcfreq_set_volt(dev, mem_reg, old_supply_mem, "mem");
540 	if (old_supply_vdd->u_volt)
541 		rockchip_dmcfreq_set_volt(dev, vdd_reg, old_supply_vdd, "vdd");
542 out:
543 	if (dmcfreq->min_cpu_freq) {
544 		if (is_cpufreq_changed)
545 			__cpufreq_driver_target(policy, cpufreq_cur,
546 						CPUFREQ_RELATION_L);
547 		up_write(&policy->rwsem);
548 		cpufreq_cpu_put(policy);
549 	}
550 cpufreq:
551 	cpus_read_unlock();
552 
553 	return ret;
554 }
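
/*
 * Editor's summary of the helper above: voltages are raised before the
 * clock when scaling up and lowered after it when scaling down; the
 * calling CPU is temporarily raised to min_cpu_freq (under the cpufreq
 * policy rwsem, with hotplug disabled) so the rate switch can finish
 * within one vblank.
 */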
555 
556 static int rockchip_dmcfreq_target(struct device *dev, unsigned long *freq,
557 				   u32 flags)
558 {
559 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
560 	struct devfreq *devfreq;
561 	struct dev_pm_opp *opp;
562 	int ret = 0;
563 
564 	if (!dmc_mdevp.is_checked)
565 		return -EINVAL;
566 
567 	opp = devfreq_recommended_opp(dev, freq, flags);
568 	if (IS_ERR(opp)) {
569 		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
570 		return PTR_ERR(opp);
571 	}
572 	dev_pm_opp_put(opp);
573 
574 	rockchip_monitor_volt_adjust_lock(dmcfreq->mdev_info);
575 	ret = dev_pm_opp_set_rate(dev, *freq);
576 	if (!ret) {
577 		if (dmcfreq->info.devfreq) {
578 			devfreq = dmcfreq->info.devfreq;
579 			devfreq->last_status.current_frequency = *freq;
580 		}
581 	}
582 	rockchip_monitor_volt_adjust_unlock(dmcfreq->mdev_info);
583 
584 	return ret;
585 }
586 
587 static int rockchip_dmcfreq_get_dev_status(struct device *dev,
588 					   struct devfreq_dev_status *stat)
589 {
590 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
591 	struct devfreq_event_data edata;
592 	int i, ret = 0;
593 
594 	if (!dmcfreq->info.auto_freq_en)
595 		return -EINVAL;
596 
597 	/*
598 	 * RK3588 platform may crash if the CPU and MCU access the DFI/DMC
599 	 * registers at the same time.
600 	 */
601 	rockchip_monitor_volt_adjust_lock(dmcfreq->mdev_info);
602 	for (i = 0; i < dmcfreq->edev_count; i++) {
603 		ret = devfreq_event_get_event(dmcfreq->edev[i], &edata);
604 		if (ret < 0) {
605 			dev_err(dev, "failed to get event %s\n",
606 				dmcfreq->edev[i]->desc->name);
607 			goto out;
608 		}
609 		if (i == dmcfreq->dfi_id) {
610 			stat->busy_time = edata.load_count;
611 			stat->total_time = edata.total_count;
612 		} else {
613 			dmcfreq->nocp_bw[i] = edata.load_count;
614 		}
615 	}
616 
617 out:
618 	rockchip_monitor_volt_adjust_unlock(dmcfreq->mdev_info);
619 
620 	return ret;
621 }
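
/*
 * Editor's note: the DFI event device fills busy_time/total_time for the
 * devfreq governor, while every other event device contributes a raw
 * bandwidth sample to nocp_bw[].
 */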
622 
623 static int rockchip_dmcfreq_get_cur_freq(struct device *dev,
624 					 unsigned long *freq)
625 {
626 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
627 
628 	*freq = dmcfreq->rate;
629 
630 	return 0;
631 }
632 
633 static struct devfreq_dev_profile rockchip_devfreq_dmc_profile = {
634 	.polling_ms	= 50,
635 	.target		= rockchip_dmcfreq_target,
636 	.get_dev_status	= rockchip_dmcfreq_get_dev_status,
637 	.get_cur_freq	= rockchip_dmcfreq_get_cur_freq,
638 };
639 
640 
641 static inline void reset_last_status(struct devfreq *devfreq)
642 {
643 	devfreq->last_status.total_time = 1;
644 	devfreq->last_status.busy_time = 1;
645 }
646 
647 static void of_get_px30_timings(struct device *dev,
648 				struct device_node *np, uint32_t *timing)
649 {
650 	struct device_node *np_tim;
651 	u32 *p;
652 	struct px30_ddr_dts_config_timing *dts_timing;
653 	struct rk3328_ddr_de_skew_setting *de_skew;
654 	int ret = 0;
655 	u32 i;
656 
657 	dts_timing =
658 		(struct px30_ddr_dts_config_timing *)(timing +
659 							DTS_PAR_OFFSET / 4);
660 
661 	np_tim = of_parse_phandle(np, "ddr_timing", 0);
662 	if (!np_tim) {
663 		ret = -EINVAL;
664 		goto end;
665 	}
666 	de_skew = kmalloc(sizeof(*de_skew), GFP_KERNEL);
667 	if (!de_skew) {
668 		ret = -ENOMEM;
669 		goto end;
670 	}
671 	p = (u32 *)dts_timing;
672 	for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
673 		ret |= of_property_read_u32(np_tim, px30_dts_timing[i],
674 					p + i);
675 	}
676 	p = (u32 *)de_skew->ca_de_skew;
677 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_ca_timing); i++) {
678 		ret |= of_property_read_u32(np_tim, rk3328_dts_ca_timing[i],
679 					p + i);
680 	}
681 	p = (u32 *)de_skew->cs0_de_skew;
682 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs0_timing); i++) {
683 		ret |= of_property_read_u32(np_tim, rk3328_dts_cs0_timing[i],
684 					p + i);
685 	}
686 	p = (u32 *)de_skew->cs1_de_skew;
687 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs1_timing); i++) {
688 		ret |= of_property_read_u32(np_tim, rk3328_dts_cs1_timing[i],
689 					p + i);
690 	}
691 	if (!ret)
692 		px30_de_skew_set_2_reg(de_skew, dts_timing);
693 	kfree(de_skew);
694 end:
695 	if (!ret) {
696 		dts_timing->available = 1;
697 	} else {
698 		dts_timing->available = 0;
699 		dev_err(dev, "of_get_ddr_timings: fail\n");
700 	}
701 
702 	of_node_put(np_tim);
703 }
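
/*
 * Editor's note: this and most of the following of_get_*_timings()
 * helpers OR all of_property_read_u32() return values together, so a
 * single missing dts property marks the whole timing block unavailable
 * (dts_timing->available = 0) rather than failing the probe.
 */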
704 
705 static void of_get_rk1808_timings(struct device *dev,
706 				  struct device_node *np, uint32_t *timing)
707 {
708 	struct device_node *np_tim;
709 	u32 *p;
710 	struct rk1808_ddr_dts_config_timing *dts_timing;
711 	int ret = 0;
712 	u32 i;
713 
714 	dts_timing =
715 		(struct rk1808_ddr_dts_config_timing *)(timing +
716 							DTS_PAR_OFFSET / 4);
717 
718 	np_tim = of_parse_phandle(np, "ddr_timing", 0);
719 	if (!np_tim) {
720 		ret = -EINVAL;
721 		goto end;
722 	}
723 
724 	p = (u32 *)dts_timing;
725 	for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
726 		ret |= of_property_read_u32(np_tim, px30_dts_timing[i],
727 					p + i);
728 	}
729 	p = (u32 *)dts_timing->ca_de_skew;
730 	for (i = 0; i < ARRAY_SIZE(rk1808_dts_ca_timing); i++) {
731 		ret |= of_property_read_u32(np_tim, rk1808_dts_ca_timing[i],
732 					p + i);
733 	}
734 	p = (u32 *)dts_timing->cs0_a_de_skew;
735 	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs0_a_timing); i++) {
736 		ret |= of_property_read_u32(np_tim, rk1808_dts_cs0_a_timing[i],
737 					p + i);
738 	}
739 	p = (u32 *)dts_timing->cs0_b_de_skew;
740 	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs0_b_timing); i++) {
741 		ret |= of_property_read_u32(np_tim, rk1808_dts_cs0_b_timing[i],
742 					p + i);
743 	}
744 	p = (u32 *)dts_timing->cs1_a_de_skew;
745 	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs1_a_timing); i++) {
746 		ret |= of_property_read_u32(np_tim, rk1808_dts_cs1_a_timing[i],
747 					p + i);
748 	}
749 	p = (u32 *)dts_timing->cs1_b_de_skew;
750 	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs1_b_timing); i++) {
751 		ret |= of_property_read_u32(np_tim, rk1808_dts_cs1_b_timing[i],
752 					p + i);
753 	}
754 
755 end:
756 	if (!ret) {
757 		dts_timing->available = 1;
758 	} else {
759 		dts_timing->available = 0;
760 		dev_err(dev, "of_get_ddr_timings: fail\n");
761 	}
762 
763 	of_node_put(np_tim);
764 }
765 
766 static void of_get_rk3128_timings(struct device *dev,
767 				  struct device_node *np, uint32_t *timing)
768 {
769 	struct device_node *np_tim;
770 	u32 *p;
771 	struct rk3128_ddr_dts_config_timing *dts_timing;
772 	struct share_params *init_timing;
773 	int ret = 0;
774 	u32 i;
775 
776 	init_timing = (struct share_params *)timing;
777 
778 	if (of_property_read_u32(np, "vop-dclk-mode",
779 				 &init_timing->vop_dclk_mode))
780 		init_timing->vop_dclk_mode = 0;
781 
782 	p = timing + DTS_PAR_OFFSET / 4;
783 	np_tim = of_parse_phandle(np, "rockchip,ddr_timing", 0);
784 	if (!np_tim) {
785 		ret = -EINVAL;
786 		goto end;
787 	}
788 	for (i = 0; i < ARRAY_SIZE(rk3128_dts_timing); i++) {
789 		ret |= of_property_read_u32(np_tim, rk3128_dts_timing[i],
790 					p + i);
791 	}
792 end:
793 	dts_timing =
794 		(struct rk3128_ddr_dts_config_timing *)(timing +
795 							DTS_PAR_OFFSET / 4);
796 	if (!ret) {
797 		dts_timing->available = 1;
798 	} else {
799 		dts_timing->available = 0;
800 		dev_err(dev, "of_get_ddr_timings: fail\n");
801 	}
802 
803 	of_node_put(np_tim);
804 }
805 
806 static uint32_t of_get_rk3228_timings(struct device *dev,
807 				      struct device_node *np, uint32_t *timing)
808 {
809 	struct device_node *np_tim;
810 	u32 *p;
811 	int ret = 0;
812 	u32 i;
813 
814 	p = timing + DTS_PAR_OFFSET / 4;
815 	np_tim = of_parse_phandle(np, "rockchip,dram_timing", 0);
816 	if (!np_tim) {
817 		ret = -EINVAL;
818 		goto end;
819 	}
820 	for (i = 0; i < ARRAY_SIZE(rk3228_dts_timing); i++) {
821 		ret |= of_property_read_u32(np_tim, rk3228_dts_timing[i],
822 					p + i);
823 	}
824 end:
825 	if (ret)
826 		dev_err(dev, "of_get_ddr_timings: fail\n");
827 
828 	of_node_put(np_tim);
829 	return ret;
830 }
831 
832 static void of_get_rk3288_timings(struct device *dev,
833 				  struct device_node *np, uint32_t *timing)
834 {
835 	struct device_node *np_tim;
836 	u32 *p;
837 	struct rk3288_ddr_dts_config_timing *dts_timing;
838 	struct share_params *init_timing;
839 	int ret = 0;
840 	u32 i;
841 
842 	init_timing = (struct share_params *)timing;
843 
844 	if (of_property_read_u32(np, "vop-dclk-mode",
845 				 &init_timing->vop_dclk_mode))
846 		init_timing->vop_dclk_mode = 0;
847 
848 	p = timing + DTS_PAR_OFFSET / 4;
849 	np_tim = of_parse_phandle(np, "rockchip,ddr_timing", 0);
850 	if (!np_tim) {
851 		ret = -EINVAL;
852 		goto end;
853 	}
854 	for (i = 0; i < ARRAY_SIZE(rk3288_dts_timing); i++) {
855 		ret |= of_property_read_u32(np_tim, rk3288_dts_timing[i],
856 					p + i);
857 	}
858 end:
859 	dts_timing =
860 		(struct rk3288_ddr_dts_config_timing *)(timing +
861 							DTS_PAR_OFFSET / 4);
862 	if (!ret) {
863 		dts_timing->available = 1;
864 	} else {
865 		dts_timing->available = 0;
866 		dev_err(dev, "of_get_ddr_timings: fail\n");
867 	}
868 
869 	of_node_put(np_tim);
870 }
871 
872 static void of_get_rk3328_timings(struct device *dev,
873 				  struct device_node *np, uint32_t *timing)
874 {
875 	struct device_node *np_tim;
876 	u32 *p;
877 	struct rk3328_ddr_dts_config_timing *dts_timing;
878 	struct rk3328_ddr_de_skew_setting *de_skew;
879 	int ret = 0;
880 	u32 i;
881 
882 	dts_timing =
883 		(struct rk3328_ddr_dts_config_timing *)(timing +
884 							DTS_PAR_OFFSET / 4);
885 
886 	np_tim = of_parse_phandle(np, "ddr_timing", 0);
887 	if (!np_tim) {
888 		ret = -EINVAL;
889 		goto end;
890 	}
891 	de_skew = kmalloc(sizeof(*de_skew), GFP_KERNEL);
892 	if (!de_skew) {
893 		ret = -ENOMEM;
894 		goto end;
895 	}
896 	p = (u32 *)dts_timing;
897 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_timing); i++) {
898 		ret |= of_property_read_u32(np_tim, rk3328_dts_timing[i],
899 					p + i);
900 	}
901 	p = (u32 *)de_skew->ca_de_skew;
902 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_ca_timing); i++) {
903 		ret |= of_property_read_u32(np_tim, rk3328_dts_ca_timing[i],
904 					p + i);
905 	}
906 	p = (u32 *)de_skew->cs0_de_skew;
907 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs0_timing); i++) {
908 		ret |= of_property_read_u32(np_tim, rk3328_dts_cs0_timing[i],
909 					p + i);
910 	}
911 	p = (u32 *)de_skew->cs1_de_skew;
912 	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs1_timing); i++) {
913 		ret |= of_property_read_u32(np_tim, rk3328_dts_cs1_timing[i],
914 					p + i);
915 	}
916 	if (!ret)
917 		rk3328_de_skew_setting_2_register(de_skew, dts_timing);
918 	kfree(de_skew);
919 end:
920 	if (!ret) {
921 		dts_timing->available = 1;
922 	} else {
923 		dts_timing->available = 0;
924 		dev_err(dev, "of_get_ddr_timings: fail\n");
925 	}
926 
927 	of_node_put(np_tim);
928 }
929 
930 static void of_get_rv1126_timings(struct device *dev,
931 				  struct device_node *np, uint32_t *timing)
932 {
933 	struct device_node *np_tim;
934 	u32 *p;
935 	struct rk1808_ddr_dts_config_timing *dts_timing;
936 	int ret = 0;
937 	u32 i;
938 
939 	dts_timing =
940 		(struct rk1808_ddr_dts_config_timing *)(timing +
941 							DTS_PAR_OFFSET / 4);
942 
943 	np_tim = of_parse_phandle(np, "ddr_timing", 0);
944 	if (!np_tim) {
945 		ret = -EINVAL;
946 		goto end;
947 	}
948 
949 	p = (u32 *)dts_timing;
950 	for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
951 		ret |= of_property_read_u32(np_tim, px30_dts_timing[i],
952 					p + i);
953 	}
954 	p = (u32 *)dts_timing->ca_de_skew;
955 	for (i = 0; i < ARRAY_SIZE(rv1126_dts_ca_timing); i++) {
956 		ret |= of_property_read_u32(np_tim, rv1126_dts_ca_timing[i],
957 					p + i);
958 	}
959 	p = (u32 *)dts_timing->cs0_a_de_skew;
960 	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs0_a_timing); i++) {
961 		ret |= of_property_read_u32(np_tim, rv1126_dts_cs0_a_timing[i],
962 					p + i);
963 	}
964 	p = (u32 *)dts_timing->cs0_b_de_skew;
965 	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs0_b_timing); i++) {
966 		ret |= of_property_read_u32(np_tim, rv1126_dts_cs0_b_timing[i],
967 					p + i);
968 	}
969 	p = (u32 *)dts_timing->cs1_a_de_skew;
970 	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs1_a_timing); i++) {
971 		ret |= of_property_read_u32(np_tim, rv1126_dts_cs1_a_timing[i],
972 					p + i);
973 	}
974 	p = (u32 *)dts_timing->cs1_b_de_skew;
975 	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs1_b_timing); i++) {
976 		ret |= of_property_read_u32(np_tim, rv1126_dts_cs1_b_timing[i],
977 					p + i);
978 	}
979 
980 end:
981 	if (!ret) {
982 		dts_timing->available = 1;
983 	} else {
984 		dts_timing->available = 0;
985 		dev_err(dev, "of_get_ddr_timings: fail\n");
986 	}
987 
988 	of_node_put(np_tim);
989 }
990 
991 static struct rk3399_dram_timing *of_get_rk3399_timings(struct device *dev,
992 							struct device_node *np)
993 {
994 	struct rk3399_dram_timing *timing = NULL;
995 	struct device_node *np_tim;
996 	int ret;
997 
998 	np_tim = of_parse_phandle(np, "ddr_timing", 0);
999 	if (np_tim) {
1000 		timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
1001 		if (!timing)
1002 			goto err;
1003 
1004 		ret = of_property_read_u32(np_tim, "ddr3_speed_bin",
1005 					   &timing->ddr3_speed_bin);
1006 		ret |= of_property_read_u32(np_tim, "pd_idle",
1007 					    &timing->pd_idle);
1008 		ret |= of_property_read_u32(np_tim, "sr_idle",
1009 					    &timing->sr_idle);
1010 		ret |= of_property_read_u32(np_tim, "sr_mc_gate_idle",
1011 					    &timing->sr_mc_gate_idle);
1012 		ret |= of_property_read_u32(np_tim, "srpd_lite_idle",
1013 					    &timing->srpd_lite_idle);
1014 		ret |= of_property_read_u32(np_tim, "standby_idle",
1015 					    &timing->standby_idle);
1016 		ret |= of_property_read_u32(np_tim, "auto_lp_dis_freq",
1017 					    &timing->auto_lp_dis_freq);
1018 		ret |= of_property_read_u32(np_tim, "ddr3_dll_dis_freq",
1019 					    &timing->ddr3_dll_dis_freq);
1020 		ret |= of_property_read_u32(np_tim, "phy_dll_dis_freq",
1021 					    &timing->phy_dll_dis_freq);
1022 		ret |= of_property_read_u32(np_tim, "ddr3_odt_dis_freq",
1023 					    &timing->ddr3_odt_dis_freq);
1024 		ret |= of_property_read_u32(np_tim, "ddr3_drv",
1025 					    &timing->ddr3_drv);
1026 		ret |= of_property_read_u32(np_tim, "ddr3_odt",
1027 					    &timing->ddr3_odt);
1028 		ret |= of_property_read_u32(np_tim, "phy_ddr3_ca_drv",
1029 					    &timing->phy_ddr3_ca_drv);
1030 		ret |= of_property_read_u32(np_tim, "phy_ddr3_dq_drv",
1031 					    &timing->phy_ddr3_dq_drv);
1032 		ret |= of_property_read_u32(np_tim, "phy_ddr3_odt",
1033 					    &timing->phy_ddr3_odt);
1034 		ret |= of_property_read_u32(np_tim, "lpddr3_odt_dis_freq",
1035 					    &timing->lpddr3_odt_dis_freq);
1036 		ret |= of_property_read_u32(np_tim, "lpddr3_drv",
1037 					    &timing->lpddr3_drv);
1038 		ret |= of_property_read_u32(np_tim, "lpddr3_odt",
1039 					    &timing->lpddr3_odt);
1040 		ret |= of_property_read_u32(np_tim, "phy_lpddr3_ca_drv",
1041 					    &timing->phy_lpddr3_ca_drv);
1042 		ret |= of_property_read_u32(np_tim, "phy_lpddr3_dq_drv",
1043 					    &timing->phy_lpddr3_dq_drv);
1044 		ret |= of_property_read_u32(np_tim, "phy_lpddr3_odt",
1045 					    &timing->phy_lpddr3_odt);
1046 		ret |= of_property_read_u32(np_tim, "lpddr4_odt_dis_freq",
1047 					    &timing->lpddr4_odt_dis_freq);
1048 		ret |= of_property_read_u32(np_tim, "lpddr4_drv",
1049 					    &timing->lpddr4_drv);
1050 		ret |= of_property_read_u32(np_tim, "lpddr4_dq_odt",
1051 					    &timing->lpddr4_dq_odt);
1052 		ret |= of_property_read_u32(np_tim, "lpddr4_ca_odt",
1053 					    &timing->lpddr4_ca_odt);
1054 		ret |= of_property_read_u32(np_tim, "phy_lpddr4_ca_drv",
1055 					    &timing->phy_lpddr4_ca_drv);
1056 		ret |= of_property_read_u32(np_tim, "phy_lpddr4_ck_cs_drv",
1057 					    &timing->phy_lpddr4_ck_cs_drv);
1058 		ret |= of_property_read_u32(np_tim, "phy_lpddr4_dq_drv",
1059 					    &timing->phy_lpddr4_dq_drv);
1060 		ret |= of_property_read_u32(np_tim, "phy_lpddr4_odt",
1061 					    &timing->phy_lpddr4_odt);
1062 		if (ret) {
1063 			devm_kfree(dev, timing);
1064 			goto err;
1065 		}
1066 		of_node_put(np_tim);
1067 		return timing;
1068 	}
1069 
1070 err:
1071 	if (timing) {
1072 		devm_kfree(dev, timing);
1073 		timing = NULL;
1074 	}
1075 	of_node_put(np_tim);
1076 	return timing;
1077 }
1078 
1079 static int rockchip_ddr_set_auto_self_refresh(uint32_t en)
1080 {
1081 	struct arm_smccc_res res;
1082 
1083 	ddr_psci_param->sr_idle_en = en;
1084 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1085 			   ROCKCHIP_SIP_CONFIG_DRAM_SET_AT_SR);
1086 
1087 	return res.a0;
1088 }
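
/*
 * Editor's sketch (hypothetical usage): the pointer stored in
 * dmcfreq->set_auto_self_refresh lets the dmcfreq core toggle auto
 * self-refresh, for example when entering an idle state.
 */
#if 0
if (dmcfreq->set_auto_self_refresh)
	dmcfreq->set_auto_self_refresh(1);	/* enable sr_idle */
#endif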
1089 
1090 struct dmcfreq_wait_ctrl_t {
1091 	wait_queue_head_t wait_wq;
1092 	int complt_irq;
1093 	int wait_flag;
1094 	int wait_en;
1095 	int wait_time_out_ms;
1096 	int dcf_en;
1097 	struct regmap *regmap_dcf;
1098 };
1099 
1100 static struct dmcfreq_wait_ctrl_t wait_ctrl;
1101 
1102 static irqreturn_t wait_complete_irq(int irqno, void *dev_id)
1103 {
1104 	struct dmcfreq_wait_ctrl_t *ctrl = dev_id;
1105 
1106 	ctrl->wait_flag = 0;
1107 	wake_up(&ctrl->wait_wq);
1108 	return IRQ_HANDLED;
1109 }
1110 
1111 static irqreturn_t wait_dcf_complete_irq(int irqno, void *dev_id)
1112 {
1113 	struct arm_smccc_res res;
1114 	struct dmcfreq_wait_ctrl_t *ctrl = dev_id;
1115 
1116 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1117 			   ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE);
1118 	if (res.a0)
1119 		pr_err("%s: dram post set rate error:%lx\n", __func__, res.a0);
1120 
1121 	ctrl->wait_flag = 0;
1122 	wake_up(&ctrl->wait_wq);
1123 	return IRQ_HANDLED;
1124 }
1125 
1126 int rockchip_dmcfreq_wait_complete(void)
1127 {
1128 	struct arm_smccc_res res;
1129 
1130 	if (!wait_ctrl.wait_en) {
1131 		pr_err("%s: waiting for completion is not supported!\n", __func__);
1132 		return 0;
1133 	}
1134 	wait_ctrl.wait_flag = -1;
1135 
1136 	enable_irq(wait_ctrl.complt_irq);
1137 	/*
1138 	 * Allow CPUs to enter only WFI when idle so that
1139 	 * the FIQ can be serviced quickly.
1140 	 */
1141 	cpu_latency_qos_update_request(&pm_qos, 0);
1142 
1143 	if (wait_ctrl.dcf_en == 1) {
1144 		/* start dcf */
1145 		regmap_update_bits(wait_ctrl.regmap_dcf, 0x0, 0x1, 0x1);
1146 	} else if (wait_ctrl.dcf_en == 2) {
1147 		res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_MCU_START);
1148 		if (res.a0) {
1149 			pr_err("rockchip_sip_config_mcu_start error:%lx\n", res.a0);
1150 			return -ENOMEM;
1151 		}
1152 	}
1153 
1154 	wait_event_timeout(wait_ctrl.wait_wq, (wait_ctrl.wait_flag == 0),
1155 			   msecs_to_jiffies(wait_ctrl.wait_time_out_ms));
1156 
1157 	/*
1158 	 * If waiting for wait_ctrl.complt_irq times out, clear the IRQ and stop the MCU by
1159 	 * sip_smc_dram(DRAM_POST_SET_RATE).
1160 	 */
1161 	if (wait_ctrl.dcf_en == 2 && wait_ctrl.wait_flag != 0) {
1162 		res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE);
1163 		if (res.a0)
1164 			pr_err("%s: dram post set rate error:%lx\n", __func__, res.a0);
1165 	}
1166 
1167 	cpu_latency_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
1168 	disable_irq(wait_ctrl.complt_irq);
1169 
1170 	return 0;
1171 }
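
/*
 * Editor's summary of the wait protocol above: enable the completion
 * IRQ, pin the CPU latency QoS at 0 so idle stops at WFI, kick the DCF
 * (dcf_en == 1) or the MCU (dcf_en == 2), then sleep in
 * wait_event_timeout() until the IRQ handler clears wait_flag; if the
 * MCU path times out, DRAM_POST_SET_RATE is issued to clean up.
 */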
1172 
1173 static __maybe_unused int rockchip_get_freq_info(struct rockchip_dmcfreq *dmcfreq)
1174 {
1175 	struct arm_smccc_res res;
1176 	struct dev_pm_opp *opp;
1177 	struct dmc_freq_table *freq_table;
1178 	unsigned long rate;
1179 	int i, j, count, ret = 0;
1180 
1181 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1182 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO);
1183 	if (res.a0) {
1184 		dev_err(dmcfreq->dev, "rockchip_sip_config_dram_get_freq_info error:%lx\n",
1185 			res.a0);
1186 		return -ENOMEM;
1187 	}
1188 
1189 	if (ddr_psci_param->freq_count == 0 || ddr_psci_param->freq_count > 6) {
1190 		dev_err(dmcfreq->dev, "there are no available frequencies!\n");
1191 		return -EPERM;
1192 	}
1193 
1194 	for (i = 0; i < ddr_psci_param->freq_count; i++)
1195 		dmcfreq->freq_info_rate[i] = ddr_psci_param->freq_info_mhz[i] * 1000000;
1196 	dmcfreq->freq_count = ddr_psci_param->freq_count;
1197 
1198 	/* update dmc_opp_table */
1199 	count = dev_pm_opp_get_opp_count(dmcfreq->dev);
1200 	if (count <= 0) {
1201 		ret = count ? count : -ENODATA;
1202 		return ret;
1203 	}
1204 
1205 	freq_table = kzalloc(sizeof(*freq_table) * count, GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;
1206 	for (i = 0, rate = 0; i < count; i++, rate++) {
1207 		/* find next rate */
1208 		opp = dev_pm_opp_find_freq_ceil(dmcfreq->dev, &rate);
1209 		if (IS_ERR(opp)) {
1210 			ret = PTR_ERR(opp);
1211 			dev_err(dmcfreq->dev, "failed to find OPP for freq %lu.\n", rate);
1212 			goto out;
1213 		}
1214 		freq_table[i].freq = rate;
1215 		freq_table[i].supplies[0].u_volt = dev_pm_opp_get_voltage(opp);
1216 		dev_pm_opp_put(opp);
1217 
1218 		for (j = 0; j < dmcfreq->freq_count; j++) {
1219 			if (rate == dmcfreq->freq_info_rate[j])
1220 				break;
1221 		}
1222 		if (j == dmcfreq->freq_count)
1223 			dev_pm_opp_disable(dmcfreq->dev, rate);
1224 	}
1225 
1226 	for (i = 0; i < dmcfreq->freq_count; i++) {
1227 		for (j = 0; j < count; j++) {
1228 			if (dmcfreq->freq_info_rate[i] == freq_table[j].freq) {
1229 				break;
1230 			} else if (dmcfreq->freq_info_rate[i] < freq_table[j].freq) {
1231 				dev_pm_opp_add(dmcfreq->dev, dmcfreq->freq_info_rate[i],
1232 					       freq_table[j].supplies[0].u_volt);
1233 				break;
1234 			}
1235 		}
1236 		if (j == count) {
1237 			dev_err(dmcfreq->dev, "failed to match dmc_opp_table for %ld\n",
1238 				dmcfreq->freq_info_rate[i]);
1239 			if (i == 0)
1240 				ret = -EPERM;
1241 			else
1242 				dmcfreq->freq_count = i;
1243 			goto out;
1244 		}
1245 	}
1246 
1247 out:
1248 	kfree(freq_table);
1249 	return ret;
1250 }
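
/*
 * Editor's note: the loops above first disable every OPP the firmware
 * did not report, then add any firmware rate without an exact OPP match
 * using the voltage of the next-higher OPP. A rate above all OPPs is an
 * error: fatal (-EPERM) for the first entry, otherwise freq_count is
 * truncated at that point.
 */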
1251 
1252 static __maybe_unused int
1253 rockchip_dmcfreq_adjust_opp_table(struct rockchip_dmcfreq *dmcfreq)
1254 {
1255 	struct device *dev = dmcfreq->dev;
1256 	struct arm_smccc_res res;
1257 	struct dev_pm_opp *opp;
1258 	struct opp_table *opp_table;
1259 	struct dmc_freq_table *freq_table;
1260 	int i, j, count = 0, ret = 0;
1261 
1262 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1263 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO);
1264 	if (res.a0) {
1265 		dev_err(dev, "rockchip_sip_config_dram_get_freq_info error:%lx\n",
1266 			res.a0);
1267 		return -ENOMEM;
1268 	}
1269 
1270 	if (ddr_psci_param->freq_count == 0 || ddr_psci_param->freq_count > 6) {
1271 		dev_err(dev, "there are no available frequencies!\n");
1272 		return -EPERM;
1273 	}
1274 
1275 	for (i = 0; i < ddr_psci_param->freq_count; i++)
1276 		dmcfreq->freq_info_rate[i] = ddr_psci_param->freq_info_mhz[i] * 1000000;
1277 	dmcfreq->freq_count = ddr_psci_param->freq_count;
1278 
1279 	count = dev_pm_opp_get_opp_count(dev);
1280 	if (count <= 0) {
1281 		dev_err(dev, "there is no available opp\n");
1282 		ret = count ? count : -ENODATA;
1283 		return ret;
1284 	}
1285 
1286 	freq_table = kzalloc(sizeof(*freq_table) * count, GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;
1287 	opp_table = dev_pm_opp_get_opp_table(dev);
1288 	if (!opp_table) {
1289 		ret = -ENOMEM;
1290 		goto out;
1291 	}
1292 
1293 	mutex_lock(&opp_table->lock);
1294 	i = 0;
1295 	list_for_each_entry(opp, &opp_table->opp_list, node) {
1296 		if (!opp->available)
1297 			continue;
1298 
1299 		freq_table[i].freq = opp->rate;
1300 		freq_table[i].supplies[0] = opp->supplies[0];
1301 		if (dmcfreq->regulator_count > 1)
1302 			freq_table[i].supplies[1] = opp->supplies[1];
1303 
1304 		i++;
1305 	}
1306 
1307 	i = 0;
1308 	list_for_each_entry(opp, &opp_table->opp_list, node) {
1309 		if (!opp->available)
1310 			continue;
1311 
1312 		if (i >= dmcfreq->freq_count) {
1313 			opp->available = false;
1314 			continue;
1315 		}
1316 
1317 		for (j = 0; j < count; j++) {
1318 			if (dmcfreq->freq_info_rate[i] <= freq_table[j].freq) {
1319 				opp->rate = dmcfreq->freq_info_rate[i];
1320 				opp->supplies[0] = freq_table[j].supplies[0];
1321 				if (dmcfreq->regulator_count > 1)
1322 					opp->supplies[1] = freq_table[j].supplies[1];
1323 
1324 				break;
1325 			}
1326 		}
1327 		if (j == count) {
1328 			dev_err(dmcfreq->dev, "failed to match dmc_opp_table for %ld\n",
1329 				dmcfreq->freq_info_rate[i]);
1330 			if (i == 0) {
1331 				ret = -EPERM;
1332 				goto out;
1333 			} else {
1334 				opp->available = false;
1335 				dmcfreq->freq_count = i;
1336 			}
1337 		}
1338 		i++;
1339 	}
1340 
1341 	mutex_unlock(&opp_table->lock);
1342 	dev_pm_opp_put_opp_table(opp_table);
1343 
1344 out:
1345 	kfree(freq_table);
1346 	return ret;
1347 }
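
/*
 * Editor's note: unlike rockchip_get_freq_info(), this variant rewrites
 * the OPP entries in place under opp_table->lock, retargeting the first
 * freq_count available OPPs to the firmware rates and marking the
 * remainder unavailable.
 */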
1348 
1349 static __maybe_unused int px30_dmc_init(struct platform_device *pdev,
1350 					struct rockchip_dmcfreq *dmcfreq)
1351 {
1352 	struct arm_smccc_res res;
1353 	u32 size;
1354 	int ret;
1355 	int complt_irq;
1356 	u32 complt_hwirq;
1357 	struct irq_data *complt_irq_data;
1358 
1359 	res = sip_smc_dram(0, 0,
1360 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1361 	dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
1362 	if (res.a0 || res.a1 < 0x103) {
1363 		dev_err(&pdev->dev,
1364 			"trusted firmware needs to be updated or is invalid!\n");
1365 		return -ENXIO;
1366 	}
1367 
1368 	dev_notice(&pdev->dev, "read tf version 0x%lx!\n", res.a1);
1369 
1370 	/*
1371 	 * The first 4 KB is used for interface parameters;
1372 	 * the following 4 KB * N hold the dts parameters.
1373 	 */
1374 	size = sizeof(struct px30_ddr_dts_config_timing);
1375 	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
1376 					SHARE_PAGE_TYPE_DDR);
1377 	if (res.a0 != 0) {
1378 		dev_err(&pdev->dev, "no ATF memory for init\n");
1379 		return -ENOMEM;
1380 	}
1381 	ddr_psci_param = (struct share_params *)res.a1;
1382 	of_get_px30_timings(&pdev->dev, pdev->dev.of_node,
1383 			    (uint32_t *)ddr_psci_param);
1384 
1385 	init_waitqueue_head(&wait_ctrl.wait_wq);
1386 	wait_ctrl.wait_en = 1;
1387 	wait_ctrl.wait_time_out_ms = 17 * 5;
1388 
1389 	complt_irq = platform_get_irq_byname(pdev, "complete_irq");
1390 	if (complt_irq < 0) {
1391 		dev_err(&pdev->dev, "no IRQ for complete_irq: %d\n",
1392 			complt_irq);
1393 		return complt_irq;
1394 	}
1395 	wait_ctrl.complt_irq = complt_irq;
1396 
1397 	ret = devm_request_irq(&pdev->dev, complt_irq, wait_complete_irq,
1398 			       0, dev_name(&pdev->dev), &wait_ctrl);
1399 	if (ret < 0) {
1400 		dev_err(&pdev->dev, "cannot request complete_irq\n");
1401 		return ret;
1402 	}
1403 	disable_irq(complt_irq);
1404 
1405 	complt_irq_data = irq_get_irq_data(complt_irq);
1406 	complt_hwirq = irqd_to_hwirq(complt_irq_data);
1407 	ddr_psci_param->complt_hwirq = complt_hwirq;
1408 
1409 	dmcfreq->set_rate_params = ddr_psci_param;
1410 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1411 	rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
1412 
1413 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1414 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1415 	if (res.a0) {
1416 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1417 			res.a0);
1418 		return -ENOMEM;
1419 	}
1420 
1421 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1422 
1423 	return 0;
1424 }
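
/*
 * Editor's note: the remaining *_dmc_init() variants follow the same
 * pattern: check the ATF version, request shared memory, copy the dts
 * timings into it, hook up a completion IRQ where the SoC needs one, and
 * finally issue ROCKCHIP_SIP_CONFIG_DRAM_INIT.
 */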
1425 
1426 static __maybe_unused int rk1808_dmc_init(struct platform_device *pdev,
1427 					  struct rockchip_dmcfreq *dmcfreq)
1428 {
1429 	struct arm_smccc_res res;
1430 	u32 size;
1431 	int ret;
1432 	int complt_irq;
1433 	struct device_node *node;
1434 
1435 	res = sip_smc_dram(0, 0,
1436 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1437 	dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
1438 	if (res.a0 || res.a1 < 0x101) {
1439 		dev_err(&pdev->dev,
1440 			"trusted firmware needs to be updated or is invalid!\n");
1441 		return -ENXIO;
1442 	}
1443 
1444 	/*
1445 	 * The first 4 KB is used for interface parameters;
1446 	 * the following 4 KB * N hold the dts parameters.
1447 	 */
1448 	size = sizeof(struct rk1808_ddr_dts_config_timing);
1449 	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
1450 					SHARE_PAGE_TYPE_DDR);
1451 	if (res.a0 != 0) {
1452 		dev_err(&pdev->dev, "no ATF memory for init\n");
1453 		return -ENOMEM;
1454 	}
1455 	ddr_psci_param = (struct share_params *)res.a1;
1456 	of_get_rk1808_timings(&pdev->dev, pdev->dev.of_node,
1457 			      (uint32_t *)ddr_psci_param);
1458 
1459 	/* enable start dcf in kernel after dcf ready */
1460 	node = of_parse_phandle(pdev->dev.of_node, "dcf_reg", 0);
1461 	wait_ctrl.regmap_dcf = syscon_node_to_regmap(node);
1462 	if (IS_ERR(wait_ctrl.regmap_dcf))
1463 		return PTR_ERR(wait_ctrl.regmap_dcf);
1464 	wait_ctrl.dcf_en = 1;
1465 
1466 	init_waitqueue_head(&wait_ctrl.wait_wq);
1467 	wait_ctrl.wait_en = 1;
1468 	wait_ctrl.wait_time_out_ms = 17 * 5;
1469 
1470 	complt_irq = platform_get_irq_byname(pdev, "complete_irq");
1471 	if (complt_irq < 0) {
1472 		dev_err(&pdev->dev, "no IRQ for complete_irq: %d\n",
1473 			complt_irq);
1474 		return complt_irq;
1475 	}
1476 	wait_ctrl.complt_irq = complt_irq;
1477 
1478 	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
1479 			       0, dev_name(&pdev->dev), &wait_ctrl);
1480 	if (ret < 0) {
1481 		dev_err(&pdev->dev, "cannot request complete_irq\n");
1482 		return ret;
1483 	}
1484 	disable_irq(complt_irq);
1485 
1486 	dmcfreq->set_rate_params = ddr_psci_param;
1487 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1488 	rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
1489 
1490 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1491 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1492 	if (res.a0) {
1493 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1494 			res.a0);
1495 		return -ENOMEM;
1496 	}
1497 
1498 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1499 
1500 	return 0;
1501 }
1502 
1503 static __maybe_unused int rk3128_dmc_init(struct platform_device *pdev,
1504 					  struct rockchip_dmcfreq *dmcfreq)
1505 {
1506 	struct arm_smccc_res res;
1507 
1508 	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(
1509 					struct rk3128_ddr_dts_config_timing),
1510 					4096) + 1, SHARE_PAGE_TYPE_DDR);
1511 	if (res.a0) {
1512 		dev_err(&pdev->dev, "no ATF memory for init\n");
1513 		return -ENOMEM;
1514 	}
1515 	ddr_psci_param = (struct share_params *)res.a1;
1516 	of_get_rk3128_timings(&pdev->dev, pdev->dev.of_node,
1517 			      (uint32_t *)ddr_psci_param);
1518 
1519 	ddr_psci_param->hz = 0;
1520 	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
1521 
1522 	dmcfreq->set_rate_params = ddr_psci_param;
1523 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1524 
1525 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1526 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1527 
1528 	if (res.a0) {
1529 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1530 			res.a0);
1531 		return -ENOMEM;
1532 	}
1533 
1534 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1535 
1536 	return 0;
1537 }
1538 
1539 static __maybe_unused int rk3228_dmc_init(struct platform_device *pdev,
1540 					  struct rockchip_dmcfreq *dmcfreq)
1541 {
1542 	struct arm_smccc_res res;
1543 
1544 	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(
1545 					struct rk3228_ddr_dts_config_timing),
1546 					4096) + 1, SHARE_PAGE_TYPE_DDR);
1547 	if (res.a0) {
1548 		dev_err(&pdev->dev, "no ATF memory for init\n");
1549 		return -ENOMEM;
1550 	}
1551 
1552 	ddr_psci_param = (struct share_params *)res.a1;
1553 	if (of_get_rk3228_timings(&pdev->dev, pdev->dev.of_node,
1554 				  (uint32_t *)ddr_psci_param))
1555 		return -ENOMEM;
1556 
1557 	ddr_psci_param->hz = 0;
1558 
1559 	dmcfreq->set_rate_params = ddr_psci_param;
1560 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1561 
1562 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1563 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1564 
1565 	if (res.a0) {
1566 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1567 			res.a0);
1568 		return -ENOMEM;
1569 	}
1570 
1571 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1572 
1573 	return 0;
1574 }
1575 
1576 static __maybe_unused int rk3288_dmc_init(struct platform_device *pdev,
1577 					  struct rockchip_dmcfreq *dmcfreq)
1578 {
1579 	struct device *dev = &pdev->dev;
1580 	struct clk *pclk_phy, *pclk_upctl, *dmc_clk;
1581 	struct arm_smccc_res res;
1582 	int ret;
1583 
1584 	dmc_clk = devm_clk_get(dev, "dmc_clk");
1585 	if (IS_ERR(dmc_clk)) {
1586 		dev_err(dev, "Cannot get the clk dmc_clk\n");
1587 		return PTR_ERR(dmc_clk);
1588 	}
1589 	ret = clk_prepare_enable(dmc_clk);
1590 	if (ret < 0) {
1591 		dev_err(dev, "failed to prepare/enable dmc_clk\n");
1592 		return ret;
1593 	}
1594 
1595 	pclk_phy = devm_clk_get(dev, "pclk_phy0");
1596 	if (IS_ERR(pclk_phy)) {
1597 		dev_err(dev, "Cannot get the clk pclk_phy0\n");
1598 		return PTR_ERR(pclk_phy);
1599 	}
1600 	ret = clk_prepare_enable(pclk_phy);
1601 	if (ret < 0) {
1602 		dev_err(dev, "failed to prepare/enable pclk_phy0\n");
1603 		return ret;
1604 	}
1605 	pclk_upctl = devm_clk_get(dev, "pclk_upctl0");
1606 	if (IS_ERR(pclk_upctl)) {
1607 		dev_err(dev, "Cannot get the clk pclk_upctl0\n");
1608 		return PTR_ERR(pclk_upctl);
1609 	}
1610 	ret = clk_prepare_enable(pclk_upctl);
1611 	if (ret < 0) {
1612 		dev_err(dev, "failed to prepare/enable pclk_upctl0\n");
1613 		return ret;
1614 	}
1615 
1616 	pclk_phy = devm_clk_get(dev, "pclk_phy1");
1617 	if (IS_ERR(pclk_phy)) {
1618 		dev_err(dev, "Cannot get the clk pclk_phy1\n");
1619 		return PTR_ERR(pclk_phy);
1620 	}
1621 	ret = clk_prepare_enable(pclk_phy);
1622 	if (ret < 0) {
1623 		dev_err(dev, "failed to prepare/enable pclk_phy1\n");
1624 		return ret;
1625 	}
1626 	pclk_upctl = devm_clk_get(dev, "pclk_upctl1");
1627 	if (IS_ERR(pclk_upctl)) {
1628 		dev_err(dev, "Cannot get the clk pclk_upctl1\n");
1629 		return PTR_ERR(pclk_upctl);
1630 	}
1631 	ret = clk_prepare_enable(pclk_upctl);
1632 	if (ret < 0) {
1633 		dev_err(dev, "failed to prepare/enable pclk_upctl1\n");
1634 		return ret;
1635 	}
1636 
1637 	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(
1638 					struct rk3288_ddr_dts_config_timing),
1639 					4096) + 1, SHARE_PAGE_TYPE_DDR);
1640 	if (res.a0) {
1641 		dev_err(&pdev->dev, "no ATF memory for init\n");
1642 		return -ENOMEM;
1643 	}
1644 
1645 	ddr_psci_param = (struct share_params *)res.a1;
1646 	of_get_rk3288_timings(&pdev->dev, pdev->dev.of_node,
1647 			      (uint32_t *)ddr_psci_param);
1648 
1649 	ddr_psci_param->hz = 0;
1650 	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
1651 
1652 	dmcfreq->set_rate_params = ddr_psci_param;
1653 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1654 
1655 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1656 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1657 
1658 	if (res.a0) {
1659 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1660 			res.a0);
1661 		return -ENOMEM;
1662 	}
1663 
1664 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1665 
1666 	return 0;
1667 }
1668 
1669 static __maybe_unused int rk3328_dmc_init(struct platform_device *pdev,
1670 					  struct rockchip_dmcfreq *dmcfreq)
1671 {
1672 	struct arm_smccc_res res;
1673 	u32 size;
1674 
1675 	res = sip_smc_dram(0, 0,
1676 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1677 	dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
1678 	if (res.a0 || (res.a1 < 0x101)) {
1679 		dev_err(&pdev->dev,
1680 			"trusted firmware needs to be updated or is invalid!\n");
1681 		return -ENXIO;
1682 	}
1683 
1684 	dev_notice(&pdev->dev, "read tf version 0x%lx!\n", res.a1);
1685 
1686 	/*
1687 	 * The first 4 KB is used for interface parameters;
1688 	 * the following 4 KB * N hold the dts parameters.
1689 	 */
1690 	size = sizeof(struct rk3328_ddr_dts_config_timing);
1691 	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
1692 					SHARE_PAGE_TYPE_DDR);
1693 	if (res.a0 != 0) {
1694 		dev_err(&pdev->dev, "no ATF memory for init\n");
1695 		return -ENOMEM;
1696 	}
1697 	ddr_psci_param = (struct share_params *)res.a1;
1698 	of_get_rk3328_timings(&pdev->dev, pdev->dev.of_node,
1699 			      (uint32_t *)ddr_psci_param);
1700 
1701 	dmcfreq->set_rate_params = ddr_psci_param;
1702 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1703 
1704 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1705 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1706 	if (res.a0) {
1707 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1708 			res.a0);
1709 		return -ENOMEM;
1710 	}
1711 
1712 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1713 
1714 	return 0;
1715 }
1716 
1717 static int rk3399_set_msch_readlatency(unsigned int readlatency)
1718 {
1719 	struct arm_smccc_res res;
1720 
1721 	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, readlatency, 0,
1722 		      ROCKCHIP_SIP_CONFIG_DRAM_SET_MSCH_RL,
1723 		      0, 0, 0, 0, &res);
1724 
1725 	return res.a0;
1726 }
1727 
1728 static __maybe_unused int rk3399_dmc_init(struct platform_device *pdev,
1729 					  struct rockchip_dmcfreq *dmcfreq)
1730 {
1731 	struct device *dev = &pdev->dev;
1732 	struct device_node *np = pdev->dev.of_node;
1733 	struct arm_smccc_res res;
1734 	struct rk3399_dram_timing *dram_timing;
1735 	int index, size;
1736 	u32 *timing;
1737 
1738 	/*
1739 	 * Get the dram timings and pass them to the ARM trusted
1740 	 * firmware; the dram driver in the trusted firmware will
1741 	 * use them for dram initialization.
1742 	 */
1743 	dram_timing = of_get_rk3399_timings(dev, np);
1744 	if (dram_timing) {
1745 		timing = (u32 *)dram_timing;
1746 		size = sizeof(struct rk3399_dram_timing) / 4;
1747 		for (index = 0; index < size; index++) {
1748 			arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index,
1749 				      ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM,
1750 				      0, 0, 0, 0, &res);
1751 			if (res.a0) {
1752 				dev_err(dev, "Failed to set dram param: %ld\n",
1753 					res.a0);
1754 				return -EINVAL;
1755 			}
1756 		}
1757 	}
1758 
1759 	dmcfreq->set_rate_params =
1760 		devm_kzalloc(dev, sizeof(struct share_params), GFP_KERNEL);
1761 	if (!dmcfreq->set_rate_params)
1762 		return -ENOMEM;
1763 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1764 
1765 	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
1766 		      ROCKCHIP_SIP_CONFIG_DRAM_INIT,
1767 		      0, 0, 0, 0, &res);
1768 
1769 	dmcfreq->info.set_msch_readlatency = rk3399_set_msch_readlatency;
1770 
1771 	return 0;
1772 }
1773 
1774 static __maybe_unused int rk3528_dmc_init(struct platform_device *pdev,
1775 					  struct rockchip_dmcfreq *dmcfreq)
1776 {
1777 	struct arm_smccc_res res;
1778 	int ret;
1779 	int complt_irq;
1780 	u32 complt_hwirq;
1781 	struct irq_data *complt_irq_data;
1782 
1783 	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1784 	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
1785 	if (res.a0 || res.a1 < 0x100) {
1786 		dev_err(&pdev->dev, "trusted firmware needs updating to V1.00 or above.\n");
1787 		return -ENXIO;
1788 	}
1789 
1790 	/*
1791 	 * The first 4KB is used for interface parameters and the
1792 	 * second 4KB holds the dts parameters, so request a share
1793 	 * memory size of 4KB * 2.
1794 	 */
1795 	res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
1796 	if (res.a0 != 0) {
1797 		dev_err(&pdev->dev, "no ATF memory for init\n");
1798 		return -ENOMEM;
1799 	}
1800 	ddr_psci_param = (struct share_params *)res.a1;
1801 	/* Clear ddr_psci_param, size is 4KB * 2 */
1802 	memset_io(ddr_psci_param, 0x0, 4096 * 2);
1803 
1804 	wait_ctrl.dcf_en = 0;
1805 
1806 	init_waitqueue_head(&wait_ctrl.wait_wq);
1807 	wait_ctrl.wait_en = 1;
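	/* 17 * 5 ms: presumably a budget of about five 60 Hz vsync periods. */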
1808 	wait_ctrl.wait_time_out_ms = 17 * 5;
1809 
1810 	complt_irq = platform_get_irq_byname(pdev, "complete");
1811 	if (complt_irq < 0) {
1812 		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
1813 		return complt_irq;
1814 	}
1815 	wait_ctrl.complt_irq = complt_irq;
1816 
1817 	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
1818 			       0, dev_name(&pdev->dev), &wait_ctrl);
1819 	if (ret < 0) {
1820 		dev_err(&pdev->dev, "cannot request complt_irq\n");
1821 		return ret;
1822 	}
1823 	disable_irq(complt_irq);
1824 
1825 	complt_irq_data = irq_get_irq_data(complt_irq);
1826 	complt_hwirq = irqd_to_hwirq(complt_irq_data);
1827 	ddr_psci_param->complt_hwirq = complt_hwirq;
1828 
1829 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1830 	if (res.a0) {
1831 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
1832 		return -ENOMEM;
1833 	}
1834 
1835 	ret = rockchip_get_freq_info(dmcfreq);
1836 	if (ret < 0) {
1837 		dev_err(&pdev->dev, "cannot get frequency info\n");
1838 		return ret;
1839 	}
1840 	dmcfreq->is_set_rate_direct = true;
1841 
1842 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1843 
1844 	return 0;
1845 }
1846 
1847 static __maybe_unused int rk3568_dmc_init(struct platform_device *pdev,
1848 					  struct rockchip_dmcfreq *dmcfreq)
1849 {
1850 	struct arm_smccc_res res;
1851 	int ret;
1852 	int complt_irq;
1853 
1854 	res = sip_smc_dram(0, 0,
1855 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1856 	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
1857 	if (res.a0 || res.a1 < 0x101) {
1858 		dev_err(&pdev->dev, "trusted firmware needs updating to V1.01 or above.\n");
1859 		return -ENXIO;
1860 	}
1861 
1862 	/*
1863 	 * The first 4KB is used for interface parameters and the
1864 	 * second 4KB holds the dts parameters, so request a share
1865 	 * memory size of 4KB * 2.
1866 	 */
1867 	res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
1868 	if (res.a0 != 0) {
1869 		dev_err(&pdev->dev, "no ATF memory for init\n");
1870 		return -ENOMEM;
1871 	}
1872 	ddr_psci_param = (struct share_params *)res.a1;
1873 	/* Clear ddr_psci_param, size is 4KB * 2 */
1874 	memset_io(ddr_psci_param, 0x0, 4096 * 2);
1875 
1876 	/* start mcu with sip_smc_dram */
1877 	wait_ctrl.dcf_en = 2;
1878 
1879 	init_waitqueue_head(&wait_ctrl.wait_wq);
1880 	wait_ctrl.wait_en = 1;
1881 	wait_ctrl.wait_time_out_ms = 17 * 5;
1882 
1883 	complt_irq = platform_get_irq_byname(pdev, "complete");
1884 	if (complt_irq < 0) {
1885 		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n",
1886 			complt_irq);
1887 		return complt_irq;
1888 	}
1889 	wait_ctrl.complt_irq = complt_irq;
1890 
1891 	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
1892 			       0, dev_name(&pdev->dev), &wait_ctrl);
1893 	if (ret < 0) {
1894 		dev_err(&pdev->dev, "cannot request complt_irq\n");
1895 		return ret;
1896 	}
1897 	disable_irq(complt_irq);
1898 
1899 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1900 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1901 	if (res.a0) {
1902 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
1903 			res.a0);
1904 		return -ENOMEM;
1905 	}
1906 
1907 	ret = rockchip_get_freq_info(dmcfreq);
1908 	if (ret < 0) {
1909 		dev_err(&pdev->dev, "cannot get frequency info\n");
1910 		return ret;
1911 	}
1912 	dmcfreq->is_set_rate_direct = true;
1913 
1914 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1915 
1916 	return 0;
1917 }
1918 
1919 static __maybe_unused int rk3588_dmc_init(struct platform_device *pdev,
1920 					  struct rockchip_dmcfreq *dmcfreq)
1921 {
1922 	struct arm_smccc_res res;
1923 	struct dev_pm_opp *opp;
1924 	unsigned long opp_rate;
1925 	int ret;
1926 	int complt_irq;
1927 
1928 	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1929 	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
1930 	if (res.a0) {
1931 		dev_err(&pdev->dev, "trusted firmware is unsupported, please update.\n");
1932 		return -ENXIO;
1933 	}
1934 
1935 	/*
1936 	 * The first 4KB is used for interface parameters and the
1937 	 * second 4KB holds the dts parameters, so request a share
1938 	 * memory size of 4KB * 2.
1939 	 */
1940 	res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
1941 	if (res.a0 != 0) {
1942 		dev_err(&pdev->dev, "no ATF memory for init\n");
1943 		return -ENOMEM;
1944 	}
1945 	ddr_psci_param = (struct share_params *)res.a1;
1946 	/* Clear ddr_psci_param, size is 4KB * 2 */
1947 	memset_io(ddr_psci_param, 0x0, 4096 * 2);
1948 
1949 	/* start mcu with sip_smc_dram */
1950 	wait_ctrl.dcf_en = 2;
1951 
1952 	init_waitqueue_head(&wait_ctrl.wait_wq);
1953 	wait_ctrl.wait_en = 1;
1954 	wait_ctrl.wait_time_out_ms = 17 * 5;
1955 
1956 	complt_irq = platform_get_irq_byname(pdev, "complete");
1957 	if (complt_irq < 0) {
1958 		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
1959 		return complt_irq;
1960 	}
1961 	wait_ctrl.complt_irq = complt_irq;
1962 
1963 	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
1964 			       0, dev_name(&pdev->dev), &wait_ctrl);
1965 	if (ret < 0) {
1966 		dev_err(&pdev->dev, "cannot request complt_irq\n");
1967 		return ret;
1968 	}
1969 	disable_irq(complt_irq);
1970 
1971 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1972 	if (res.a0) {
1973 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
1974 		return -ENOMEM;
1975 	}
1976 
1977 	ret = rockchip_dmcfreq_adjust_opp_table(dmcfreq);
1978 	if (ret < 0) {
1979 		dev_err(&pdev->dev, "cannot get frequency info\n");
1980 		return ret;
1981 	}
1982 	dmcfreq->is_set_rate_direct = true;
1983 
1984 	/* Config the dmcfreq->sleep_volt for deepsleep */
1985 	opp_rate = dmcfreq->freq_info_rate[dmcfreq->freq_count - 1];
1986 	opp = devfreq_recommended_opp(&pdev->dev, &opp_rate, 0);
1987 	if (IS_ERR(opp)) {
1988 		dev_err(&pdev->dev, "Failed to find opp for %lu Hz\n", opp_rate);
1989 		return PTR_ERR(opp);
1990 	}
1991 	dmcfreq->sleep_volt = opp->supplies[0].u_volt;
1992 	if (dmcfreq->regulator_count > 1)
1993 		dmcfreq->sleep_mem_volt = opp->supplies[1].u_volt;
1994 	dev_pm_opp_put(opp);
1995 
1996 	if (of_property_read_u32(pdev->dev.of_node, "wait-mode", &ddr_psci_param->wait_mode))
1997 		ddr_psci_param->wait_mode = 0;
1998 
1999 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
2000 
2001 	return 0;
2002 }
2003 
2004 static __maybe_unused int rv1126_dmc_init(struct platform_device *pdev,
2005 					  struct rockchip_dmcfreq *dmcfreq)
2006 {
2007 	struct arm_smccc_res res;
2008 	u32 size;
2009 	int ret;
2010 	int complt_irq;
2011 	struct device_node *node;
2012 
2013 	res = sip_smc_dram(0, 0,
2014 			   ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
2015 	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
2016 	if (res.a0 || res.a1 < 0x100) {
2017 		dev_err(&pdev->dev,
2018 			"trusted firmware need to update or is invalid!\n");
2019 		return -ENXIO;
2020 	}
2021 
2022 	/*
2023 	 * The first 4KB is used for interface parameters;
2024 	 * the following 4KB * N holds the dts parameters.
2025 	 */
2026 	size = sizeof(struct rk1808_ddr_dts_config_timing);
2027 	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
2028 					SHARE_PAGE_TYPE_DDR);
2029 	if (res.a0 != 0) {
2030 		dev_err(&pdev->dev, "no ATF memory for init\n");
2031 		return -ENOMEM;
2032 	}
2033 	ddr_psci_param = (struct share_params *)res.a1;
2034 	of_get_rv1126_timings(&pdev->dev, pdev->dev.of_node,
2035 			      (uint32_t *)ddr_psci_param);
2036 
2037 	/* enable starting dcf from the kernel once dcf is ready */
2038 	node = of_parse_phandle(pdev->dev.of_node, "dcf", 0);
2039 	wait_ctrl.regmap_dcf = syscon_node_to_regmap(node);
2040 	if (IS_ERR(wait_ctrl.regmap_dcf))
2041 		return PTR_ERR(wait_ctrl.regmap_dcf);
2042 	wait_ctrl.dcf_en = 1;
2043 
2044 	init_waitqueue_head(&wait_ctrl.wait_wq);
2045 	wait_ctrl.wait_en = 1;
2046 	wait_ctrl.wait_time_out_ms = 17 * 5;
2047 
2048 	complt_irq = platform_get_irq_byname(pdev, "complete");
2049 	if (complt_irq < 0) {
2050 		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n",
2051 			complt_irq);
2052 		return complt_irq;
2053 	}
2054 	wait_ctrl.complt_irq = complt_irq;
2055 
2056 	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
2057 			       0, dev_name(&pdev->dev), &wait_ctrl);
2058 	if (ret < 0) {
2059 		dev_err(&pdev->dev, "cannot request complt_irq\n");
2060 		return ret;
2061 	}
2062 	disable_irq(complt_irq);
2063 
2064 	if (of_property_read_u32(pdev->dev.of_node, "update_drv_odt_cfg",
2065 				 &ddr_psci_param->update_drv_odt_cfg))
2066 		ddr_psci_param->update_drv_odt_cfg = 0;
2067 
2068 	if (of_property_read_u32(pdev->dev.of_node, "update_deskew_cfg",
2069 				 &ddr_psci_param->update_deskew_cfg))
2070 		ddr_psci_param->update_deskew_cfg = 0;
2071 
2072 	dmcfreq->set_rate_params = ddr_psci_param;
2073 	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
2074 	rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
2075 
2076 	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
2077 			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
2078 	if (res.a0) {
2079 		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
2080 			res.a0);
2081 		return -ENOMEM;
2082 	}
2083 
2084 	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
2085 
2086 	return 0;
2087 }
2088 
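/*
 * Per-SoC match table; .data is the init callback that
 * rockchip_dmcfreq_dmc_init() below invokes during probe.
 */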
2089 static const struct of_device_id rockchip_dmcfreq_of_match[] = {
2090 #if IS_ENABLED(CONFIG_CPU_PX30)
2091 	{ .compatible = "rockchip,px30-dmc", .data = px30_dmc_init },
2092 #endif
2093 #if IS_ENABLED(CONFIG_CPU_RK1808)
2094 	{ .compatible = "rockchip,rk1808-dmc", .data = rk1808_dmc_init },
2095 #endif
2096 #if IS_ENABLED(CONFIG_CPU_RK312X)
2097 	{ .compatible = "rockchip,rk3128-dmc", .data = rk3128_dmc_init },
2098 #endif
2099 #if IS_ENABLED(CONFIG_CPU_RK322X)
2100 	{ .compatible = "rockchip,rk3228-dmc", .data = rk3228_dmc_init },
2101 #endif
2102 #if IS_ENABLED(CONFIG_CPU_RK3288)
2103 	{ .compatible = "rockchip,rk3288-dmc", .data = rk3288_dmc_init },
2104 #endif
2105 #if IS_ENABLED(CONFIG_CPU_RK3308)
2106 	{ .compatible = "rockchip,rk3308-dmc", .data = NULL },
2107 #endif
2108 #if IS_ENABLED(CONFIG_CPU_RK3328)
2109 	{ .compatible = "rockchip,rk3328-dmc", .data = rk3328_dmc_init },
2110 #endif
2111 #if IS_ENABLED(CONFIG_CPU_RK3399)
2112 	{ .compatible = "rockchip,rk3399-dmc", .data = rk3399_dmc_init },
2113 #endif
2114 #if IS_ENABLED(CONFIG_CPU_RK3528)
2115 	{ .compatible = "rockchip,rk3528-dmc", .data = rk3528_dmc_init },
2116 #endif
2117 #if IS_ENABLED(CONFIG_CPU_RK3562)
2118 	{ .compatible = "rockchip,rk3562-dmc", .data = rk3568_dmc_init },
2119 #endif
2120 #if IS_ENABLED(CONFIG_CPU_RK3568)
2121 	{ .compatible = "rockchip,rk3568-dmc", .data = rk3568_dmc_init },
2122 #endif
2123 #if IS_ENABLED(CONFIG_CPU_RK3588)
2124 	{ .compatible = "rockchip,rk3588-dmc", .data = rk3588_dmc_init },
2125 #endif
2126 #if IS_ENABLED(CONFIG_CPU_RV1126)
2127 	{ .compatible = "rockchip,rv1126-dmc", .data = rv1126_dmc_init },
2128 #endif
2129 	{ },
2130 };
2131 MODULE_DEVICE_TABLE(of, rockchip_dmcfreq_of_match);
2132 
2133 static int rockchip_get_freq_map_table(struct device_node *np, char *prop_name,
2134 				       struct freq_map_table **table)
2135 {
2136 	struct freq_map_table *tbl;
2137 	const struct property *prop;
2138 	unsigned int temp_freq = 0;
2139 	int count, i;
2140 
2141 	prop = of_find_property(np, prop_name, NULL);
2142 	if (!prop)
2143 		return -EINVAL;
2144 
2145 	if (!prop->value)
2146 		return -ENODATA;
2147 
2148 	count = of_property_count_u32_elems(np, prop_name);
2149 	if (count < 0)
2150 		return -EINVAL;
2151 
2152 	if (count % 3)
2153 		return -EINVAL;
2154 
2155 	tbl = kzalloc(sizeof(*tbl) * (count / 3 + 1), GFP_KERNEL);
2156 	if (!tbl)
2157 		return -ENOMEM;
2158 
2159 	for (i = 0; i < count / 3; i++) {
2160 		of_property_read_u32_index(np, prop_name, 3 * i, &tbl[i].min);
2161 		of_property_read_u32_index(np, prop_name, 3 * i + 1,
2162 					   &tbl[i].max);
2163 		of_property_read_u32_index(np, prop_name, 3 * i + 2,
2164 					   &temp_freq);
2165 		tbl[i].freq = temp_freq * 1000;
2166 	}
2167 
2168 	tbl[i].min = 0;
2169 	tbl[i].max = 0;
2170 	tbl[i].freq = DMCFREQ_TABLE_END;
2171 
2172 	*table = tbl;
2173 
2174 	return 0;
2175 }
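/*
 * rockchip_get_freq_map_table() above parses flat <min max freq>
 * triplets, with freq given in kHz. A purely illustrative encoding:
 *
 *	cpu-bw-dmc-freq = <0    1000 328000>,
 *			  <1000 2000 528000>;
 */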
2176 
2177 static int rockchip_get_rl_map_table(struct device_node *np, char *prop_name,
2178 				     struct rl_map_table **table)
2179 {
2180 	struct rl_map_table *tbl;
2181 	const struct property *prop;
2182 	int count, i;
2183 
2184 	prop = of_find_property(np, prop_name, NULL);
2185 	if (!prop)
2186 		return -EINVAL;
2187 
2188 	if (!prop->value)
2189 		return -ENODATA;
2190 
2191 	count = of_property_count_u32_elems(np, prop_name);
2192 	if (count < 0)
2193 		return -EINVAL;
2194 
2195 	if (count % 2)
2196 		return -EINVAL;
2197 
2198 	tbl = kzalloc(sizeof(*tbl) * (count / 2 + 1), GFP_KERNEL);
2199 	if (!tbl)
2200 		return -ENOMEM;
2201 
2202 	for (i = 0; i < count / 2; i++) {
2203 		of_property_read_u32_index(np, prop_name, 2 * i, &tbl[i].pn);
2204 		of_property_read_u32_index(np, prop_name, 2 * i + 1,
2205 					   &tbl[i].rl);
2206 	}
2207 
2208 	tbl[i].pn = 0;
2209 	tbl[i].rl = DMCFREQ_TABLE_END;
2210 
2211 	*table = tbl;
2212 
2213 	return 0;
2214 }
2215 
2216 static int rockchip_get_system_status_rate(struct device_node *np,
2217 					   char *prop_name,
2218 					   struct rockchip_dmcfreq *dmcfreq)
2219 {
2220 	const struct property *prop;
2221 	unsigned int status = 0, freq = 0;
2222 	unsigned long temp_rate = 0;
2223 	int count, i;
2224 
2225 	prop = of_find_property(np, prop_name, NULL);
2226 	if (!prop)
2227 		return -ENODEV;
2228 
2229 	if (!prop->value)
2230 		return -ENODATA;
2231 
2232 	count = of_property_count_u32_elems(np, prop_name);
2233 	if (count < 0)
2234 		return -EINVAL;
2235 
2236 	if (count % 2)
2237 		return -EINVAL;
2238 
2239 	for (i = 0; i < count / 2; i++) {
2240 		of_property_read_u32_index(np, prop_name, 2 * i,
2241 					   &status);
2242 		of_property_read_u32_index(np, prop_name, 2 * i + 1,
2243 					   &freq);
2244 		switch (status) {
2245 		case SYS_STATUS_NORMAL:
2246 			dmcfreq->normal_rate = freq * 1000;
2247 			break;
2248 		case SYS_STATUS_SUSPEND:
2249 			dmcfreq->suspend_rate = freq * 1000;
2250 			break;
2251 		case SYS_STATUS_VIDEO_1080P:
2252 			dmcfreq->video_1080p_rate = freq * 1000;
2253 			break;
2254 		case SYS_STATUS_VIDEO_4K:
2255 			dmcfreq->video_4k_rate = freq * 1000;
2256 			break;
2257 		case SYS_STATUS_VIDEO_4K_10B:
2258 			dmcfreq->video_4k_10b_rate = freq * 1000;
2259 			break;
2260 		case SYS_STATUS_VIDEO_SVEP:
2261 			dmcfreq->video_svep_rate = freq * 1000;
2262 			break;
2263 		case SYS_STATUS_PERFORMANCE:
2264 			dmcfreq->performance_rate = freq * 1000;
2265 			break;
2266 		case SYS_STATUS_HDMI:
2267 			dmcfreq->hdmi_rate = freq * 1000;
2268 			break;
2269 		case SYS_STATUS_HDMIRX:
2270 			dmcfreq->hdmirx_rate = freq * 1000;
2271 			break;
2272 		case SYS_STATUS_IDLE:
2273 			dmcfreq->idle_rate = freq * 1000;
2274 			break;
2275 		case SYS_STATUS_REBOOT:
2276 			dmcfreq->reboot_rate = freq * 1000;
2277 			break;
2278 		case SYS_STATUS_BOOST:
2279 			dmcfreq->boost_rate = freq * 1000;
2280 			break;
2281 		case SYS_STATUS_ISP:
2282 		case SYS_STATUS_CIF0:
2283 		case SYS_STATUS_CIF1:
2284 		case SYS_STATUS_DUALVIEW:
2285 			temp_rate = freq * 1000;
2286 			if (dmcfreq->fixed_rate < temp_rate)
2287 				dmcfreq->fixed_rate = temp_rate;
2288 			break;
2289 		case SYS_STATUS_LOW_POWER:
2290 			dmcfreq->low_power_rate = freq * 1000;
2291 			break;
2292 		default:
2293 			break;
2294 		}
2295 	}
2296 
2297 	return 0;
2298 }
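/*
 * "system-status-freq" is parsed above as <status freq> pairs, with
 * freq in kHz and status a SYS_STATUS_* value. Illustrative only:
 *
 *	system-status-freq = <SYS_STATUS_NORMAL  786000>,
 *			     <SYS_STATUS_SUSPEND 328000>;
 */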
2299 
2300 static unsigned long rockchip_freq_level_2_rate(struct rockchip_dmcfreq *dmcfreq,
2301 						unsigned int level)
2302 {
2303 	unsigned long rate = 0;
2304 
2305 	switch (level) {
2306 	case DMC_FREQ_LEVEL_LOW:
2307 		rate = dmcfreq->rate_low;
2308 		break;
2309 	case DMC_FREQ_LEVEL_MID_LOW:
2310 		rate = dmcfreq->rate_mid_low;
2311 		break;
2312 	case DMC_FREQ_LEVEL_MID_HIGH:
2313 		rate = dmcfreq->rate_mid_high;
2314 		break;
2315 	case DMC_FREQ_LEVEL_HIGH:
2316 		rate = dmcfreq->rate_high;
2317 		break;
2318 	default:
2319 		break;
2320 	}
2321 
2322 	return rate;
2323 }
2324 
2325 static int rockchip_get_system_status_level(struct device_node *np,
2326 					    char *prop_name,
2327 					    struct rockchip_dmcfreq *dmcfreq)
2328 {
2329 	const struct property *prop;
2330 	unsigned int status = 0, level = 0;
2331 	unsigned long temp_rate = 0;
2332 	int count, i;
2333 
2334 	prop = of_find_property(np, prop_name, NULL);
2335 	if (!prop)
2336 		return -ENODEV;
2337 
2338 	if (!prop->value)
2339 		return -ENODATA;
2340 
2341 	count = of_property_count_u32_elems(np, prop_name);
2342 	if (count < 0)
2343 		return -EINVAL;
2344 
2345 	if (count % 2)
2346 		return -EINVAL;
2347 
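	/*
	 * Map the four DMC_FREQ_LEVEL_* levels onto the available
	 * frequencies: low is always the lowest rate and high the
	 * highest; with fewer than four rates the middle levels
	 * collapse onto their nearest neighbours.
	 */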
2348 	if (dmcfreq->freq_count == 1) {
2349 		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
2350 		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[0];
2351 		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[0];
2352 		dmcfreq->rate_high = dmcfreq->freq_info_rate[0];
2353 	} else if (dmcfreq->freq_count == 2) {
2354 		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
2355 		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[0];
2356 		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[1];
2357 		dmcfreq->rate_high = dmcfreq->freq_info_rate[1];
2358 	} else if (dmcfreq->freq_count == 3) {
2359 		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
2360 		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
2361 		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[1];
2362 		dmcfreq->rate_high = dmcfreq->freq_info_rate[2];
2363 	} else if (dmcfreq->freq_count == 4) {
2364 		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
2365 		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
2366 		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[2];
2367 		dmcfreq->rate_high = dmcfreq->freq_info_rate[3];
2368 	} else if (dmcfreq->freq_count == 5 || dmcfreq->freq_count == 6) {
2369 		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
2370 		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
2371 		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[dmcfreq->freq_count - 2];
2372 		dmcfreq->rate_high = dmcfreq->freq_info_rate[dmcfreq->freq_count - 1];
2373 	} else {
2374 		return -EINVAL;
2375 	}
2376 
2377 	dmcfreq->auto_min_rate = dmcfreq->rate_low;
2378 
2379 	for (i = 0; i < count / 2; i++) {
2380 		of_property_read_u32_index(np, prop_name, 2 * i,
2381 					   &status);
2382 		of_property_read_u32_index(np, prop_name, 2 * i + 1,
2383 					   &level);
2384 		switch (status) {
2385 		case SYS_STATUS_NORMAL:
2386 			dmcfreq->normal_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2387 			dev_info(dmcfreq->dev, "normal_rate = %ld\n", dmcfreq->normal_rate);
2388 			break;
2389 		case SYS_STATUS_SUSPEND:
2390 			dmcfreq->suspend_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2391 			dev_info(dmcfreq->dev, "suspend_rate = %ld\n", dmcfreq->suspend_rate);
2392 			break;
2393 		case SYS_STATUS_VIDEO_1080P:
2394 			dmcfreq->video_1080p_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2395 			dev_info(dmcfreq->dev, "video_1080p_rate = %ld\n",
2396 				 dmcfreq->video_1080p_rate);
2397 			break;
2398 		case SYS_STATUS_VIDEO_4K:
2399 			dmcfreq->video_4k_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2400 			dev_info(dmcfreq->dev, "video_4k_rate = %ld\n", dmcfreq->video_4k_rate);
2401 			break;
2402 		case SYS_STATUS_VIDEO_4K_10B:
2403 			dmcfreq->video_4k_10b_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2404 			dev_info(dmcfreq->dev, "video_4k_10b_rate = %ld\n",
2405 				 dmcfreq->video_4k_10b_rate);
2406 			break;
2407 		case SYS_STATUS_VIDEO_4K_60P:
2408 			dmcfreq->video_4k_60p_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2409 			dev_info(dmcfreq->dev, "video_4k_60p_rate = %ld\n",
2410 				 dmcfreq->video_4k_60p_rate);
2411 			break;
2412 		case SYS_STATUS_VIDEO_SVEP:
2413 			dmcfreq->video_svep_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2414 			dev_info(dmcfreq->dev, "video_svep_rate = %ld\n",
2415 				 dmcfreq->video_svep_rate);
2416 			break;
2417 		case SYS_STATUS_PERFORMANCE:
2418 			dmcfreq->performance_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2419 			dev_info(dmcfreq->dev, "performance_rate = %ld\n",
2420 				 dmcfreq->performance_rate);
2421 			break;
2422 		case SYS_STATUS_HDMI:
2423 			dmcfreq->hdmi_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2424 			dev_info(dmcfreq->dev, "hdmi_rate = %ld\n", dmcfreq->hdmi_rate);
2425 			break;
2426 		case SYS_STATUS_HDMIRX:
2427 			dmcfreq->hdmirx_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2428 			dev_info(dmcfreq->dev, "hdmirx_rate = %ld\n", dmcfreq->hdmirx_rate);
2429 			break;
2430 		case SYS_STATUS_IDLE:
2431 			dmcfreq->idle_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2432 			dev_info(dmcfreq->dev, "idle_rate = %ld\n", dmcfreq->idle_rate);
2433 			break;
2434 		case SYS_STATUS_REBOOT:
2435 			dmcfreq->reboot_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2436 			dev_info(dmcfreq->dev, "reboot_rate = %ld\n", dmcfreq->reboot_rate);
2437 			break;
2438 		case SYS_STATUS_BOOST:
2439 			dmcfreq->boost_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2440 			dev_info(dmcfreq->dev, "boost_rate = %ld\n", dmcfreq->boost_rate);
2441 			break;
2442 		case SYS_STATUS_ISP:
2443 		case SYS_STATUS_CIF0:
2444 		case SYS_STATUS_CIF1:
2445 		case SYS_STATUS_DUALVIEW:
2446 			temp_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2447 			if (dmcfreq->fixed_rate < temp_rate) {
2448 				dmcfreq->fixed_rate = temp_rate;
2449 				dev_info(dmcfreq->dev,
2450 					 "fixed_rate(isp|cif0|cif1|dualview) = %ld\n",
2451 					 dmcfreq->fixed_rate);
2452 			}
2453 			break;
2454 		case SYS_STATUS_LOW_POWER:
2455 			dmcfreq->low_power_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2456 			dev_info(dmcfreq->dev, "low_power_rate = %ld\n", dmcfreq->low_power_rate);
2457 			break;
2458 		default:
2459 			break;
2460 		}
2461 	}
2462 
2463 	return 0;
2464 }
2465 
2466 static void rockchip_dmcfreq_update_target(struct rockchip_dmcfreq *dmcfreq)
2467 {
2468 	struct devfreq *devfreq = dmcfreq->info.devfreq;
2469 
2470 	mutex_lock(&devfreq->lock);
2471 	update_devfreq(devfreq);
2472 	mutex_unlock(&devfreq->lock);
2473 }
2474 
2475 static int rockchip_dmcfreq_system_status_notifier(struct notifier_block *nb,
2476 						   unsigned long status,
2477 						   void *ptr)
2478 {
2479 	struct rockchip_dmcfreq *dmcfreq = system_status_to_dmcfreq(nb);
2480 	unsigned long target_rate = 0;
2481 	unsigned int refresh = false;
2482 	bool is_fixed = false;
2483 
2484 	if (dmcfreq->fixed_rate && (is_dualview(status) || is_isp(status))) {
2485 		if (dmcfreq->is_fixed)
2486 			return NOTIFY_OK;
2487 		is_fixed = true;
2488 		target_rate = dmcfreq->fixed_rate;
2489 		goto next;
2490 	}
2491 
2492 	if (dmcfreq->reboot_rate && (status & SYS_STATUS_REBOOT)) {
2493 		if (dmcfreq->info.auto_freq_en)
2494 			devfreq_monitor_stop(dmcfreq->info.devfreq);
2495 		target_rate = dmcfreq->reboot_rate;
2496 		goto next;
2497 	}
2498 
2499 	if (dmcfreq->suspend_rate && (status & SYS_STATUS_SUSPEND)) {
2500 		target_rate = dmcfreq->suspend_rate;
2501 		refresh = true;
2502 		goto next;
2503 	}
2504 
2505 	if (dmcfreq->low_power_rate && (status & SYS_STATUS_LOW_POWER)) {
2506 		target_rate = dmcfreq->low_power_rate;
2507 		goto next;
2508 	}
2509 
2510 	if (dmcfreq->performance_rate && (status & SYS_STATUS_PERFORMANCE)) {
2511 		if (dmcfreq->performance_rate > target_rate)
2512 			target_rate = dmcfreq->performance_rate;
2513 	}
2514 
2515 	if (dmcfreq->hdmi_rate && (status & SYS_STATUS_HDMI)) {
2516 		if (dmcfreq->hdmi_rate > target_rate)
2517 			target_rate = dmcfreq->hdmi_rate;
2518 	}
2519 
2520 	if (dmcfreq->hdmirx_rate && (status & SYS_STATUS_HDMIRX)) {
2521 		if (dmcfreq->hdmirx_rate > target_rate)
2522 			target_rate = dmcfreq->hdmirx_rate;
2523 	}
2524 
2525 	if (dmcfreq->video_4k_rate && (status & SYS_STATUS_VIDEO_4K)) {
2526 		if (dmcfreq->video_4k_rate > target_rate)
2527 			target_rate = dmcfreq->video_4k_rate;
2528 	}
2529 
2530 	if (dmcfreq->video_4k_10b_rate && (status & SYS_STATUS_VIDEO_4K_10B)) {
2531 		if (dmcfreq->video_4k_10b_rate > target_rate)
2532 			target_rate = dmcfreq->video_4k_10b_rate;
2533 	}
2534 
2535 	if (dmcfreq->video_4k_60p_rate && (status & SYS_STATUS_VIDEO_4K_60P)) {
2536 		if (dmcfreq->video_4k_60p_rate > target_rate)
2537 			target_rate = dmcfreq->video_4k_60p_rate;
2538 	}
2539 
2540 	if (dmcfreq->video_1080p_rate && (status & SYS_STATUS_VIDEO_1080P)) {
2541 		if (dmcfreq->video_1080p_rate > target_rate)
2542 			target_rate = dmcfreq->video_1080p_rate;
2543 	}
2544 
2545 	if (dmcfreq->video_svep_rate && (status & SYS_STATUS_VIDEO_SVEP)) {
2546 		if (dmcfreq->video_svep_rate > target_rate)
2547 			target_rate = dmcfreq->video_svep_rate;
2548 	}
2549 
2550 next:
2551 
2552 	dev_dbg(dmcfreq->dev, "status=0x%x\n", (unsigned int)status);
2553 	dmcfreq->is_fixed = is_fixed;
2554 	dmcfreq->status_rate = target_rate;
2555 	if (dmcfreq->refresh != refresh) {
2556 		if (dmcfreq->set_auto_self_refresh)
2557 			dmcfreq->set_auto_self_refresh(refresh);
2558 		dmcfreq->refresh = refresh;
2559 	}
2560 	rockchip_dmcfreq_update_target(dmcfreq);
2561 
2562 	return NOTIFY_OK;
2563 }
2564 
2565 static int rockchip_dmcfreq_panic_notifier(struct notifier_block *nb,
2566 					   unsigned long v, void *p)
2567 {
2568 	struct rockchip_dmcfreq *dmcfreq =
2569 		container_of(nb, struct rockchip_dmcfreq, panic_nb);
2570 	struct device *dev = dmcfreq->dev;
2571 
2572 	if (dmcfreq->regulator_count == 1)
2573 		dev_info(dev, "cur_freq: %lu Hz, volt: %lu uV\n",
2574 			 dmcfreq->rate, dmcfreq->volt);
2575 	else
2576 		dev_info(dev, "cur_freq: %lu Hz, volt_vdd: %lu uV, volt_mem: %lu uV\n",
2577 			 dmcfreq->rate, dmcfreq->volt, dmcfreq->mem_volt);
2578 
2579 	return 0;
2580 }
2581 
2582 static ssize_t rockchip_dmcfreq_status_show(struct device *dev,
2583 					    struct device_attribute *attr,
2584 					    char *buf)
2585 {
2586 	unsigned int status = rockchip_get_system_status();
2587 
2588 	return sprintf(buf, "0x%x\n", status);
2589 }
2590 
2591 static ssize_t rockchip_dmcfreq_status_store(struct device *dev,
2592 					     struct device_attribute *attr,
2593 					     const char *buf,
2594 					     size_t count)
2595 {
2596 	if (!count)
2597 		return -EINVAL;
2598 
2599 	rockchip_update_system_status(buf);
2600 
2601 	return count;
2602 }
2603 
2604 static DEVICE_ATTR(system_status, 0644, rockchip_dmcfreq_status_show,
2605 		   rockchip_dmcfreq_status_store);
2606 
2607 static ssize_t upthreshold_show(struct device *dev,
2608 				struct device_attribute *attr,
2609 				char *buf)
2610 {
2611 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
2612 	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
2613 
2614 	return sprintf(buf, "%d\n", data->upthreshold);
2615 }
2616 
2617 static ssize_t upthreshold_store(struct device *dev,
2618 				 struct device_attribute *attr,
2619 				 const char *buf,
2620 				 size_t count)
2621 {
2622 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
2623 	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
2624 	unsigned int value;
2625 
2626 	if (kstrtouint(buf, 10, &value))
2627 		return -EINVAL;
2628 
2629 	data->upthreshold = value;
2630 
2631 	return count;
2632 }
2633 
2634 static DEVICE_ATTR_RW(upthreshold);
2635 
2636 static ssize_t downdifferential_show(struct device *dev,
2637 				     struct device_attribute *attr,
2638 				     char *buf)
2639 {
2640 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
2641 	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
2642 
2643 	return sprintf(buf, "%d\n", data->downdifferential);
2644 }
2645 
2646 static ssize_t downdifferential_store(struct device *dev,
2647 				      struct device_attribute *attr,
2648 				      const char *buf,
2649 				      size_t count)
2650 {
2651 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
2652 	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
2653 	unsigned int value;
2654 
2655 	if (kstrtouint(buf, 10, &value))
2656 		return -EINVAL;
2657 
2658 	data->downdifferential = value;
2659 
2660 	return count;
2661 }
2662 
2663 static DEVICE_ATTR_RW(downdifferential);
2664 
2665 static unsigned long get_nocp_req_rate(struct rockchip_dmcfreq *dmcfreq)
2666 {
2667 	unsigned long target = 0, cpu_bw = 0;
2668 	int i;
2669 
2670 	if (!dmcfreq->cpu_bw_tbl || dmcfreq->nocp_cpu_id < 0)
2671 		goto out;
2672 
2673 	cpu_bw = dmcfreq->nocp_bw[dmcfreq->nocp_cpu_id];
2674 
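	/*
	 * Walk the table and keep the rate of the last entry whose
	 * minimum bandwidth threshold the measured CPU bandwidth still
	 * reaches, i.e. pick the highest matching DMC rate.
	 */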
2675 	for (i = 0; dmcfreq->cpu_bw_tbl[i].freq != CPUFREQ_TABLE_END; i++) {
2676 		if (cpu_bw >= dmcfreq->cpu_bw_tbl[i].min)
2677 			target = dmcfreq->cpu_bw_tbl[i].freq;
2678 	}
2679 
2680 out:
2681 	return target;
2682 }
2683 
2684 static int devfreq_dmc_ondemand_func(struct devfreq *df,
2685 				     unsigned long *freq)
2686 {
2687 	int err;
2688 	struct devfreq_dev_status *stat;
2689 	unsigned long long a, b;
2690 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(df->dev.parent);
2691 	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
2692 	unsigned int upthreshold = data->upthreshold;
2693 	unsigned int downdifferential = data->downdifferential;
2694 	unsigned long target_freq = 0, nocp_req_rate = 0;
2695 	u64 now;
2696 
2697 	if (dmcfreq->info.auto_freq_en && !dmcfreq->is_fixed) {
2698 		if (dmcfreq->status_rate)
2699 			target_freq = dmcfreq->status_rate;
2700 		else if (dmcfreq->auto_min_rate)
2701 			target_freq = dmcfreq->auto_min_rate;
2702 		nocp_req_rate = get_nocp_req_rate(dmcfreq);
2703 		target_freq = max3(target_freq, nocp_req_rate,
2704 				   dmcfreq->info.vop_req_rate);
2705 		now = ktime_to_us(ktime_get());
2706 		if (now < dmcfreq->touchboostpulse_endtime)
2707 			target_freq = max(target_freq, dmcfreq->boost_rate);
2708 	} else {
2709 		if (dmcfreq->status_rate)
2710 			target_freq = dmcfreq->status_rate;
2711 		else if (dmcfreq->normal_rate)
2712 			target_freq = dmcfreq->normal_rate;
2713 		if (target_freq)
2714 			*freq = target_freq;
2715 		if (dmcfreq->info.auto_freq_en && !devfreq_update_stats(df))
2716 			return 0;
2717 		goto reset_last_status;
2718 	}
2719 
2720 	if (!upthreshold || !downdifferential)
2721 		goto reset_last_status;
2722 
2723 	if (upthreshold > 100 ||
2724 	    upthreshold < downdifferential)
2725 		goto reset_last_status;
2726 
2727 	err = devfreq_update_stats(df);
2728 	if (err)
2729 		goto reset_last_status;
2730 
2731 	stat = &df->last_status;
2732 
2733 	/* Assume MAX if it is going to be divided by zero */
2734 	if (stat->total_time == 0) {
2735 		*freq = DEVFREQ_MAX_FREQ;
2736 		return 0;
2737 	}
2738 
2739 	/* Prevent overflow */
2740 	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
2741 		stat->busy_time >>= 7;
2742 		stat->total_time >>= 7;
2743 	}
2744 
2745 	/* Set MAX if it's busy enough */
2746 	if (stat->busy_time * 100 >
2747 	    stat->total_time * upthreshold) {
2748 		*freq = DEVFREQ_MAX_FREQ;
2749 		return 0;
2750 	}
2751 
2752 	/* Set MAX if we do not know the initial frequency */
2753 	if (stat->current_frequency == 0) {
2754 		*freq = DEVFREQ_MAX_FREQ;
2755 		return 0;
2756 	}
2757 
2758 	/* Keep the current frequency */
2759 	if (stat->busy_time * 100 >
2760 	    stat->total_time * (upthreshold - downdifferential)) {
2761 		*freq = max(target_freq, stat->current_frequency);
2762 		return 0;
2763 	}
2764 
2765 	/* Set the desired frequency based on the load */
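	/*
	 * target = current_freq * (busy / total) * 100
	 *		/ (upthreshold - downdifferential / 2),
	 * which steers utilization towards the midpoint between the up
	 * and down thresholds.
	 */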
2766 	a = stat->busy_time;
2767 	a *= stat->current_frequency;
2768 	b = div_u64(a, stat->total_time);
2769 	b *= 100;
2770 	b = div_u64(b, (upthreshold - downdifferential / 2));
2771 	*freq = max_t(unsigned long, target_freq, b);
2772 
2773 	return 0;
2774 
2775 reset_last_status:
2776 	reset_last_status(df);
2777 
2778 	return 0;
2779 }
2780 
2781 static int devfreq_dmc_ondemand_handler(struct devfreq *devfreq,
2782 					unsigned int event, void *data)
2783 {
2784 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(devfreq->dev.parent);
2785 
2786 	if (!dmcfreq->info.auto_freq_en)
2787 		return 0;
2788 
2789 	switch (event) {
2790 	case DEVFREQ_GOV_START:
2791 		devfreq_monitor_start(devfreq);
2792 		break;
2793 
2794 	case DEVFREQ_GOV_STOP:
2795 		devfreq_monitor_stop(devfreq);
2796 		break;
2797 
2798 	case DEVFREQ_GOV_UPDATE_INTERVAL:
2799 		devfreq_update_interval(devfreq, (unsigned int *)data);
2800 		break;
2801 
2802 	case DEVFREQ_GOV_SUSPEND:
2803 		devfreq_monitor_suspend(devfreq);
2804 		break;
2805 
2806 	case DEVFREQ_GOV_RESUME:
2807 		devfreq_monitor_resume(devfreq);
2808 		break;
2809 
2810 	default:
2811 		break;
2812 	}
2813 
2814 	return 0;
2815 }
2816 
2817 static struct devfreq_governor devfreq_dmc_ondemand = {
2818 	.name = "dmc_ondemand",
2819 	.get_target_freq = devfreq_dmc_ondemand_func,
2820 	.event_handler = devfreq_dmc_ondemand_handler,
2821 };
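/*
 * The governor is looked up by this name when
 * rockchip_dmcfreq_add_devfreq() passes "dmc_ondemand" to
 * devm_devfreq_add_device().
 */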
2822 
2823 static int rockchip_dmcfreq_enable_event(struct rockchip_dmcfreq *dmcfreq)
2824 {
2825 	int i, ret;
2826 
2827 	if (!dmcfreq->info.auto_freq_en)
2828 		return 0;
2829 
2830 	for (i = 0; i < dmcfreq->edev_count; i++) {
2831 		ret = devfreq_event_enable_edev(dmcfreq->edev[i]);
2832 		if (ret < 0) {
2833 			dev_err(dmcfreq->dev,
2834 				"failed to enable devfreq-event\n");
2835 			return ret;
2836 		}
2837 	}
2838 
2839 	return 0;
2840 }
2841 
2842 static int rockchip_dmcfreq_disable_event(struct rockchip_dmcfreq *dmcfreq)
2843 {
2844 	int i, ret;
2845 
2846 	if (!dmcfreq->info.auto_freq_en)
2847 		return 0;
2848 
2849 	for (i = 0; i < dmcfreq->edev_count; i++) {
2850 		ret = devfreq_event_disable_edev(dmcfreq->edev[i]);
2851 		if (ret < 0) {
2852 			dev_err(dmcfreq->dev,
2853 				"failed to disable devfreq-event\n");
2854 			return ret;
2855 		}
2856 	}
2857 
2858 	return 0;
2859 }
2860 
2861 static int rockchip_get_edev_id(struct rockchip_dmcfreq *dmcfreq,
2862 				const char *name)
2863 {
2864 	struct devfreq_event_dev *edev;
2865 	int i;
2866 
2867 	for (i = 0; i < dmcfreq->edev_count; i++) {
2868 		edev = dmcfreq->edev[i];
2869 		if (!strcmp(edev->desc->name, name))
2870 			return i;
2871 	}
2872 
2873 	return -EINVAL;
2874 }
2875 
2876 static int rockchip_dmcfreq_get_event(struct rockchip_dmcfreq *dmcfreq)
2877 {
2878 	struct device *dev = dmcfreq->dev;
2879 	struct device_node *events_np, *np = dev->of_node;
2880 	int i, j, count, available_count = 0;
2881 
2882 	count = devfreq_event_get_edev_count(dev, "devfreq-events");
2883 	if (count < 0) {
2884 		dev_dbg(dev, "failed to get count of devfreq-event dev\n");
2885 		return 0;
2886 	}
2887 	for (i = 0; i < count; i++) {
2888 		events_np = of_parse_phandle(np, "devfreq-events", i);
2889 		if (!events_np)
2890 			continue;
2891 		if (of_device_is_available(events_np))
2892 			available_count++;
2893 		of_node_put(events_np);
2894 	}
2895 	if (!available_count) {
2896 		dev_dbg(dev, "failed to get available devfreq-event\n");
2897 		return 0;
2898 	}
2899 	dmcfreq->edev_count = available_count;
2900 	dmcfreq->edev = devm_kzalloc(dev,
2901 				     sizeof(*dmcfreq->edev) * available_count,
2902 				     GFP_KERNEL);
2903 	if (!dmcfreq->edev)
2904 		return -ENOMEM;
2905 
2906 	for (i = 0, j = 0; i < count; i++) {
2907 		events_np = of_parse_phandle(np, "devfreq-events", i);
2908 		if (!events_np)
2909 			continue;
2910 		if (of_device_is_available(events_np)) {
2911 			of_node_put(events_np);
2912 			if (j >= available_count) {
2913 				dev_err(dev, "invalid event conut\n");
2914 				return -EINVAL;
2915 			}
2916 			dmcfreq->edev[j] =
2917 				devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i);
2918 			if (IS_ERR(dmcfreq->edev[j]))
2919 				return -EPROBE_DEFER;
2920 			j++;
2921 		} else {
2922 			of_node_put(events_np);
2923 		}
2924 	}
2925 	dmcfreq->info.auto_freq_en = true;
2926 	dmcfreq->dfi_id = rockchip_get_edev_id(dmcfreq, "dfi");
2927 	dmcfreq->nocp_cpu_id = rockchip_get_edev_id(dmcfreq, "nocp-cpu");
2928 	dmcfreq->nocp_bw =
2929 		devm_kzalloc(dev, sizeof(*dmcfreq->nocp_bw) * available_count,
2930 			     GFP_KERNEL);
2931 	if (!dmcfreq->nocp_bw)
2932 		return -ENOMEM;
2933 
2934 	return 0;
2935 }
2936 
2937 static int rockchip_dmcfreq_power_control(struct rockchip_dmcfreq *dmcfreq)
2938 {
2939 	struct device *dev = dmcfreq->dev;
2940 	struct device_node *np = dev->of_node;
2941 	struct opp_table *opp_table = NULL, *reg_opp_table = NULL;
2942 	const char * const reg_names[] = {"center", "mem"};
2943 	int ret = 0;
2944 
2945 	if (of_find_property(np, "mem-supply", NULL))
2946 		dmcfreq->regulator_count = 2;
2947 	else
2948 		dmcfreq->regulator_count = 1;
2949 	reg_opp_table = dev_pm_opp_set_regulators(dev, reg_names,
2950 						  dmcfreq->regulator_count);
2951 	if (IS_ERR(reg_opp_table)) {
2952 		dev_err(dev, "failed to set regulators\n");
2953 		return PTR_ERR(reg_opp_table);
2954 	}
2955 	opp_table = dev_pm_opp_register_set_opp_helper(dev, rockchip_dmcfreq_opp_helper);
2956 	if (IS_ERR(opp_table)) {
2957 		dev_err(dev, "failed to set opp helper\n");
2958 		ret = PTR_ERR(opp_table);
2959 		goto reg_opp_table;
2960 	}
2961 
2962 	dmcfreq->vdd_center = devm_regulator_get_optional(dev, "center");
2963 	if (IS_ERR(dmcfreq->vdd_center)) {
2964 		dev_err(dev, "Cannot get the regulator \"center\"\n");
2965 		ret = PTR_ERR(dmcfreq->vdd_center);
2966 		goto opp_table;
2967 	}
2968 	if (dmcfreq->regulator_count > 1) {
2969 		dmcfreq->mem_reg = devm_regulator_get_optional(dev, "mem");
2970 		if (IS_ERR(dmcfreq->mem_reg)) {
2971 			dev_err(dev, "Cannot get the regulator \"mem\"\n");
2972 			ret = PTR_ERR(dmcfreq->mem_reg);
2973 			goto opp_table;
2974 		}
2975 	}
2976 
2977 	dmcfreq->dmc_clk = devm_clk_get(dev, "dmc_clk");
2978 	if (IS_ERR(dmcfreq->dmc_clk)) {
2979 		dev_err(dev, "Cannot get the clk dmc_clk. If using SCMI, trusted firmware need update to V1.01 and above.\n");
2980 		ret = PTR_ERR(dmcfreq->dmc_clk);
2981 		goto opp_table;
2982 	}
2983 	dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);
2984 
2985 	return 0;
2986 
2987 opp_table:
2988 	if (opp_table)
2989 		dev_pm_opp_unregister_set_opp_helper(opp_table);
2990 reg_opp_table:
2991 	if (reg_opp_table)
2992 		dev_pm_opp_put_regulators(reg_opp_table);
2993 
2994 	return ret;
2995 }
2996 
2997 static int rockchip_dmcfreq_dmc_init(struct platform_device *pdev,
2998 				     struct rockchip_dmcfreq *dmcfreq)
2999 {
3000 	const struct of_device_id *match;
3001 	int (*init)(struct platform_device *pdev,
3002 		    struct rockchip_dmcfreq *data);
3003 	int ret;
3004 
3005 	match = of_match_node(rockchip_dmcfreq_of_match, pdev->dev.of_node);
3006 	if (match) {
3007 		init = match->data;
3008 		if (init) {
3009 			ret = init(pdev, dmcfreq);
3010 			if (ret)
3011 				return ret;
3012 		}
3013 	}
3014 
3015 	return 0;
3016 }
3017 
3018 static void rockchip_dmcfreq_parse_dt(struct rockchip_dmcfreq *dmcfreq)
3019 {
3020 	struct device *dev = dmcfreq->dev;
3021 	struct device_node *np = dev->of_node;
3022 
3023 	if (!rockchip_get_system_status_rate(np, "system-status-freq", dmcfreq))
3024 		dmcfreq->system_status_en = true;
3025 	else if (!rockchip_get_system_status_level(np, "system-status-level", dmcfreq))
3026 		dmcfreq->system_status_en = true;
3027 
3028 	of_property_read_u32(np, "min-cpu-freq", &dmcfreq->min_cpu_freq);
3029 
3030 	of_property_read_u32(np, "upthreshold",
3031 			     &dmcfreq->ondemand_data.upthreshold);
3032 	of_property_read_u32(np, "downdifferential",
3033 			     &dmcfreq->ondemand_data.downdifferential);
3034 	if (dmcfreq->info.auto_freq_en)
3035 		of_property_read_u32(np, "auto-freq-en",
3036 				     &dmcfreq->info.auto_freq_en);
3037 	if (!dmcfreq->auto_min_rate) {
3038 		of_property_read_u32(np, "auto-min-freq",
3039 				     (u32 *)&dmcfreq->auto_min_rate);
3040 		dmcfreq->auto_min_rate *= 1000;
3041 	}
3042 
3043 	if (rockchip_get_freq_map_talbe(np, "cpu-bw-dmc-freq",
3044 					&dmcfreq->cpu_bw_tbl))
3045 		dev_dbg(dev, "failed to get cpu bandwidth to dmc rate\n");
3046 	if (rockchip_get_freq_map_talbe(np, "vop-frame-bw-dmc-freq",
3047 					&dmcfreq->info.vop_frame_bw_tbl))
3048 		dev_dbg(dev, "failed to get vop frame bandwidth to dmc rate\n");
3049 	if (rockchip_get_freq_map_talbe(np, "vop-bw-dmc-freq",
3050 					&dmcfreq->info.vop_bw_tbl))
3051 		dev_err(dev, "failed to get vop bandwidth to dmc rate\n");
3052 	if (rockchip_get_rl_map_talbe(np, "vop-pn-msch-readlatency",
3053 				      &dmcfreq->info.vop_pn_rl_tbl))
3054 		dev_err(dev, "failed to get vop pn to msch rl\n");
3055 	if (dmcfreq->video_4k_rate)
3056 		dmcfreq->info.vop_4k_rate = dmcfreq->video_4k_rate;
3057 	else if (dmcfreq->video_4k_10b_rate)
3058 		dmcfreq->info.vop_4k_rate = dmcfreq->video_4k_10b_rate;
3059 
3060 	of_property_read_u32(np, "touchboost_duration",
3061 			     (u32 *)&dmcfreq->touchboostpulse_duration_val);
3062 	if (dmcfreq->touchboostpulse_duration_val)
3063 		dmcfreq->touchboostpulse_duration_val *= USEC_PER_MSEC;
3064 	else
3065 		dmcfreq->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
3066 }
3067 
3068 static int rockchip_dmcfreq_add_devfreq(struct rockchip_dmcfreq *dmcfreq)
3069 {
3070 	struct devfreq_dev_profile *devp = &rockchip_devfreq_dmc_profile;
3071 	struct device *dev = dmcfreq->dev;
3072 	struct dev_pm_opp *opp;
3073 	struct devfreq *devfreq;
3074 	unsigned long opp_rate = dmcfreq->rate;
3075 
3076 	opp = devfreq_recommended_opp(dev, &opp_rate, 0);
3077 	if (IS_ERR(opp)) {
3078 		dev_err(dev, "Failed to find opp for %lu Hz\n", opp_rate);
3079 		return PTR_ERR(opp);
3080 	}
3081 	dev_pm_opp_put(opp);
3082 
3083 	devp->initial_freq = dmcfreq->rate;
3084 	devfreq = devm_devfreq_add_device(dev, devp, "dmc_ondemand",
3085 					  &dmcfreq->ondemand_data);
3086 	if (IS_ERR(devfreq)) {
3087 		dev_err(dev, "failed to add devfreq\n");
3088 		return PTR_ERR(devfreq);
3089 	}
3090 
3091 	devm_devfreq_register_opp_notifier(dev, devfreq);
3092 
3093 	devfreq->last_status.current_frequency = opp_rate;
3094 
3095 	reset_last_status(devfreq);
3096 
3097 	dmcfreq->info.devfreq = devfreq;
3098 
3099 	return 0;
3100 }
3101 
3102 static void rockchip_dmcfreq_register_notifier(struct rockchip_dmcfreq *dmcfreq)
3103 {
3104 	int ret;
3105 
3106 	if (dmcfreq->system_status_en || dmcfreq->info.auto_freq_en) {
3107 		if (vop_register_dmc())
3108 			dev_err(dmcfreq->dev, "fail to register notify to vop.\n");
3109 
3110 		dmcfreq->status_nb.notifier_call =
3111 			rockchip_dmcfreq_system_status_notifier;
3112 		ret = rockchip_register_system_status_notifier(&dmcfreq->status_nb);
3113 		if (ret)
3114 			dev_err(dmcfreq->dev, "failed to register system_status nb\n");
3115 	}
3116 
3117 	dmcfreq->panic_nb.notifier_call = rockchip_dmcfreq_panic_notifier;
3118 	ret = atomic_notifier_chain_register(&panic_notifier_list,
3119 					     &dmcfreq->panic_nb);
3120 	if (ret)
3121 		dev_err(dmcfreq->dev, "failed to register panic nb\n");
3122 
3123 	dmc_mdevp.data = dmcfreq->info.devfreq;
3124 	dmcfreq->mdev_info = rockchip_system_monitor_register(dmcfreq->dev,
3125 							      &dmc_mdevp);
3126 	if (IS_ERR(dmcfreq->mdev_info)) {
3127 		dev_dbg(dmcfreq->dev, "without without system monitor\n");
3128 		dmcfreq->mdev_info = NULL;
3129 	}
3130 }
3131 
3132 static void rockchip_dmcfreq_add_interface(struct rockchip_dmcfreq *dmcfreq)
3133 {
3134 	struct devfreq *devfreq = dmcfreq->info.devfreq;
3135 
3136 	if (sysfs_create_file(&devfreq->dev.kobj, &dev_attr_upthreshold.attr))
3137 		dev_err(dmcfreq->dev,
3138 			"failed to register upthreshold sysfs file\n");
3139 	if (sysfs_create_file(&devfreq->dev.kobj,
3140 			      &dev_attr_downdifferential.attr))
3141 		dev_err(dmcfreq->dev,
3142 			"failed to register downdifferential sysfs file\n");
3143 
3144 	if (!rockchip_add_system_status_interface(&devfreq->dev))
3145 		return;
3146 	if (sysfs_create_file(&devfreq->dev.kobj,
3147 			      &dev_attr_system_status.attr))
3148 		dev_err(dmcfreq->dev,
3149 			"failed to register system_status sysfs file\n");
3150 }
3151 
3152 static void rockchip_dmcfreq_boost_work(struct work_struct *work)
3153 {
3154 	struct rockchip_dmcfreq *dmcfreq = boost_to_dmcfreq(work);
3155 
3156 	rockchip_dmcfreq_update_target(dmcfreq);
3157 }
3158 
3159 static void rockchip_dmcfreq_input_event(struct input_handle *handle,
3160 					 unsigned int type,
3161 					 unsigned int code,
3162 					 int value)
3163 {
3164 	struct rockchip_dmcfreq *dmcfreq = handle->private;
3165 	u64 now, endtime;
3166 
3167 	if (type != EV_ABS && type != EV_KEY)
3168 		return;
3169 
3170 	now = ktime_to_us(ktime_get());
3171 	endtime = now + dmcfreq->touchboostpulse_duration_val;
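	/*
	 * Ignore events that would extend the boost window by less than
	 * 10 ms, so the boost work is not re-queued for every single
	 * input event.
	 */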
3172 	if (endtime < (dmcfreq->touchboostpulse_endtime + 10 * USEC_PER_MSEC))
3173 		return;
3174 	dmcfreq->touchboostpulse_endtime = endtime;
3175 
3176 	queue_work(system_freezable_wq, &dmcfreq->boost_work);
3177 }
3178 
3179 static int rockchip_dmcfreq_input_connect(struct input_handler *handler,
3180 					  struct input_dev *dev,
3181 					  const struct input_device_id *id)
3182 {
3183 	int error;
3184 	struct input_handle *handle;
3185 	struct rockchip_dmcfreq *dmcfreq = input_hd_to_dmcfreq(handler);
3186 
3187 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
3188 	if (!handle)
3189 		return -ENOMEM;
3190 
3191 	handle->dev = dev;
3192 	handle->handler = handler;
3193 	handle->name = "dmcfreq";
3194 	handle->private = dmcfreq;
3195 
3196 	error = input_register_handle(handle);
3197 	if (error)
3198 		goto err2;
3199 
3200 	error = input_open_device(handle);
3201 	if (error)
3202 		goto err1;
3203 
3204 	return 0;
3205 err1:
3206 	input_unregister_handle(handle);
3207 err2:
3208 	kfree(handle);
3209 	return error;
3210 }
3211 
3212 static void rockchip_dmcfreq_input_disconnect(struct input_handle *handle)
3213 {
3214 	input_close_device(handle);
3215 	input_unregister_handle(handle);
3216 	kfree(handle);
3217 }
3218 
3219 static const struct input_device_id rockchip_dmcfreq_input_ids[] = {
3220 	{
3221 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
3222 			INPUT_DEVICE_ID_MATCH_ABSBIT,
3223 		.evbit = { BIT_MASK(EV_ABS) },
3224 		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
3225 			BIT_MASK(ABS_MT_POSITION_X) |
3226 			BIT_MASK(ABS_MT_POSITION_Y) },
3227 	},
3228 	{
3229 		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
3230 			INPUT_DEVICE_ID_MATCH_ABSBIT,
3231 		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
3232 		.absbit = { [BIT_WORD(ABS_X)] =
3233 			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
3234 	},
3235 	{
3236 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
3237 		.evbit = { BIT_MASK(EV_KEY) },
3238 	},
3239 	{ },
3240 };
3241 
3242 static void rockchip_dmcfreq_boost_init(struct rockchip_dmcfreq *dmcfreq)
3243 {
3244 	if (!dmcfreq->boost_rate)
3245 		return;
3246 	INIT_WORK(&dmcfreq->boost_work, rockchip_dmcfreq_boost_work);
3247 	dmcfreq->input_handler.event = rockchip_dmcfreq_input_event;
3248 	dmcfreq->input_handler.connect = rockchip_dmcfreq_input_connect;
3249 	dmcfreq->input_handler.disconnect = rockchip_dmcfreq_input_disconnect;
3250 	dmcfreq->input_handler.name = "dmcfreq";
3251 	dmcfreq->input_handler.id_table = rockchip_dmcfreq_input_ids;
3252 	if (input_register_handler(&dmcfreq->input_handler))
3253 		dev_err(dmcfreq->dev, "failed to register input handler\n");
3254 }
3255 
3256 static unsigned long model_static_power(struct devfreq *devfreq,
3257 					unsigned long voltage)
3258 {
3259 	struct device *dev = devfreq->dev.parent;
3260 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
3261 
3262 	int temperature;
3263 	unsigned long temp;
3264 	unsigned long temp_squared, temp_cubed, temp_scaling_factor;
3265 	const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
3266 
3267 	if (!IS_ERR_OR_NULL(dmcfreq->ddr_tz) && dmcfreq->ddr_tz->ops->get_temp) {
3268 		int ret;
3269 
3270 		ret = dmcfreq->ddr_tz->ops->get_temp(dmcfreq->ddr_tz,
3271 						     &temperature);
3273 		if (ret) {
3274 			dev_warn_ratelimited(dev,
3275 					     "failed to read temp for ddr thermal zone: %d\n",
3276 					     ret);
3277 			temperature = FALLBACK_STATIC_TEMPERATURE;
3278 		}
3279 	} else {
3280 		temperature = FALLBACK_STATIC_TEMPERATURE;
3281 	}
3282 
3283 	/*
3284 	 * Calculate the temperature scaling factor. To be applied to the
3285 	 * voltage scaled power.
3286 	 */
3287 	temp = temperature / 1000;
3288 	temp_squared = temp * temp;
3289 	temp_cubed = temp_squared * temp;
3290 	temp_scaling_factor = (dmcfreq->ts[3] * temp_cubed)
3291 	    + (dmcfreq->ts[2] * temp_squared)
3292 	    + (dmcfreq->ts[1] * temp)
3293 	    + dmcfreq->ts[0];
3294 
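	/*
	 * Static power is modelled as coefficient * V^3 * f(T): the
	 * ">> 10" and ">> 20" shifts keep the fixed-point intermediates
	 * in range, and the final "/ 1000000" rescales the polynomial
	 * factors.
	 */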
3295 	return (((dmcfreq->static_coefficient * voltage_cubed) >> 20)
3296 		* temp_scaling_factor) / 1000000;
3297 }
3298 
3299 static struct devfreq_cooling_power ddr_cooling_power_data = {
3300 	.get_static_power = model_static_power,
3301 	.dyn_power_coeff = 120,
3302 };
3303 
3304 static int ddr_power_model_simple_init(struct rockchip_dmcfreq *dmcfreq)
3305 {
3306 	struct device_node *power_model_node;
3307 	const char *tz_name;
3308 	u32 temp;
3309 
3310 	power_model_node = of_get_child_by_name(dmcfreq->dev->of_node,
3311 						"ddr_power_model");
3312 	if (!power_model_node) {
3313 		dev_err(dmcfreq->dev, "could not find power_model node\n");
3314 		return -ENODEV;
3315 	}
3316 
3317 	if (of_property_read_string(power_model_node, "thermal-zone", &tz_name)) {
3318 		dev_err(dmcfreq->dev, "ts in power_model not available\n");
3319 		return -EINVAL;
3320 	}
3321 
3322 	dmcfreq->ddr_tz = thermal_zone_get_zone_by_name(tz_name);
3323 	if (IS_ERR(dmcfreq->ddr_tz)) {
3324 		pr_warn_ratelimited("Error getting ddr thermal zone (%ld), not yet ready?\n",
3325 				    PTR_ERR(dmcfreq->ddr_tz));
3327 		dmcfreq->ddr_tz = NULL;
3328 
3329 		return -EPROBE_DEFER;
3330 	}
3331 
3332 	if (of_property_read_u32(power_model_node, "static-power-coefficient",
3333 				 &dmcfreq->static_coefficient)) {
3334 		dev_err(dmcfreq->dev,
3335 			"static-power-coefficient not available\n");
3336 		return -EINVAL;
3337 	}
3338 	if (of_property_read_u32(power_model_node, "dynamic-power-coefficient",
3339 				 &temp)) {
3340 		dev_err(dmcfreq->dev,
3341 			"dynamic-power-coefficient not available\n");
3342 		return -EINVAL;
3343 	}
3344 	ddr_cooling_power_data.dyn_power_coeff = (unsigned long)temp;
3345 
3346 	if (of_property_read_u32_array(power_model_node, "ts",
3347 				       (u32 *)dmcfreq->ts, 4)) {
3348 		dev_err(dmcfreq->dev, "ts in power_model not available\n");
3349 		return -EINVAL;
3350 	}
3351 
3352 	return 0;
3353 }
3354 
3355 static void
3356 rockchip_dmcfreq_register_cooling_device(struct rockchip_dmcfreq *dmcfreq)
3357 {
3358 	int ret;
3359 
3360 	ret = ddr_power_model_simple_init(dmcfreq);
3361 	if (ret)
3362 		return;
3363 	dmcfreq->devfreq_cooling =
3364 		of_devfreq_cooling_register_power(dmcfreq->dev->of_node,
3365 						  dmcfreq->info.devfreq,
3366 						  &ddr_cooling_power_data);
3367 	if (IS_ERR(dmcfreq->devfreq_cooling)) {
3368 		ret = PTR_ERR(dmcfreq->devfreq_cooling);
3369 		dev_err(dmcfreq->dev,
3370 			"Failed to register cooling device (%d)\n",
3371 			ret);
3372 	}
3373 }
3374 
3375 static int rockchip_dmcfreq_probe(struct platform_device *pdev)
3376 {
3377 	struct device *dev = &pdev->dev;
3378 	struct rockchip_dmcfreq *data;
3379 	int ret;
3380 
3381 	data = devm_kzalloc(dev, sizeof(struct rockchip_dmcfreq), GFP_KERNEL);
3382 	if (!data)
3383 		return -ENOMEM;
3384 
3385 	data->dev = dev;
3386 	data->info.dev = dev;
3387 	mutex_init(&data->lock);
3388 	INIT_LIST_HEAD(&data->video_info_list);
3389 
3390 	ret = rockchip_dmcfreq_get_event(data);
3391 	if (ret)
3392 		return ret;
3393 
3394 	ret = rockchip_dmcfreq_power_control(data);
3395 	if (ret)
3396 		return ret;
3397 
3398 	ret = rockchip_init_opp_table(dev, NULL, "ddr_leakage", "center");
3399 	if (ret)
3400 		return ret;
3401 
3402 	ret = rockchip_dmcfreq_dmc_init(pdev, data);
3403 	if (ret)
3404 		return ret;
3405 
3406 	rockchip_dmcfreq_parse_dt(data);
3407 
3408 	platform_set_drvdata(pdev, data);
3409 
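	/*
	 * Neither system-status-driven nor load-driven scaling is enabled:
	 * keep the frequency fixed, register only the notifier and skip
	 * the devfreq setup below.
	 */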
3410 	if (!data->system_status_en && !data->info.auto_freq_en) {
		dev_info(dev, "devfreq feature is not enabled\n");
3412 		rockchip_dmcfreq_register_notifier(data);
3413 		return 0;
3414 	}
3415 
3416 	cpu_latency_qos_add_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
3417 
3418 	ret = devfreq_add_governor(&devfreq_dmc_ondemand);
	if (ret)
		goto remove_qos;
	ret = rockchip_dmcfreq_enable_event(data);
	if (ret)
		goto remove_governor;
	ret = rockchip_dmcfreq_add_devfreq(data);
	if (ret)
		goto disable_event;
3429 
3430 	rockchip_dmcfreq_register_notifier(data);
3431 	rockchip_dmcfreq_add_interface(data);
3432 	rockchip_dmcfreq_boost_init(data);
3433 	rockchip_dmcfreq_vop_bandwidth_init(&data->info);
3434 	rockchip_dmcfreq_register_cooling_device(data);
3435 
3436 	rockchip_set_system_status(SYS_STATUS_NORMAL);
3437 
	return 0;

	/* Error unwind: undo the registrations above in reverse order. */
disable_event:
	rockchip_dmcfreq_disable_event(data);
remove_governor:
	devfreq_remove_governor(&devfreq_dmc_ondemand);
remove_qos:
	cpu_latency_qos_remove_request(&pm_qos);
	return ret;
}
3440 
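/*
 * System suspend: stop the devfreq-event counters and the devfreq
 * monitor, then move the supplies to their optional sleep voltages.
 */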
3441 static __maybe_unused int rockchip_dmcfreq_suspend(struct device *dev)
3442 {
3443 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
3444 	int ret = 0;
3445 
3446 	if (!dmcfreq)
3447 		return 0;
3448 
3449 	ret = rockchip_dmcfreq_disable_event(dmcfreq);
3450 	if (ret)
3451 		return ret;
3452 
3453 	if (dmcfreq->info.devfreq) {
3454 		ret = devfreq_suspend_device(dmcfreq->info.devfreq);
3455 		if (ret < 0) {
			dev_err(dev, "failed to suspend the devfreq device\n");
3457 			return ret;
3458 		}
3459 	}
3460 
	/* set the voltage to sleep_volt if needed */
3462 	if (dmcfreq->sleep_volt && dmcfreq->sleep_volt != dmcfreq->volt) {
3463 		ret = regulator_set_voltage(dmcfreq->vdd_center,
3464 					    dmcfreq->sleep_volt, INT_MAX);
3465 		if (ret) {
3466 			dev_err(dev, "Cannot set vdd voltage %lu uV\n",
3467 				dmcfreq->sleep_volt);
3468 			return ret;
3469 		}
3470 	}
3471 	if (dmcfreq->sleep_mem_volt &&
3472 	    dmcfreq->sleep_mem_volt != dmcfreq->mem_volt) {
3473 		ret = regulator_set_voltage(dmcfreq->mem_reg,
3474 					    dmcfreq->sleep_mem_volt, INT_MAX);
3475 		if (ret) {
3476 			dev_err(dev, "Cannot set mem voltage %lu uV\n",
3477 				dmcfreq->sleep_mem_volt);
3478 			return ret;
3479 		}
3480 	}
3481 
3482 	return 0;
3483 }
3484 
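/*
 * System resume: the mirror of rockchip_dmcfreq_suspend(), restoring the
 * run-time voltages before the event counters and the devfreq monitor
 * are re-enabled.
 */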
3485 static __maybe_unused int rockchip_dmcfreq_resume(struct device *dev)
3486 {
3487 	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
3488 	int ret = 0;
3489 
3490 	if (!dmcfreq)
3491 		return 0;
3492 
	/* restore the normal voltage if it was set to sleep_volt */
3494 	if (dmcfreq->sleep_volt && dmcfreq->sleep_volt != dmcfreq->volt) {
3495 		ret = regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
3496 					    INT_MAX);
3497 		if (ret) {
3498 			dev_err(dev, "Cannot set vdd voltage %lu uV\n",
3499 				dmcfreq->volt);
3500 			return ret;
3501 		}
3502 	}
3503 	if (dmcfreq->sleep_mem_volt &&
3504 	    dmcfreq->sleep_mem_volt != dmcfreq->mem_volt) {
3505 		ret = regulator_set_voltage(dmcfreq->mem_reg, dmcfreq->mem_volt,
3506 					    INT_MAX);
3507 		if (ret) {
3508 			dev_err(dev, "Cannot set mem voltage %lu uV\n",
3509 				dmcfreq->mem_volt);
3510 			return ret;
3511 		}
3512 	}
3513 
3514 	ret = rockchip_dmcfreq_enable_event(dmcfreq);
3515 	if (ret)
3516 		return ret;
3517 
3518 	if (dmcfreq->info.devfreq) {
3519 		ret = devfreq_resume_device(dmcfreq->info.devfreq);
3520 		if (ret < 0) {
			dev_err(dev, "failed to resume the devfreq device\n");
3522 			return ret;
3523 		}
3524 	}
3525 
3526 	return ret;
3527 }
3528 
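/*
 * SIMPLE_DEV_PM_OPS only references the handlers when CONFIG_PM_SLEEP is
 * enabled, hence the __maybe_unused annotations above.
 */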
3529 static SIMPLE_DEV_PM_OPS(rockchip_dmcfreq_pm, rockchip_dmcfreq_suspend,
3530 			 rockchip_dmcfreq_resume);
3531 static struct platform_driver rockchip_dmcfreq_driver = {
3532 	.probe	= rockchip_dmcfreq_probe,
3533 	.driver = {
3534 		.name	= "rockchip-dmc",
3535 		.pm	= &rockchip_dmcfreq_pm,
3536 		.of_match_table = rockchip_dmcfreq_of_match,
3537 	},
3538 };
3539 module_platform_driver(rockchip_dmcfreq_driver);
3540 
3541 MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip dmcfreq driver based on the devfreq framework");
3543 MODULE_LICENSE("GPL v2");
3544