/*
 * Rockchip CPUFreq Driver
 *
 * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/rockchip/cpu.h>
#include <linux/slab.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/rockchip_system_monitor.h>

#include "cpufreq-dt.h"
#include "rockchip-cpufreq.h"

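/*
 * Per-cluster DVFS state. One instance covers all CPUs that share an OPP
 * table (see dev_pm_opp_of_get_sharing_cpus() in
 * rockchip_cpufreq_cluster_init()).
 */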
struct cluster_info {
	struct list_head list_head;
	struct monitor_dev_info *mdev_info;
	struct rockchip_opp_info opp_info;
	struct freq_qos_request dsu_qos_req;
	cpumask_t cpus;
	unsigned int idle_threshold_freq;
	int scale;
	bool is_idle_disabled;
	bool is_opp_shared_dsu;
	unsigned int regulator_count;
	unsigned long rate;
	unsigned long volt, mem_volt;
};

static LIST_HEAD(cluster_info_list);

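/*
 * The per-SoC get_soc_info() helpers below read OTP/nvmem cells (e.g.
 * "performance", "special", "specification_serial_number") to derive a
 * speed/variant bin, which is later used to select the matching OPP set.
 */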
static int px30_get_soc_info(struct device *dev, struct device_node *np,
			     int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (!bin)
		return 0;

	if (of_property_match_string(np, "nvmem-cell-names",
				     "performance") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, "performance", &value);
		if (ret) {
			dev_err(dev, "Failed to get soc performance value\n");
			return ret;
		}
		*bin = value;
	}
	if (*bin >= 0)
		dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

static int rk3288_get_soc_info(struct device *dev, struct device_node *np,
			       int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;
	char *name;

	if (!bin)
		goto next;
	if (of_property_match_string(np, "nvmem-cell-names", "special") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, "special", &value);
		if (ret) {
			dev_err(dev, "Failed to get soc special value\n");
			goto out;
		}
		if (value == 0xc)
			*bin = 0;
		else
			*bin = 1;
	}

	if (soc_is_rk3288w())
		name = "performance-w";
	else
		name = "performance";

	if (of_property_match_string(np, "nvmem-cell-names", name) >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, name, &value);
		if (ret) {
			dev_err(dev, "Failed to get soc performance value\n");
			goto out;
		}
		if (value & 0x2)
			*bin = 3;
		else if (value & 0x01)
			*bin = 2;
	}
	if (*bin >= 0)
		dev_info(dev, "bin=%d\n", *bin);

next:
	if (!process)
		goto out;
	if (of_property_match_string(np, "nvmem-cell-names",
				     "process") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, "process", &value);
		if (ret) {
			dev_err(dev, "Failed to get soc process version\n");
			goto out;
		}
		if (soc_is_rk3288() && (value == 0 || value == 1))
			*process = 0;
	}
	if (*process >= 0)
		dev_info(dev, "process=%d\n", *process);

out:
	return ret;
}

static int rk3399_get_soc_info(struct device *dev, struct device_node *np,
			       int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (!bin)
		return 0;

	if (of_property_match_string(np, "nvmem-cell-names",
				     "specification_serial_number") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np,
						  "specification_serial_number",
						  &value);
		if (ret) {
			dev_err(dev,
				"Failed to get specification_serial_number\n");
			goto out;
		}

		if (value == 0xb) {
			*bin = 0;
		} else if (value == 0x1) {
			if (of_property_match_string(np, "nvmem-cell-names",
						     "customer_demand") >= 0) {
				ret = rockchip_nvmem_cell_read_u8(np,
								  "customer_demand",
								  &value);
				if (ret) {
					dev_err(dev, "Failed to get customer_demand\n");
					goto out;
				}
				if (value == 0x0)
					*bin = 0;
				else
					*bin = 1;
			}
		} else if (value == 0x10) {
			*bin = 1;
		}
	}

out:
	if (*bin >= 0)
		dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

static int rk3588_get_soc_info(struct device *dev, struct device_node *np,
			       int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (!bin)
		return 0;

	if (of_property_match_string(np, "nvmem-cell-names",
				     "specification_serial_number") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np,
						  "specification_serial_number",
						  &value);
		if (ret) {
			dev_err(dev,
				"Failed to get specification_serial_number\n");
			return ret;
		}
		/* RK3588M */
		if (value == 0xd)
			*bin = 1;
		/* RK3588J */
		else if (value == 0xa)
			*bin = 2;
	}
	if (*bin < 0)
		*bin = 0;
	dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

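/*
 * RK3588: parts with a low speed grade (volt_sel at or below
 * "rockchip,pvtm-low-len-sel") are switched to the low PVTPLL length by
 * OR-ing the OPP_LENGTH_LOW flag into a clk_set_rate() call on the cpu
 * clock, then restoring the original rate.
 */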
static int rk3588_change_length(struct device *dev, struct device_node *np,
				int bin, int process, int volt_sel)
{
	struct clk *clk;
	unsigned long old_rate;
	unsigned int low_len_sel;
	u32 opp_flag = 0;
	int ret = 0;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_warn(dev, "failed to get cpu clk\n");
		return PTR_ERR(clk);
	}

	/* RK3588 low speed grade should change to low length */
	if (of_property_read_u32(np, "rockchip,pvtm-low-len-sel",
				 &low_len_sel))
		goto out;
	if (volt_sel > low_len_sel)
		goto out;
	opp_flag = OPP_LENGTH_LOW;

	old_rate = clk_get_rate(clk);
	ret = clk_set_rate(clk, old_rate | opp_flag);
	if (ret) {
		dev_err(dev, "failed to change length\n");
		goto out;
	}
	clk_set_rate(clk, old_rate);
out:
	clk_put(clk);

	return ret;
}

static int rk3588_set_supported_hw(struct device *dev, struct device_node *np,
				   int bin, int process, int volt_sel)
{
	struct opp_table *opp_table;
	u32 supported_hw[2];

	if (!of_property_read_bool(np, "rockchip,supported-hw"))
		return 0;

	/* SoC Version */
	supported_hw[0] = BIT(bin);
	/* Speed Grade */
	supported_hw[1] = BIT(volt_sel);
	opp_table = dev_pm_opp_set_supported_hw(dev, supported_hw, 2);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "failed to set supported opp\n");
		return PTR_ERR(opp_table);
	}

	return 0;
}

static int rk3588_set_soc_info(struct device *dev, struct device_node *np,
			       int bin, int process, int volt_sel)
{
	if (volt_sel < 0)
		return 0;
	if (bin < 0)
		bin = 0;

	rk3588_change_length(dev, np, bin, process, volt_sel);
	rk3588_set_supported_hw(dev, np, bin, process, volt_sel);

	return 0;
}

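/*
 * Program the SRAM read-margin bits in the core GRF and DSU GRF whenever the
 * target margin (derived from the supply voltage via the
 * "volt-mem-read-margin" table) changes. The upper halfword of each GRF write
 * is the usual Rockchip write-enable mask; the final pair of writes pulses a
 * trigger bit (set, 1 us delay, clear). Register offsets are kept as-is from
 * the vendor code.
 */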
static int rk3588_cpu_set_read_margin(struct device *dev,
				      struct rockchip_opp_info *opp_info,
				      u32 rm)
{
	if (!opp_info->volt_rm_tbl)
		return 0;
	if (rm == opp_info->current_rm || rm == UINT_MAX)
		return 0;

	dev_dbg(dev, "set rm to %u\n", rm);
	if (opp_info->grf) {
		regmap_write(opp_info->grf, 0x20, 0x001c0000 | (rm << 2));
		regmap_write(opp_info->grf, 0x28, 0x003c0000 | (rm << 2));
		regmap_write(opp_info->grf, 0x2c, 0x003c0000 | (rm << 2));
		regmap_write(opp_info->grf, 0x30, 0x00200020);
		udelay(1);
		regmap_write(opp_info->grf, 0x30, 0x00200000);
	}
	if (opp_info->dsu_grf) {
		regmap_write(opp_info->dsu_grf, 0x20, 0x001c0000 | (rm << 2));
		regmap_write(opp_info->dsu_grf, 0x28, 0x003c0000 | (rm << 2));
		regmap_write(opp_info->dsu_grf, 0x2c, 0x003c0000 | (rm << 2));
		regmap_write(opp_info->dsu_grf, 0x30, 0x001c0000 | (rm << 2));
		regmap_write(opp_info->dsu_grf, 0x38, 0x001c0000 | (rm << 2));
		regmap_write(opp_info->dsu_grf, 0x18, 0x40004000);
		udelay(1);
		regmap_write(opp_info->dsu_grf, 0x18, 0x40000000);
	}

	opp_info->current_rm = rm;

	return 0;
}

static int rv1126_get_soc_info(struct device *dev, struct device_node *np,
			       int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (of_property_match_string(np, "nvmem-cell-names", "performance") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, "performance", &value);
		if (ret) {
			dev_err(dev, "Failed to get soc performance value\n");
			return ret;
		}
		if (value == 0x1)
			*bin = 1;
		else
			*bin = 0;
	}
	if (*bin >= 0)
		dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

static const struct rockchip_opp_data px30_cpu_opp_data = {
	.get_soc_info = px30_get_soc_info,
};

static const struct rockchip_opp_data rk3288_cpu_opp_data = {
	.get_soc_info = rk3288_get_soc_info,
};

static const struct rockchip_opp_data rk3399_cpu_opp_data = {
	.get_soc_info = rk3399_get_soc_info,
};

static const struct rockchip_opp_data rk3588_cpu_opp_data = {
	.get_soc_info = rk3588_get_soc_info,
	.set_soc_info = rk3588_set_soc_info,
	.set_read_margin = rk3588_cpu_set_read_margin,
};

static const struct rockchip_opp_data rv1126_cpu_opp_data = {
	.get_soc_info = rv1126_get_soc_info,
};

static const struct of_device_id rockchip_cpufreq_of_match[] = {
	{
		.compatible = "rockchip,px30",
		.data = (void *)&px30_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rk3288",
		.data = (void *)&rk3288_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rk3288w",
		.data = (void *)&rk3288_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rk3326",
		.data = (void *)&px30_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rk3399",
		.data = (void *)&rk3399_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rk3588",
		.data = (void *)&rk3588_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rv1109",
		.data = (void *)&rv1126_cpu_opp_data,
	},
	{
		.compatible = "rockchip,rv1126",
		.data = (void *)&rv1126_cpu_opp_data,
	},
	{},
};

static struct cluster_info *rockchip_cluster_info_lookup(int cpu)
{
	struct cluster_info *cluster;

	list_for_each_entry(cluster, &cluster_info_list, list_head) {
		if (cpumask_test_cpu(cpu, &cluster->cpus))
			return cluster;
	}

	return NULL;
}

static int rockchip_cpufreq_set_volt(struct device *dev,
				     struct regulator *reg,
				     struct dev_pm_opp_supply *supply,
				     char *reg_name)
{
	int ret;

	dev_dbg(dev, "%s: %s voltages (uV): %lu %lu %lu\n", __func__, reg_name,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

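/*
 * Custom set_opp helper for clusters with both "cpu" and "mem" supplies:
 * when scaling up, raise mem then vdd, update the read margin and finally
 * the clock; when scaling down, move the clock (and read margin) first and
 * lower the supplies afterwards. On error the previous voltages, read margin
 * and rate are restored.
 */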
static int cpu_opp_helper(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
	struct dev_pm_opp_supply *old_supply_mem = &data->old_opp.supplies[1];
	struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
	struct dev_pm_opp_supply *new_supply_mem = &data->new_opp.supplies[1];
	struct regulator *vdd_reg = data->regulators[0];
	struct regulator *mem_reg = data->regulators[1];
	struct device *dev = data->dev;
	struct clk *clk = data->clk;
	struct cluster_info *cluster;
	struct rockchip_opp_info *opp_info;
	unsigned long old_freq = data->old_opp.rate;
	unsigned long new_freq = data->new_opp.rate;
	u32 target_rm = UINT_MAX;
	int ret = 0;

	cluster = rockchip_cluster_info_lookup(dev->id);
	if (!cluster)
		return -EINVAL;
	opp_info = &cluster->opp_info;
	rockchip_get_read_margin(dev, opp_info, new_supply_vdd->u_volt,
				 &target_rm);

	/* Change frequency */
	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, new_freq);
	/* Scaling up? Scale voltage before frequency */
	if (new_freq >= old_freq) {
		ret = rockchip_set_intermediate_rate(dev, opp_info, clk,
						     old_freq, new_freq,
						     true, true);
		if (ret) {
			dev_err(dev, "%s: failed to set clk rate: %lu\n",
				__func__, new_freq);
			return -EINVAL;
		}
		ret = rockchip_cpufreq_set_volt(dev, mem_reg, new_supply_mem,
						"mem");
		if (ret)
			goto restore_voltage;
		ret = rockchip_cpufreq_set_volt(dev, vdd_reg, new_supply_vdd,
						"vdd");
		if (ret)
			goto restore_voltage;
		rockchip_set_read_margin(dev, opp_info, target_rm, true);
		ret = clk_set_rate(clk, new_freq);
		if (ret) {
			dev_err(dev, "%s: failed to set clk rate: %lu %d\n",
				__func__, new_freq, ret);
			goto restore_rm;
		}
	/* Scaling down? Scale voltage after frequency */
	} else {
		ret = rockchip_set_intermediate_rate(dev, opp_info, clk,
						     old_freq, new_freq,
						     false, true);
		if (ret) {
			dev_err(dev, "%s: failed to set clk rate: %lu\n",
				__func__, new_freq);
			return -EINVAL;
		}
		rockchip_set_read_margin(dev, opp_info, target_rm, true);
		ret = clk_set_rate(clk, new_freq);
		if (ret) {
			dev_err(dev, "%s: failed to set clk rate: %lu %d\n",
				__func__, new_freq, ret);
			goto restore_rm;
		}
		ret = rockchip_cpufreq_set_volt(dev, vdd_reg, new_supply_vdd,
						"vdd");
		if (ret)
			goto restore_freq;
		ret = rockchip_cpufreq_set_volt(dev, mem_reg, new_supply_mem,
						"mem");
		if (ret)
			goto restore_freq;
	}

	cluster->volt = new_supply_vdd->u_volt;
	cluster->mem_volt = new_supply_mem->u_volt;

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_rm:
	rockchip_get_read_margin(dev, opp_info, old_supply_vdd->u_volt,
				 &target_rm);
	rockchip_set_read_margin(dev, opp_info, target_rm, true);
restore_voltage:
	rockchip_cpufreq_set_volt(dev, mem_reg, old_supply_mem, "mem");
	rockchip_cpufreq_set_volt(dev, vdd_reg, old_supply_vdd, "vdd");

	return ret;
}

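/*
 * Set up one cluster: parse the supplies and the OPP-v2 node, hook up the
 * SoC-specific opp_data callbacks, apply bin/process/leakage based OPP
 * selection and, when both "cpu" and "mem" supplies exist, register the
 * dual-regulator set_opp helper above.
 */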
static int rockchip_cpufreq_cluster_init(int cpu, struct cluster_info *cluster)
{
	struct rockchip_opp_info *opp_info = &cluster->opp_info;
	struct opp_table *pname_table = NULL;
	struct opp_table *reg_table = NULL;
	struct opp_table *opp_table;
	struct device_node *np;
	struct device *dev;
	const char * const reg_names[] = {"cpu", "mem"};
	char *reg_name = NULL;
	int bin = -EINVAL;
	int process = -EINVAL;
	int volt_sel = -EINVAL;
	int ret = 0;
	u32 freq = 0;

	dev = get_cpu_device(cpu);
	if (!dev)
		return -ENODEV;

	opp_info->dev = dev;

	if (of_find_property(dev->of_node, "cpu-supply", NULL))
		reg_name = "cpu";
	else if (of_find_property(dev->of_node, "cpu0-supply", NULL))
		reg_name = "cpu0";
	else
		return -ENOENT;

	np = of_parse_phandle(dev->of_node, "operating-points-v2", 0);
	if (!np) {
		dev_warn(dev, "OPP-v2 not supported\n");
		return -ENOENT;
	}

	opp_info->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(opp_info->grf))
		opp_info->grf = NULL;

	ret = dev_pm_opp_of_get_sharing_cpus(dev, &cluster->cpus);
	if (ret) {
		dev_err(dev, "Failed to get sharing cpus\n");
		goto np_err;
	}

	cluster->is_opp_shared_dsu = of_property_read_bool(np, "rockchip,opp-shared-dsu");
	if (!of_property_read_u32(np, "rockchip,idle-threshold-freq", &freq))
		cluster->idle_threshold_freq = freq;
	rockchip_get_opp_data(rockchip_cpufreq_of_match, opp_info);
	if (opp_info->data && opp_info->data->set_read_margin) {
		opp_info->current_rm = UINT_MAX;
		opp_info->target_rm = UINT_MAX;
		opp_info->dsu_grf =
			syscon_regmap_lookup_by_phandle(np, "rockchip,dsu-grf");
		if (IS_ERR(opp_info->dsu_grf))
			opp_info->dsu_grf = NULL;
		rockchip_get_volt_rm_table(dev, np, "volt-mem-read-margin",
					   &opp_info->volt_rm_tbl);
		of_property_read_u32(np, "low-volt-mem-read-margin",
				     &opp_info->low_rm);
		if (!of_property_read_u32(np, "intermediate-threshold-freq", &freq))
			opp_info->intermediate_threshold_freq = freq * 1000;
		rockchip_init_read_margin(dev, opp_info, reg_name);
	}
	if (opp_info->data && opp_info->data->get_soc_info)
		opp_info->data->get_soc_info(dev, np, &bin, &process);
	rockchip_get_scale_volt_sel(dev, "cpu_leakage", reg_name, bin, process,
				    &cluster->scale, &volt_sel);
	if (opp_info->data && opp_info->data->set_soc_info)
		opp_info->data->set_soc_info(dev, np, bin, process, volt_sel);
	pname_table = rockchip_set_opp_prop_name(dev, process, volt_sel);

	if (of_find_property(dev->of_node, "cpu-supply", NULL) &&
	    of_find_property(dev->of_node, "mem-supply", NULL)) {
		cluster->regulator_count = 2;
		reg_table = dev_pm_opp_set_regulators(dev, reg_names,
						      ARRAY_SIZE(reg_names));
		if (IS_ERR(reg_table)) {
			ret = PTR_ERR(reg_table);
			goto pname_opp_table;
		}
		opp_table = dev_pm_opp_register_set_opp_helper(dev,
							       cpu_opp_helper);
		if (IS_ERR(opp_table)) {
			ret = PTR_ERR(opp_table);
			goto reg_opp_table;
		}
	} else {
		cluster->regulator_count = 1;
	}

	of_node_put(np);

	return 0;

reg_opp_table:
	if (reg_table)
		dev_pm_opp_put_regulators(reg_table);
pname_opp_table:
	if (!IS_ERR_OR_NULL(pname_table))
		dev_pm_opp_put_prop_name(pname_table);
np_err:
	of_node_put(np);

	return ret;
}

int rockchip_cpufreq_adjust_power_scale(struct device *dev)
{
	struct cluster_info *cluster;

	cluster = rockchip_cluster_info_lookup(dev->id);
	if (!cluster)
		return -EINVAL;
	rockchip_adjust_power_scale(dev, cluster->scale);
	rockchip_pvtpll_calibrate_opp(&cluster->opp_info);
	rockchip_pvtpll_add_length(&cluster->opp_info);

	return 0;
}
EXPORT_SYMBOL_GPL(rockchip_cpufreq_adjust_power_scale);

int rockchip_cpufreq_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct cluster_info *cluster;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int ret = 0;

	cluster = rockchip_cluster_info_lookup(dev->id);
	if (!cluster)
		return -EINVAL;

	rockchip_monitor_volt_adjust_lock(cluster->mdev_info);
	ret = dev_pm_opp_set_rate(dev, target_freq);
	if (!ret) {
		cluster->rate = target_freq;
		if (cluster->regulator_count == 1) {
			freq = target_freq;
			opp = dev_pm_opp_find_freq_ceil(cluster->opp_info.dev, &freq);
			if (!IS_ERR(opp)) {
				cluster->volt = dev_pm_opp_get_voltage(opp);
				dev_pm_opp_put(opp);
			}
		}
	}
	rockchip_monitor_volt_adjust_unlock(cluster->mdev_info);

	return ret;
}
EXPORT_SYMBOL_GPL(rockchip_cpufreq_opp_set_rate);

static int rockchip_cpufreq_suspend(struct cpufreq_policy *policy)
{
	int ret = 0;

	ret = cpufreq_generic_suspend(policy);
	if (!ret)
		rockchip_monitor_suspend_low_temp_adjust(policy->cpu);

	return ret;
}

static int rockchip_cpufreq_add_monitor(struct cluster_info *cluster,
					struct cpufreq_policy *policy)
{
	struct device *dev = cluster->opp_info.dev;
	struct monitor_dev_profile *mdevp = NULL;
	struct monitor_dev_info *mdev_info = NULL;

	mdevp = kzalloc(sizeof(*mdevp), GFP_KERNEL);
	if (!mdevp)
		return -ENOMEM;

	mdevp->type = MONITOR_TYPE_CPU;
	mdevp->low_temp_adjust = rockchip_monitor_cpu_low_temp_adjust;
	mdevp->high_temp_adjust = rockchip_monitor_cpu_high_temp_adjust;
	mdevp->update_volt = rockchip_monitor_check_rate_volt;
	mdevp->data = (void *)policy;
	mdevp->opp_info = &cluster->opp_info;
	cpumask_copy(&mdevp->allowed_cpus, policy->cpus);
	mdev_info = rockchip_system_monitor_register(dev, mdevp);
	if (IS_ERR(mdev_info)) {
		kfree(mdevp);
		dev_err(dev, "failed to register system monitor\n");
		return -EINVAL;
	}
	mdev_info->devp = mdevp;
	cluster->mdev_info = mdev_info;

	return 0;
}

static int rockchip_cpufreq_remove_monitor(struct cluster_info *cluster)
{
	if (cluster->mdev_info) {
		kfree(cluster->mdev_info->devp);
		rockchip_system_monitor_unregister(cluster->mdev_info);
		cluster->mdev_info = NULL;
	}

	return 0;
}

static int rockchip_cpufreq_remove_dsu_qos(struct cluster_info *cluster)
{
	struct cluster_info *ci;

	if (!cluster->is_opp_shared_dsu)
		return 0;

	list_for_each_entry(ci, &cluster_info_list, list_head) {
		if (ci->is_opp_shared_dsu)
			continue;
		if (freq_qos_request_active(&ci->dsu_qos_req))
			freq_qos_remove_request(&ci->dsu_qos_req);
	}

	return 0;
}

static int rockchip_cpufreq_add_dsu_qos_req(struct cluster_info *cluster,
					    struct cpufreq_policy *policy)
{
	struct device *dev = cluster->opp_info.dev;
	struct cluster_info *ci;
	int ret;

	if (!cluster->is_opp_shared_dsu)
		return 0;

	list_for_each_entry(ci, &cluster_info_list, list_head) {
		if (ci->is_opp_shared_dsu)
			continue;
		ret = freq_qos_add_request(&policy->constraints,
					   &ci->dsu_qos_req,
					   FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			dev_err(dev, "failed to add dsu freq constraint\n");
			goto error;
		}
	}

	return 0;

error:
	rockchip_cpufreq_remove_dsu_qos(cluster);

	return ret;
}

static int rockchip_cpufreq_notifier(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cluster_info *cluster;

	cluster = rockchip_cluster_info_lookup(policy->cpu);
	if (!cluster)
		return NOTIFY_BAD;

	if (event == CPUFREQ_CREATE_POLICY) {
		if (rockchip_cpufreq_add_monitor(cluster, policy))
			return NOTIFY_BAD;
		if (rockchip_cpufreq_add_dsu_qos_req(cluster, policy))
			return NOTIFY_BAD;
	} else if (event == CPUFREQ_REMOVE_POLICY) {
		rockchip_cpufreq_remove_monitor(cluster);
		rockchip_cpufreq_remove_dsu_qos(cluster);
	}

	return NOTIFY_OK;
}

static struct notifier_block rockchip_cpufreq_notifier_block = {
	.notifier_call = rockchip_cpufreq_notifier,
};

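/*
 * Helper used to keep CPUs out of the given idle state while a cluster runs
 * above its idle-threshold frequency. The built-in variant disables the
 * cpuidle state per CPU; the module build falls back to a refcounted CPU
 * latency QoS request that affects all CPUs.
 */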
#ifdef MODULE
static struct pm_qos_request idle_pm_qos;
static int idle_disable_refcnt;
static DEFINE_MUTEX(idle_disable_lock);

static int rockchip_cpufreq_idle_state_disable(struct cpumask *cpumask,
					       int index, bool disable)
{
	mutex_lock(&idle_disable_lock);

	if (disable) {
		if (idle_disable_refcnt == 0)
			cpu_latency_qos_update_request(&idle_pm_qos, 0);
		idle_disable_refcnt++;
	} else {
		if (--idle_disable_refcnt == 0)
			cpu_latency_qos_update_request(&idle_pm_qos,
						       PM_QOS_DEFAULT_VALUE);
	}

	mutex_unlock(&idle_disable_lock);

	return 0;
}
#else
static int rockchip_cpufreq_idle_state_disable(struct cpumask *cpumask,
					       int index, bool disable)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpumask) {
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv)
			continue;
		if (index >= drv->state_count)
			continue;
		cpuidle_driver_state_disabled(drv, index, disable);
	}

	if (disable) {
		preempt_disable();
		for_each_cpu(cpu, cpumask) {
			if (cpu != smp_processor_id() && cpu_online(cpu))
				wake_up_if_idle(cpu);
		}
		preempt_enable();
	}

	return 0;
}
#endif

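/*
 * Request a DSU frequency of 4/5 of the new CPU frequency, rounded down to a
 * 100 MHz step (cpufreq frequencies are in kHz), through the per-cluster
 * FREQ_QOS_MIN request added in rockchip_cpufreq_add_dsu_qos_req().
 */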
#define cpu_to_dsu_freq(freq) ((freq) * 4 / 5)

static int rockchip_cpufreq_update_dsu_req(struct cluster_info *cluster,
					   unsigned int freq)
{
	struct device *dev = cluster->opp_info.dev;
	unsigned int dsu_freq = rounddown(cpu_to_dsu_freq(freq), 100000);

	if (cluster->is_opp_shared_dsu ||
	    !freq_qos_request_active(&cluster->dsu_qos_req))
		return 0;

	dev_dbg(dev, "cpu to dsu: %u -> %u\n", freq, dsu_freq);

	return freq_qos_update_request(&cluster->dsu_qos_req, dsu_freq);
}

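/*
 * Frequency-transition notifier (registered on RK3588 only): disable the
 * deeper idle state (index 1) before crossing the idle-threshold frequency
 * upwards, re-enable it after dropping below it, and refresh the DSU
 * frequency request after every change.
 */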
static int rockchip_cpufreq_transition_notifier(struct notifier_block *nb,
						unsigned long event, void *data)
{
	struct cpufreq_freqs *freqs = data;
	struct cpufreq_policy *policy = freqs->policy;
	struct cluster_info *cluster;

	cluster = rockchip_cluster_info_lookup(policy->cpu);
	if (!cluster)
		return NOTIFY_BAD;

	if (event == CPUFREQ_PRECHANGE) {
		if (cluster->idle_threshold_freq &&
		    freqs->new >= cluster->idle_threshold_freq &&
		    !cluster->is_idle_disabled) {
			rockchip_cpufreq_idle_state_disable(policy->cpus, 1,
							    true);
			cluster->is_idle_disabled = true;
		}
	} else if (event == CPUFREQ_POSTCHANGE) {
		if (cluster->idle_threshold_freq &&
		    freqs->new < cluster->idle_threshold_freq &&
		    cluster->is_idle_disabled) {
			rockchip_cpufreq_idle_state_disable(policy->cpus, 1,
							    false);
			cluster->is_idle_disabled = false;
		}
		rockchip_cpufreq_update_dsu_req(cluster, freqs->new);
	}

	return NOTIFY_OK;
}

static struct notifier_block rockchip_cpufreq_transition_notifier_block = {
	.notifier_call = rockchip_cpufreq_transition_notifier,
};

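/* Dump each cluster's current frequency and supply voltage(s) on panic. */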
static int rockchip_cpufreq_panic_notifier(struct notifier_block *nb,
					   unsigned long v, void *p)
{
	struct cluster_info *ci;
	struct device *dev;

	list_for_each_entry(ci, &cluster_info_list, list_head) {
		dev = ci->opp_info.dev;

		if (ci->regulator_count == 1)
			dev_info(dev, "cur_freq: %lu Hz, volt: %lu uV\n",
				 ci->rate, ci->volt);
		else
			dev_info(dev, "cur_freq: %lu Hz, volt_vdd: %lu uV, volt_mem: %lu uV\n",
				 ci->rate, ci->volt, ci->mem_volt);
	}

	return 0;
}

static struct notifier_block rockchip_cpufreq_panic_notifier_block = {
	.notifier_call = rockchip_cpufreq_panic_notifier,
};

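/*
 * Build the cluster list for all possible CPUs, register the policy,
 * transition (RK3588 only) and panic notifiers, then hand the actual cpufreq
 * work to the generic cpufreq-dt driver via a platform device.
 */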
static int __init rockchip_cpufreq_driver_init(void)
{
	struct cluster_info *cluster, *pos;
	struct cpufreq_dt_platform_data pdata = {0};
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cluster = rockchip_cluster_info_lookup(cpu);
		if (cluster)
			continue;

		cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
		if (!cluster) {
			ret = -ENOMEM;
			goto release_cluster_info;
		}

		ret = rockchip_cpufreq_cluster_init(cpu, cluster);
		if (ret) {
			pr_err("Failed to initialize dvfs info cpu%d\n", cpu);
			kfree(cluster);
			goto release_cluster_info;
		}
		list_add(&cluster->list_head, &cluster_info_list);
	}

	pdata.have_governor_per_policy = true;
	pdata.suspend = rockchip_cpufreq_suspend;

	ret = cpufreq_register_notifier(&rockchip_cpufreq_notifier_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret) {
		pr_err("failed to register cpufreq policy notifier\n");
		goto release_cluster_info;
	}

	if (of_machine_is_compatible("rockchip,rk3588")) {
		ret = cpufreq_register_notifier(&rockchip_cpufreq_transition_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
		if (ret) {
			cpufreq_unregister_notifier(&rockchip_cpufreq_notifier_block,
						    CPUFREQ_POLICY_NOTIFIER);
			pr_err("failed to register cpufreq transition notifier\n");
			goto release_cluster_info;
		}
#ifdef MODULE
		cpu_latency_qos_add_request(&idle_pm_qos, PM_QOS_DEFAULT_VALUE);
#endif
	}

	ret = atomic_notifier_chain_register(&panic_notifier_list,
					     &rockchip_cpufreq_panic_notifier_block);
	if (ret)
		pr_err("failed to register cpufreq panic notifier\n");

	return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
			       -1, (void *)&pdata,
			       sizeof(struct cpufreq_dt_platform_data)));

release_cluster_info:
	list_for_each_entry_safe(cluster, pos, &cluster_info_list, list_head) {
		list_del(&cluster->list_head);
		kfree(cluster);
	}
	return ret;
}
module_init(rockchip_cpufreq_driver_init);

MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip cpufreq driver");
MODULE_LICENSE("GPL v2");