/*
 * Copyright (c) 2024-2025, Rockchip Electronics Co., Ltd. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <drivers/delay_timer.h>
#include <drivers/scmi.h>

#include "otp.h"
#include <plat_private.h>
#include <platform_def.h>
#include <rk3568_clk.h>
#include <scmi_clock.h>

enum pll_type_sel {
	PLL_SEL_AUTO, /* all plls (normal pll or pvtpll) */
	PLL_SEL_PVT,
	PLL_SEL_NOR,
	PLL_SEL_AUTO_NOR /* all normal plls (apll/gpll/npll) */
};

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define RK3568_CPU_PVTPLL_CON0	0x10
#define RK3568_GPU_PVTPLL_CON0	0x700
#define RK3568_NPU_PVTPLL_CON0	0x740

#define GPLL_RATE		1188000000
#define MAX_RATE_TABLE		16

#define CLKDIV_5BITS_SHF0(div)	BITS_WITH_WMASK(div, 0x1f, 0)
#define CLKDIV_5BITS_SHF8(div)	BITS_WITH_WMASK(div, 0x1f, 8)

#define CLKDIV_4BITS_SHF0(div)	BITS_WITH_WMASK(div, 0xf, 0)
#define CLKDIV_2BITS_SHF4(div)	BITS_WITH_WMASK(div, 0x3, 4)

/* core_i: from gpll or apll */
#define CLK_CORE_I_SEL_APLL	WMSK_BIT(6)
#define CLK_CORE_I_SEL_GPLL	BIT_WITH_WMSK(6)

/* clk_core:
 * from normal pll (core_i: gpll or apll) path or directly from apll
 */
#define CLK_CORE_SEL_CORE_I	WMSK_BIT(7)
#define CLK_CORE_SEL_APLL	BIT_WITH_WMSK(7)

/* cpu clk: from clk_core or pvtpll */
#define CLK_CORE_NDFT_CLK_CORE		WMSK_BIT(15)
#define CLK_CORE_NDFT_CLK_PVTPLL	BIT_WITH_WMSK(15)

/* clk_core_ndft path */
#define CLK_CORE_PATH_NOR_GPLL	(CLK_CORE_I_SEL_GPLL | CLK_CORE_SEL_CORE_I)
#define CLK_CORE_PATH_NOR_APLL	(CLK_CORE_I_SEL_APLL | CLK_CORE_SEL_CORE_I)
#define CLK_CORE_PATH_DIR_APLL	(CLK_CORE_SEL_APLL) /* from apll directly */

/* cpu clk path */
#define CPU_CLK_PATH_NOR_GPLL	(CLK_CORE_PATH_NOR_GPLL | \
				 CLK_CORE_NDFT_CLK_CORE)
#define CPU_CLK_PATH_NOR_APLL	(CLK_CORE_PATH_NOR_APLL | \
				 CLK_CORE_NDFT_CLK_CORE)
#define CPU_CLK_PATH_DIR_APLL	(CLK_CORE_PATH_DIR_APLL | \
				 CLK_CORE_NDFT_CLK_CORE)
#define CPU_CLK_PATH_PVTPLL	CLK_CORE_NDFT_CLK_PVTPLL

/* dsu clk path */
#define SCLK_PATH_NOR_APLL	(BITS_WITH_WMASK(0, 0x3, 8) | WMSK_BIT(15))
#define SCLK_PATH_NOR_GPLL	(BITS_WITH_WMASK(0x1, 0x3, 8) | WMSK_BIT(15))
#define SCLK_PATH_NOR_NPLL	(BITS_WITH_WMASK(0x2, 0x3, 8) | WMSK_BIT(15))
#define SCLK_PATH_DIR_NPLL	BIT_WITH_WMSK(15)

/* npu clk path */
#define CLK_NPU_SRC_NPLL	WMSK_BIT(6)
#define CLK_NPU_SRC_GPLL	BIT_WITH_WMSK(6)

#define CLK_NPU_NP5_SRC_NPLL	WMSK_BIT(7)
#define CLK_NPU_NP5_SRC_GPLL	BIT_WITH_WMSK(7)

#define NPU_PRE_CLK_SEL_PLL_SRC	WMSK_BIT(8)
#define NPU_PRE_CLK_SEL_NP5	BIT_WITH_WMSK(8)

#define CLK_NPU_MUX_PLL_SRC	WMSK_BIT(15)
#define CLK_NPU_MUX_PVTPLL	BIT_WITH_WMSK(15)

#define NPU_PRE_CLK_PATH_NPLL	(CLK_NPU_SRC_NPLL | NPU_PRE_CLK_SEL_PLL_SRC)
#define NPU_PRE_CLK_PATH_GPLL	(CLK_NPU_SRC_GPLL | NPU_PRE_CLK_SEL_PLL_SRC)
#define NPU_PRE_CLK_PATH_NP5_NPLL	(CLK_NPU_NP5_SRC_NPLL | \
					 NPU_PRE_CLK_SEL_NP5)
#define NPU_PRE_CLK_PATH_NP5_GPLL	(CLK_NPU_NP5_SRC_GPLL | \
					 NPU_PRE_CLK_SEL_NP5)

#define NPU_CLK_PATH_NOR_NPLL	(NPU_PRE_CLK_PATH_NPLL | CLK_NPU_MUX_PLL_SRC)
#define NPU_CLK_PATH_NOR_GPLL	(NPU_PRE_CLK_PATH_GPLL | CLK_NPU_MUX_PLL_SRC)
#define NPU_CLK_PATH_NP5_NPLL	(NPU_PRE_CLK_PATH_NP5_NPLL | \
				 CLK_NPU_MUX_PLL_SRC)
#define NPU_CLK_PATH_NP5_GPLL	(NPU_PRE_CLK_PATH_NP5_GPLL | \
				 CLK_NPU_MUX_PLL_SRC)
#define NPU_CLK_PATH_PVTPLL	CLK_NPU_MUX_PVTPLL

/* gpu clk path */
#define GPU_CLK_PATH_NOR_MPLL	(WMSK_BIT(11) | BITS_WITH_WMASK(0, 0x3, 6))
#define GPU_CLK_PATH_NOR_GPLL	(WMSK_BIT(11) | BITS_WITH_WMASK(0x1, 0x3, 6))
#define GPU_CLK_PATH_NOR_CPLL	(WMSK_BIT(11) | BITS_WITH_WMASK(0x2, 0x3, 6))
#define GPU_CLK_PATH_NOR_NPLL	(WMSK_BIT(11) | BITS_WITH_WMASK(0x3, 0x3, 6))
#define GPU_CLK_PATH_PVTPLL	BIT_WITH_WMSK(11)

#define PVTPLL_NEED(type, length)	(((type) == PLL_SEL_PVT || \
					  (type) == PLL_SEL_AUTO) && \
					 (length))

#define RK3568_CPU_OPP_INFO_OFFSET	(OTP_S_BYTE_SIZE + 54)
#define RK3568_GPU_OPP_INFO_OFFSET	(OTP_S_BYTE_SIZE + 60)
#define RK3568_NPU_OPP_INFO_OFFSET	(OTP_S_BYTE_SIZE + 66)

struct sys_clk_info_t {
	unsigned long cpu_rate;
	unsigned long gpu_rate;
	unsigned long npu_rate;
};

struct otp_opp_info {
	uint16_t min_freq;
	uint16_t max_freq;
	uint8_t volt;
	uint8_t length;
} __packed __aligned(2);

struct pvtpll_table {
	unsigned int rate;
	uint32_t refdiv;
	uint32_t fbdiv;
	uint32_t postdiv1;
	uint32_t postdiv2;
	uint32_t dsmpd;
	uint32_t frac;
	uint32_t length;
};

#define ROCKCHIP_CPU_PVTPLL(_rate, _refdiv, _fbdiv, _postdiv1,	\
			    _postdiv2, _dsmpd, _frac, _length)	\
{								\
	.rate = _rate##U,					\
	.refdiv = _refdiv,					\
	.fbdiv = _fbdiv,					\
	.postdiv1 = _postdiv1,					\
	.postdiv2 = _postdiv2,					\
	.dsmpd = _dsmpd,					\
	.frac = _frac,						\
	.length = _length,					\
}

#define ROCKCHIP_GPU_PVTPLL(_rate, _length)	\
{						\
	.rate = _rate##U,			\
	.length = _length,			\
}

static struct pvtpll_table rk3568_cpu_pvtpll_table[] = {
	ROCKCHIP_CPU_PVTPLL(1992000000, 1, 83, 1, 1, 1, 0, 0x33),
	ROCKCHIP_CPU_PVTPLL(1800000000, 1, 75, 1, 1, 1, 0, 0x33),
	ROCKCHIP_CPU_PVTPLL(1608000000, 1, 67, 1, 1, 1, 0, 0x3b),
	ROCKCHIP_CPU_PVTPLL(1416000000, 1, 118, 2, 1, 1, 0, 0x43),
	ROCKCHIP_CPU_PVTPLL(1200000000, 1, 100, 2, 1, 1, 0, 0x53),
	ROCKCHIP_CPU_PVTPLL(1104000000, 1, 92, 2, 1, 1, 0, 0x53),
	ROCKCHIP_CPU_PVTPLL(1008000000, 1, 84, 2, 1, 1, 0, 0x5b),
	ROCKCHIP_CPU_PVTPLL(816000000, 1, 68, 2, 1, 1, 0, 0),
	ROCKCHIP_CPU_PVTPLL(600000000, 1, 100, 4, 1, 1, 0, 0),
	ROCKCHIP_CPU_PVTPLL(408000000, 1, 68, 2, 2, 1, 0, 0),
	ROCKCHIP_CPU_PVTPLL(312000000, 1, 78, 6, 1, 1, 0, 0),
	ROCKCHIP_CPU_PVTPLL(216000000, 1, 72, 4, 2, 1, 0, 0),
	{ /* sentinel */ },
};

static struct pvtpll_table rk3568_gpu_pvtpll_table[] = {
	/* rate_hz, length */
	ROCKCHIP_GPU_PVTPLL(800000000, 0x1db),
	ROCKCHIP_GPU_PVTPLL(700000000, 0x1db),
	ROCKCHIP_GPU_PVTPLL(600000000, 0x1db),
	ROCKCHIP_GPU_PVTPLL(400000000, 0),
	ROCKCHIP_GPU_PVTPLL(300000000, 0),
	ROCKCHIP_GPU_PVTPLL(200000000, 0),
	{ /* sentinel */ },
};

static struct pvtpll_table rk3568_npu_pvtpll_table[] = {
	/* rate_hz, length */
	ROCKCHIP_GPU_PVTPLL(1000000000, 0xd3),
	ROCKCHIP_GPU_PVTPLL(900000000, 0xd3),
	ROCKCHIP_GPU_PVTPLL(800000000, 0xd3),
	ROCKCHIP_GPU_PVTPLL(700000000, 0xdb),
	ROCKCHIP_GPU_PVTPLL(600000000, 0xfb),
	ROCKCHIP_GPU_PVTPLL(400000000, 0),
	ROCKCHIP_GPU_PVTPLL(300000000, 0),
	ROCKCHIP_GPU_PVTPLL(200000000, 0),
	{ /* sentinel */ },
};

static unsigned long rk3568_cpu_rates[] = {
	216000000, 312000000, 408000000, 816000000,
	1008000000, 1200000000, 1416000000, 1608000000,
	1800000000, 1992000000
};

static unsigned long rk3568_gpu_rates[] = {
	100000000, 200000000, 300000000, 400000000,
	500000000, 600000000, 700000000, 800000000,
	900000000, 1000000000, 1100000000, 1200000000
};

static struct sys_clk_info_t sys_clk_info;

static bool check_otp_ecc_ok(uint32_t addr)
{
	int i;

	for (i = 0; i < sizeof(struct otp_opp_info); i++) {
		if (rk_otp_ns_ecc_flag(addr + i))
			return false;
	}

	return true;
}

/*
 * Apply the per-chip length offset read from OTP: add it to bits [7:3] of
 * every table entry whose rate (in MHz) lies within [min_freq, max_freq],
 * clamping so that the 5-bit field never exceeds 31.
 */
static void rk3568_adjust_pvtpll_table(struct pvtpll_table *pvtpll,
				       unsigned int count,
				       uint16_t min_freq,
				       uint16_t max_freq,
				       uint8_t length)
{
	uint16_t freq;
	uint8_t cur_length;
	int i;

	if (length > 31)
		return;

	for (i = 0; i < count; i++) {
		if (!pvtpll[i].length)
			continue;
		cur_length = (pvtpll[i].length >> 3) & 0x1f;

		/*
		 * Max value of length is 31, so adjust length to
		 * make sure (cur_length + length) <= 31.
		 */
		if ((cur_length + length) > 31)
			length = 31 - cur_length;
		freq = pvtpll[i].rate / 1000000;
		if ((freq >= min_freq) && (freq <= max_freq))
			pvtpll[i].length += (length << 3);
	}
}

static unsigned int
rockchip_get_pvtpll_length(struct pvtpll_table *table, int count,
			   unsigned long rate)
{
	int i;

	for (i = 0; i < count; i++) {
		if (rate == table[i].rate)
			return table[i].length;
	}
	return 0;
}

static struct pvtpll_table *rkclk_get_pll_config(unsigned int freq_hz)
{
	unsigned int rate_count = ARRAY_SIZE(rk3568_cpu_pvtpll_table);
	int i;

	for (i = 0; i < rate_count; i++) {
		if (freq_hz == rk3568_cpu_pvtpll_table[i].rate)
			return &rk3568_cpu_pvtpll_table[i];
	}
	return NULL;
}

static int rk3568_apll_set_rate(unsigned long rate, enum pll_type_sel type)
{
	struct pvtpll_table *div;
	int delay = 2400;

	div = rkclk_get_pll_config(rate);
	if (div == NULL)
		return SCMI_INVALID_PARAMETERS;

	if (PVTPLL_NEED(type, div->length)) {
		/* set pvtpll length */
		mmio_write_32(CPUGRF_BASE + RK3568_CPU_PVTPLL_CON0,
			      0xffff0000);
		udelay(1);
		mmio_write_32(CPUGRF_BASE + RK3568_CPU_PVTPLL_CON0,
			      0xffff0000 | div->length);
		udelay(1);
		/* set core mux pvtpll */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(0),
			      CPU_CLK_PATH_PVTPLL);
	}

	/* pll enter slow mode */
	mmio_write_32(CRU_BASE + 0xc0,
		      (RK3568_PLL_MODE_MASK <<
		       (16 + RK3568_PLL_MODE_SHIFT)) |
		      (RK3568_PLL_MODE_SLOWMODE << RK3568_PLL_MODE_SHIFT));
	/* update pll values */
	mmio_write_32(CRU_BASE + RK3568_PLLCON(0),
		      (RK3568_PLLCON0_FBDIV_MASK <<
		       (16 + RK3568_PLLCON0_FBDIV_SHIFT)) |
		      (div->fbdiv << RK3568_PLLCON0_FBDIV_SHIFT));
	mmio_write_32(CRU_BASE + RK3568_PLLCON(0),
		      (RK3568_PLLCON0_POSTDIV1_MASK <<
		       (16 + RK3568_PLLCON0_POSTDIV1_SHIFT)) |
		      (div->postdiv1 << RK3568_PLLCON0_POSTDIV1_SHIFT));
	mmio_write_32(CRU_BASE + RK3568_PLLCON(1),
		      (RK3568_PLLCON1_REFDIV_MASK <<
		       (16 + RK3568_PLLCON1_REFDIV_SHIFT)) |
		      (div->refdiv << RK3568_PLLCON1_REFDIV_SHIFT));
	mmio_write_32(CRU_BASE + RK3568_PLLCON(1),
		      (RK3568_PLLCON1_POSTDIV2_MASK <<
		       (16 + RK3568_PLLCON1_POSTDIV2_SHIFT)) |
		      (div->postdiv2 << RK3568_PLLCON1_POSTDIV2_SHIFT));
	mmio_write_32(CRU_BASE + RK3568_PLLCON(1),
		      (RK3568_PLLCON1_DSMPD_MASK <<
		       (16 + RK3568_PLLCON1_DSMPD_SHIFT)) |
		      (div->dsmpd << RK3568_PLLCON1_DSMPD_SHIFT));

	/* wait for the pll to lock */
	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + RK3568_PLLCON(1)) &
		    RK3568_PLLCON1_LOCK_STATUS)
			break;
		udelay(1);
		delay--;
	}
	if (delay == 0)
		INFO("%s:ERROR: PLL WAIT LOCK FAILED\n", __func__);

	/* pll enter normal mode */
	mmio_write_32(CRU_BASE + 0xc0,
		      (RK3568_PLL_MODE_MASK << (16 + RK3568_PLL_MODE_SHIFT)) |
		      (RK3568_PLL_MODE_NORMAL << RK3568_PLL_MODE_SHIFT));

	return 0;
}

static unsigned long rk3568_apll_get_rate(void)
{
	unsigned int fbdiv, postdiv1, refdiv, postdiv2;
	uint64_t rate64 = 24000000;
	int mode;

	mode = (mmio_read_32(CRU_BASE + 0xc0) >> RK3568_PLL_MODE_SHIFT) &
	       RK3568_PLL_MODE_MASK;

	if (mode == RK3568_PLL_MODE_SLOWMODE)
		return rate64;

	fbdiv = (mmio_read_32(CRU_BASE + RK3568_PLLCON(0)) >>
		 RK3568_PLLCON0_FBDIV_SHIFT) &
		RK3568_PLLCON0_FBDIV_MASK;
	postdiv1 = (mmio_read_32(CRU_BASE + RK3568_PLLCON(0)) >>
		    RK3568_PLLCON0_POSTDIV1_SHIFT) &
		   RK3568_PLLCON0_POSTDIV1_MASK;
	refdiv = (mmio_read_32(CRU_BASE + RK3568_PLLCON(1)) >>
		  RK3568_PLLCON1_REFDIV_SHIFT) &
		 RK3568_PLLCON1_REFDIV_MASK;
	postdiv2 = (mmio_read_32(CRU_BASE + RK3568_PLLCON(1)) >>
		    RK3568_PLLCON1_POSTDIV2_SHIFT) &
		   RK3568_PLLCON1_POSTDIV2_MASK;

	rate64 *= fbdiv;
	rate64 = rate64 / refdiv;
	rate64 = rate64 / postdiv1;
	rate64 = rate64 / postdiv2;

	return (unsigned long)rate64;
}

static int clk_cpu_set_rate(unsigned long rate, enum pll_type_sel type)
{
	int div = 0, ret = 0;

	if (!rate)
		return SCMI_INVALID_PARAMETERS;

	/* set clk core div to 3 */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(0),
		      CLKDIV_5BITS_SHF8(2) | CLKDIV_5BITS_SHF0(2));
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(1),
		      CLKDIV_5BITS_SHF8(2) | CLKDIV_5BITS_SHF0(2));
	/* set atcore/gicclk div */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(3),
		      CLKDIV_5BITS_SHF8(7) | CLKDIV_5BITS_SHF0(7));
	/* set pclk/periph div */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(4),
		      CLKDIV_5BITS_SHF8(9) | CLKDIV_5BITS_SHF0(9));

	/* set dsu div to 4 */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(2), CLKDIV_4BITS_SHF0(3));

	/* set core mux gpll */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(0), CPU_CLK_PATH_NOR_GPLL);
	/* set dsu mux gpll */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(2), SCLK_PATH_NOR_GPLL);

	/* set apll */
	ret = rk3568_apll_set_rate(rate, type);
	if (ret < 0)
		return ret;

	/* set core mux apll */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(0), CLK_CORE_PATH_DIR_APLL);

	div = DIV_ROUND_UP(rate, 300000000);
	div = div - 1;
	/* set atcore/gicclk div */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(3),
		      CLKDIV_5BITS_SHF8(div) | CLKDIV_5BITS_SHF0(div));
	/* set pclk/periph div */
	div = DIV_ROUND_UP(rate, 300000000);
	div = div - 1;
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(4),
		      CLKDIV_5BITS_SHF8(div) | CLKDIV_5BITS_SHF0(div));

	if (rate >= 1608000000) {
		/* set dsu mux npll */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(2),
			      SCLK_PATH_DIR_NPLL);
		/* set dsu div to 1 */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(2),
			      CLKDIV_4BITS_SHF0(0));
	} else {
		/* set dsu mux apll */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(2),
			      SCLK_PATH_NOR_APLL);
		/* set dsu div to 2 */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(2),
			      CLKDIV_4BITS_SHF0(1));
	}

	/* set clk core div to 1 */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(0),
		      CLKDIV_5BITS_SHF8(0) | CLKDIV_5BITS_SHF0(0));
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(1),
		      CLKDIV_5BITS_SHF8(0) | CLKDIV_5BITS_SHF0(0));
	return ret;
}
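
/*
 * SCMI clock callbacks. The set_rate handlers cache the last requested rate
 * in sys_clk_info so that pvtplls_resume() can restore it after suspend.
 */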

static int clk_scmi_cpu_set_rate(struct rk_scmi_clock *clock, unsigned long rate)
{
	int ret;

	ret = clk_cpu_set_rate(rate, PLL_SEL_AUTO);

	if (!ret)
		sys_clk_info.cpu_rate = rate;

	return ret;
}

static unsigned long clk_scmi_cpu_get_rate(struct rk_scmi_clock *clock)
{
	return rk3568_apll_get_rate();
}

static int clk_scmi_cpu_set_status(struct rk_scmi_clock *clock, bool status)
{
	return 0;
}

static unsigned long clk_scmi_gpu_get_rate(struct rk_scmi_clock *clock)
{
	int div;

	if (mmio_read_32(CRU_BASE + RK3568_CLK_SEL(6)) & 0x0800) {
		return 0;
	} else {
		div = mmio_read_32(CRU_BASE + RK3568_CLK_SEL(6));
		div = div & 0x000f;
		return GPLL_RATE / (div + 1);
	}
}

static int clk_gpu_set_rate(unsigned long rate, enum pll_type_sel type)
{
	unsigned int length;
	int div;

	if (!rate)
		return SCMI_INVALID_PARAMETERS;

	/* set gpu div 6 */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(6), CLKDIV_4BITS_SHF0(5));
	/* set gpu mux gpll */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(6), GPU_CLK_PATH_NOR_GPLL);

	/* set pvtpll ring */
	length = rockchip_get_pvtpll_length(rk3568_gpu_pvtpll_table,
					    ARRAY_SIZE(rk3568_gpu_pvtpll_table),
					    rate);
	if (PVTPLL_NEED(type, length)) {
		mmio_write_32(GRF_BASE + RK3568_GPU_PVTPLL_CON0,
			      0xffff0000);
		udelay(1);
		mmio_write_32(GRF_BASE + RK3568_GPU_PVTPLL_CON0,
			      0xffff0000 | length);
		udelay(1);
		/* set gpu mux pvtpll */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(6),
			      GPU_CLK_PATH_PVTPLL);
	}

	div = DIV_ROUND_UP(GPLL_RATE, rate);
	/* set gpu div */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(6), CLKDIV_4BITS_SHF0((div - 1)));

	return 0;
}

static int clk_scmi_gpu_set_rate(struct rk_scmi_clock *clock, unsigned long rate)
{
	int ret;

	ret = clk_gpu_set_rate(rate, PLL_SEL_AUTO);

	if (!ret)
		sys_clk_info.gpu_rate = rate;
	return ret;
}

static int clk_scmi_gpu_set_status(struct rk_scmi_clock *clock, bool status)
{
	return 0;
}

static unsigned long clk_scmi_npu_get_rate(struct rk_scmi_clock *clock)
{
	int div;

	if (mmio_read_32(CRU_BASE + RK3568_CLK_SEL(7)) & 0x8000) {
		return 0;
	} else {
		div = mmio_read_32(CRU_BASE + RK3568_CLK_SEL(7));
		div = div & 0x000f;
		return GPLL_RATE / (div + 1);
	}
}

static int clk_npu_set_rate(unsigned long rate, enum pll_type_sel type)
{
	unsigned int length;
	int div;

	if (!rate)
		return SCMI_INVALID_PARAMETERS;

	/* set npu div 6 */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(7),
		      CLKDIV_2BITS_SHF4(2) | CLKDIV_4BITS_SHF0(5));
	/* set npu mux gpll */
	mmio_write_32(CRU_BASE + RK3568_CLK_SEL(7),
		      NPU_CLK_PATH_NOR_GPLL | CLK_NPU_NP5_SRC_GPLL);

	/* set pvtpll ring */
	length = rockchip_get_pvtpll_length(rk3568_npu_pvtpll_table,
					    ARRAY_SIZE(rk3568_npu_pvtpll_table),
					    rate);
	if (PVTPLL_NEED(type, length)) {
		mmio_write_32(GRF_BASE + RK3568_NPU_PVTPLL_CON0,
			      0xffff0000);
		udelay(1);
		mmio_write_32(GRF_BASE + RK3568_NPU_PVTPLL_CON0,
			      0xffff0000 | length);
		udelay(1);
		/* set npu mux pvtpll */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(7),
			      NPU_CLK_PATH_PVTPLL);
	} else {
		div = DIV_ROUND_UP(GPLL_RATE, rate);
		/* set npu div */
		mmio_write_32(CRU_BASE + RK3568_CLK_SEL(7),
			      CLKDIV_4BITS_SHF0((div - 1)));
	}

	return 0;
}

static int clk_scmi_npu_set_rate(struct rk_scmi_clock *clock, unsigned long rate)
{
	int ret;

	ret = clk_npu_set_rate(rate, PLL_SEL_AUTO);

	if (!ret)
		sys_clk_info.npu_rate = rate;

	return ret;
}

static int clk_scmi_npu_set_status(struct rk_scmi_clock *clock, bool status)
{
	return 0;
}

static const struct rk_clk_ops clk_scmi_cpu_ops = {
	.get_rate = clk_scmi_cpu_get_rate,
	.set_rate = clk_scmi_cpu_set_rate,
	.set_status = clk_scmi_cpu_set_status,
};

static const struct rk_clk_ops clk_scmi_gpu_ops = {
	.get_rate = clk_scmi_gpu_get_rate,
	.set_rate = clk_scmi_gpu_set_rate,
	.set_status = clk_scmi_gpu_set_status,
};

static const struct rk_clk_ops clk_scmi_npu_ops = {
	.get_rate = clk_scmi_npu_get_rate,
	.set_rate = clk_scmi_npu_set_rate,
	.set_status = clk_scmi_npu_set_status,
};

struct rk_scmi_clock clock_table[] = {
	{
		.id = 0,
		.name = "clk_scmi_cpu",
		.clk_ops = &clk_scmi_cpu_ops,
		.rate_table = rk3568_cpu_rates,
		.rate_cnt = ARRAY_SIZE(rk3568_cpu_rates),
	},
	{
		.id = 1,
		.name = "clk_scmi_gpu",
		.clk_ops = &clk_scmi_gpu_ops,
		.rate_table = rk3568_gpu_rates,
		.rate_cnt = ARRAY_SIZE(rk3568_gpu_rates),
	},
	{
		.id = 2,
		.name = "clk_scmi_npu",
		.clk_ops = &clk_scmi_npu_ops,
		.rate_table = rk3568_gpu_rates,
		.rate_cnt = ARRAY_SIZE(rk3568_gpu_rates),
	},
};

size_t rockchip_scmi_clock_count(unsigned int agent_id __unused)
{
	return ARRAY_SIZE(clock_table);
}

rk_scmi_clock_t *rockchip_scmi_get_clock(uint32_t agent_id __unused,
					 uint32_t clock_id)
{
	if (clock_id < ARRAY_SIZE(clock_table))
		return &clock_table[clock_id];

	return NULL;
}

void pvtplls_suspend(void)
{
	clk_gpu_set_rate(100000000, PLL_SEL_NOR);
	clk_npu_set_rate(100000000, PLL_SEL_NOR);
	clk_cpu_set_rate(408000000, PLL_SEL_NOR);
}

void pvtplls_resume(void)
{
	clk_cpu_set_rate(sys_clk_info.cpu_rate, PLL_SEL_AUTO);
	clk_gpu_set_rate(sys_clk_info.gpu_rate, PLL_SEL_AUTO);
	clk_npu_set_rate(sys_clk_info.npu_rate, PLL_SEL_AUTO);
}

void sys_reset_pvtplls_prepare(void)
{
	clk_gpu_set_rate(100000000, PLL_SEL_NOR);
	clk_npu_set_rate(100000000, PLL_SEL_NOR);
	clk_cpu_set_rate(408000000, PLL_SEL_NOR);
}

void rockchip_clock_init(void)
{
	struct otp_opp_info cpu_opp_info, gpu_opp_info, npu_opp_info;
	int ret;

	ret = rk_otp_read(RK3568_CPU_OPP_INFO_OFFSET,
			  sizeof(cpu_opp_info),
			  (uint16_t *)&cpu_opp_info,
			  true);
	if (ret || !check_otp_ecc_ok(RK3568_CPU_OPP_INFO_OFFSET)) {
		INFO("get cpu_opp_info failed, use default config!\n");
		cpu_opp_info.min_freq = 1008;
		cpu_opp_info.max_freq = 1992;
		cpu_opp_info.volt = 50;
		cpu_opp_info.length = 4;
	}
	if (cpu_opp_info.length) {
		INFO("adjust cpu pvtpll: min=%uM, max=%uM, length=%u\n",
		     cpu_opp_info.min_freq, cpu_opp_info.max_freq, cpu_opp_info.length);

		rk3568_adjust_pvtpll_table(rk3568_cpu_pvtpll_table,
					   ARRAY_SIZE(rk3568_cpu_pvtpll_table),
					   cpu_opp_info.min_freq,
					   cpu_opp_info.max_freq,
					   cpu_opp_info.length);
	}

	ret = rk_otp_read(RK3568_GPU_OPP_INFO_OFFSET,
			  sizeof(gpu_opp_info),
			  (uint16_t *)&gpu_opp_info,
			  true);
	if (ret || !check_otp_ecc_ok(RK3568_GPU_OPP_INFO_OFFSET)) {
		INFO("get gpu_opp_info failed, use default config!\n");
		gpu_opp_info.min_freq = 600;
		gpu_opp_info.max_freq = 800;
		gpu_opp_info.volt = 50;
		gpu_opp_info.length = 6;
	}
	if (gpu_opp_info.length) {
		INFO("adjust gpu pvtpll: min=%uM, max=%uM, length=%u\n",
		     gpu_opp_info.min_freq, gpu_opp_info.max_freq, gpu_opp_info.length);

		rk3568_adjust_pvtpll_table(rk3568_gpu_pvtpll_table,
					   ARRAY_SIZE(rk3568_gpu_pvtpll_table),
					   gpu_opp_info.min_freq,
					   gpu_opp_info.max_freq,
					   gpu_opp_info.length);
	}

	ret = rk_otp_read(RK3568_NPU_OPP_INFO_OFFSET,
			  sizeof(npu_opp_info),
			  (uint16_t *)&npu_opp_info,
			  true);
	if (ret || !check_otp_ecc_ok(RK3568_NPU_OPP_INFO_OFFSET)) {
		INFO("get npu_opp_info failed, use default config!\n");
		npu_opp_info.min_freq = 600;
		npu_opp_info.max_freq = 1000;
		npu_opp_info.volt = 50;
		npu_opp_info.length = 6;
	}
	if (npu_opp_info.length) {
		INFO("adjust npu pvtpll: min=%uM, max=%uM, length=%u\n",
		     npu_opp_info.min_freq, npu_opp_info.max_freq, npu_opp_info.length);

		rk3568_adjust_pvtpll_table(rk3568_npu_pvtpll_table,
					   ARRAY_SIZE(rk3568_npu_pvtpll_table),
					   npu_opp_info.min_freq,
					   npu_opp_info.max_freq,
					   npu_opp_info.length);
	}
}