/*
 * Copyright 2024-2025 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>
#include <common/debug.h>
#include <drivers/clk.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <s32cc-clk-ids.h>
#include <s32cc-clk-modules.h>
#include <s32cc-clk-regs.h>
#include <s32cc-clk-utils.h>
#include <s32cc-mc-me.h>

#define MAX_STACK_DEPTH		(40U)

/* Fixed-point scaling factor used to keep precision in integer calculations. */
#define FP_PRECISION		(100000000UL)

struct s32cc_clk_drv {
	uintptr_t fxosc_base;
	uintptr_t armpll_base;
	uintptr_t periphpll_base;
	uintptr_t armdfs_base;
	uintptr_t cgm0_base;
	uintptr_t cgm1_base;
	uintptr_t cgm5_base;
	uintptr_t ddrpll_base;
	uintptr_t mc_me;
	uintptr_t mc_rgm;
	uintptr_t rdc;
};

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth);
static int get_module_rate(const struct s32cc_clk_obj *module,
			   const struct s32cc_clk_drv *drv,
			   unsigned long *rate,
			   unsigned int depth);

static int update_stack_depth(unsigned int *depth)
{
	if (*depth == 0U) {
		return -ENOMEM;
	}

	(*depth)--;
	return 0;
}

static struct s32cc_clk_drv *get_drv(void)
{
	static struct s32cc_clk_drv driver = {
		.fxosc_base = FXOSC_BASE_ADDR,
		.armpll_base = ARMPLL_BASE_ADDR,
		.periphpll_base = PERIPHPLL_BASE_ADDR,
		.armdfs_base = ARM_DFS_BASE_ADDR,
		.cgm0_base = CGM0_BASE_ADDR,
		.cgm1_base = CGM1_BASE_ADDR,
		.cgm5_base = MC_CGM5_BASE_ADDR,
		.ddrpll_base = DDRPLL_BASE_ADDR,
		.mc_me = MC_ME_BASE_ADDR,
		.mc_rgm = MC_RGM_BASE_ADDR,
		.rdc = RDC_BASE_ADDR,
	};

	return &driver;
}

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth);

static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);

	if (clk->module != NULL) {
		return clk->module;
	}

	if (clk->pclock != NULL) {
		return &clk->pclock->desc;
	}

	return NULL;
}

static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv,
			 uintptr_t *base)
{
	int ret = 0;

	switch (id) {
	case S32CC_FXOSC:
		*base = drv->fxosc_base;
		break;
	case S32CC_ARM_PLL:
		*base = drv->armpll_base;
		break;
	case S32CC_PERIPH_PLL:
		*base = drv->periphpll_base;
		break;
	case S32CC_DDR_PLL:
		*base = drv->ddrpll_base;
		break;
	case S32CC_ARM_DFS:
		*base = drv->armdfs_base;
		break;
	case S32CC_CGM0:
		*base = drv->cgm0_base;
		break;
	case S32CC_CGM1:
		*base = drv->cgm1_base;
		break;
	case S32CC_CGM5:
		*base = drv->cgm5_base;
		break;
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret != 0) {
		ERROR("Unknown clock source id: %u\n", id);
	}

	return ret;
}

static void enable_fxosc(const struct s32cc_clk_drv *drv)
{
	uintptr_t fxosc_base = drv->fxosc_base;
	uint32_t ctrl;

	ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base));
	if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) {
		return;
	}

	ctrl = FXOSC_CTRL_COMP_EN;
	ctrl &= ~FXOSC_CTRL_OSC_BYP;
	ctrl |= FXOSC_CTRL_EOCV(0x1);
	ctrl |= FXOSC_CTRL_GM_SEL(0x7);
	mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl);
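
	/*
	 * Note: EOCV programs the end-of-count value used to delay the
	 * "oscillator stable" indication and GM_SEL selects the amplifier
	 * gain; the values above are assumed to match the recommended
	 * settings for the board's crystal (see the FXOSC chapter of the
	 * Reference Manual for the exact field semantics).
	 */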

	/* Switch ON the crystal oscillator. */
	mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON);

	/* Wait until the clock is stable. */
	while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) {
	}
}

static int enable_osc(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (osc->source) {
	case S32CC_FXOSC:
		enable_fxosc(drv);
		break;
	/* FIRC and SIRC oscillators are enabled by default */
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ERROR("Invalid oscillator %d\n", osc->source);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);

	if (pll->source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
	}

	return pll->source;
}

static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq,
			   uint32_t *mfi, uint32_t *mfn)
{
	unsigned long vco;
	unsigned long mfn64;

	/* FRAC-N mode */
	*mfi = (uint32_t)(pll_vco / ref_freq);

	/* MFN formula: (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */
	mfn64 = pll_vco % ref_freq;
	mfn64 *= FP_PRECISION;
	mfn64 /= ref_freq;
	mfn64 *= 18432UL;
	mfn64 /= FP_PRECISION;

	if (mfn64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)mfn64;

	vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL;
	vco += (unsigned long)*mfi * FP_PRECISION;
	vco *= ref_freq;
	vco /= FP_PRECISION;
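
	/*
	 * The reconstruction above mirrors the hardware relation
	 * VCO = ref_freq * (MFI + MFN / 18432), using FP_PRECISION as a
	 * fixed-point scale. Illustrative example (not taken from a board
	 * configuration): ref_freq = 40 MHz and pll_vco = 1300 MHz give
	 * MFI = 32 and MFN = 9216, which reconstruct to exactly 1300 MHz.
	 * Any rounding loss shows up as a mismatch in the check below.
	 */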

	if (vco != pll_vco) {
		ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n",
		      pll_vco, vco);
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll)
{
	const struct s32cc_clk_obj *source = pll->source;
	const struct s32cc_clk *clk;

	if (source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
		return NULL;
	}

	if (source->type != s32cc_clk_t) {
		ERROR("The parent of the PLL isn't a clock\n");
		return NULL;
	}

	clk = s32cc_obj2clk(source);

	if (clk->module == NULL) {
		ERROR("The clock isn't connected to a module\n");
		return NULL;
	}

	source = clk->module;

	if ((source->type != s32cc_clkmux_t) &&
	    (source->type != s32cc_shared_clkmux_t)) {
		ERROR("The parent of the PLL isn't a MUX\n");
		return NULL;
	}

	return s32cc_obj2clkmux(source);
}

static void disable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void enable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void enable_odivs(uintptr_t pll_addr, uint32_t ndivs, uint32_t mask)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		if ((mask & BIT_32(i)) != 0U) {
			enable_odiv(pll_addr, i);
		}
	}
}

static int adjust_odiv_settings(const struct s32cc_pll *pll, uintptr_t pll_addr,
				uint32_t odivs_mask, unsigned long old_vco)
{
	uint64_t old_odiv_freq, odiv_freq;
	uint32_t i, pllodiv, pdiv;
	int ret = 0;

	if (old_vco == 0UL) {
		return 0;
	}

	for (i = 0; i < pll->ndividers; i++) {
		if ((odivs_mask & BIT_32(i)) == 0U) {
			continue;
		}

		pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, i));

		pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

		old_odiv_freq = ((old_vco * FP_PRECISION) / (pdiv + 1U)) / FP_PRECISION;
		pdiv = (uint32_t)(pll->vco_freq * FP_PRECISION / old_odiv_freq / FP_PRECISION);

		odiv_freq = pll->vco_freq * FP_PRECISION / pdiv / FP_PRECISION;

		if (old_odiv_freq != odiv_freq) {
			ERROR("Failed to adjust ODIV %" PRIu32 " to match previous frequency\n",
			      i);
		}

		pllodiv = PLLDIG_PLLODIV_DIV_SET(pdiv - 1U);
		mmio_write_32(PLLDIG_PLLODIV(pll_addr, i), pllodiv);
	}

	return ret;
}

static uint32_t get_enabled_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t mask = 0;
	uint32_t pllodiv;
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, i));
		if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
			mask |= BIT_32(i);
		}
	}

	return mask;
}

static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		disable_odiv(pll_addr, i);
	}
}

static void enable_pll_hw(uintptr_t pll_addr)
{
	/* Enable the PLL. */
	mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0);
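
	/*
	 * Writing zero clears PLLDIG_PLLCR_PLLPD (power-down), which is
	 * what actually powers the PLL up; the poll below then waits for
	 * PLLDIG_PLLSR_LOCK to be reported.
	 */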

	/* Poll until PLL acquires lock. */
	while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) {
	}
}

static void disable_pll_hw(uintptr_t pll_addr)
{
	mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD);
}

static bool is_pll_enabled(uintptr_t pll_base)
{
	uint32_t pllcr, pllsr;

	pllcr = mmio_read_32(PLLDIG_PLLCR(pll_base));
	pllsr = mmio_read_32(PLLDIG_PLLSR(pll_base));

	/* Enabled and locked PLL */
	if ((pllcr & PLLDIG_PLLCR_PLLPD) != 0U) {
		return false;
	}

	if ((pllsr & PLLDIG_PLLSR_LOCK) == 0U) {
		return false;
	}

	return true;
}

static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr,
		       const struct s32cc_clk_drv *drv, uint32_t sclk_id,
		       unsigned long sclk_freq, unsigned int depth)
{
	uint32_t rdiv = 1, mfi, mfn;
	unsigned long old_vco = 0UL;
	unsigned int ldepth = depth;
	uint32_t odivs_mask;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	odivs_mask = get_enabled_odivs(pll_addr, pll->ndividers);

	if (is_pll_enabled(pll_addr)) {
		ret = get_module_rate(&pll->desc, drv, &old_vco, ldepth);
		if (ret != 0) {
			return ret;
		}
	}

	/* Disable ODIVs */
	disable_odivs(pll_addr, pll->ndividers);

	/* Disable PLL */
	disable_pll_hw(pll_addr);

	/* Program PLLCLKMUX */
	mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id);

	/* Program VCO */
	mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr),
			   PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK,
			   PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi));

	mmio_write_32(PLLDIG_PLLFD(pll_addr),
		      PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN);

	ret = adjust_odiv_settings(pll, pll_addr, odivs_mask, old_vco);
	if (ret != 0) {
		return ret;
	}

	enable_pll_hw(pll_addr);

	/* Enable out dividers */
	enable_odivs(pll_addr, pll->ndividers, odivs_mask);

	return ret;
}

static int enable_pll(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	unsigned int clk_src, ldepth = depth;
	unsigned long sclk_freq, pll_vco;
	const struct s32cc_clkmux *mux;
	uintptr_t pll_addr = UL(0x0);
	bool pll_enabled;
	uint32_t sclk_id;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	mux = get_pll_mux(pll);
	if (mux == NULL) {
		return -EINVAL;
	}

	if (pll->instance != mux->module) {
		ERROR("MUX type is not in sync with PLL ID\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	switch (mux->source_id) {
	case S32CC_CLK_FIRC:
		sclk_freq = 48U * MHZ;
		sclk_id = 0;
		break;
	case S32CC_CLK_FXOSC:
		sclk_freq = 40U * MHZ;
		sclk_id = 1;
		break;
	default:
		ERROR("Invalid source selection for PLL 0x%lx\n",
		      pll_addr);
		return -EINVAL;
	}

	ret = get_module_rate(&pll->desc, drv, &pll_vco, depth);
	if (ret != 0) {
		return ret;
	}

	pll_enabled = is_pll_enabled(pll_addr);
	clk_src = mmio_read_32(PLLDIG_PLLCLKMUX(pll_addr));
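
	/*
	 * Skip the reprogramming when the PLL is already running from the
	 * requested source at the requested VCO frequency; program_pll()
	 * would otherwise disable the output dividers and the PLL itself
	 * while reconfiguring it.
	 */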
	if ((clk_src == sclk_id) && pll_enabled &&
	    (pll_vco == pll->vco_freq)) {
		return 0;
	}

	return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq, ldepth);
}

static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv)
{
	const struct s32cc_clk_obj *parent;

	parent = pdiv->parent;
	if (parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return NULL;
	}

	if (parent->type != s32cc_pll_t) {
		ERROR("The parent of the divider is not a PLL instance\n");
		return NULL;
	}

	return s32cc_obj2pll(parent);
}

static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc)
{
	uint32_t pllodiv;
	uint32_t pdiv;

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index));
	pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

	if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) {
		return;
	}

	if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
		disable_odiv(pll_addr, div_index);
	}

	pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U);
	mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv);

	enable_odiv(pll_addr, div_index);
}

static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL DIV's parent\n");
	}

	return pdiv->parent;
}

static int enable_pll_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	uintptr_t pll_addr = 0x0ULL;
	unsigned int ldepth = depth;
	const struct s32cc_pll *pll;
	unsigned long pll_vco;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	pll = get_div_pll(pdiv);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return 0;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return -EINVAL;
	}

	ret = get_module_rate(&pll->desc, drv, &pll_vco, ldepth);
	if (ret != 0) {
		ERROR("Failed to enable the PLL due to unknown rate for 0x%" PRIxPTR "\n",
		      pll_addr);
		return ret;
	}

	dc = (uint32_t)(pll_vco / pdiv->freq);

	config_pll_out_div(pll_addr, pdiv->index, dc);

	return 0;
}

static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source,
			      bool safe_clk)
{
	uint32_t css, csc;

	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));

	/* Already configured */
	if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) &&
	    (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
	    ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) {
		return 0;
	}

	/* Ongoing clock switch? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux));

	/* Clear previous source. */
	csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK);

	if (!safe_clk) {
		/* Select the clock source and trigger the clock switch. */
		csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW;
	} else {
		/* Switch to safe clock */
		csc |= MC_CGM_MUXn_CSC_SAFE_SW;
	}

	mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc);
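
	/*
	 * The CLK_SW/SAFE_SW request bits are expected to be cleared by
	 * hardware once the request is taken over; progress is then
	 * reported through the SWIP flag and the SWTRG cause field of the
	 * CSS register, which the polling and checks below rely on.
	 */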

	/* Wait for configuration bit to auto-clear. */
	while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) &
		MC_CGM_MUXn_CSC_CLK_SW) != 0U) {
	}

	/* Is the clock switch completed? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	/*
	 * Check if the switch succeeded.
	 * Check switch trigger cause and the source.
	 */
	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));
	if (!safe_clk) {
		if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
		    (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) {
			return 0;
		}

		ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n",
		      mux, source, cgm_addr);
	} else {
		if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) ||
		     (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) &&
		    ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) {
			return 0;
		}

		ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n",
		      mux, cgm_addr);
	}

	return -EINVAL;
}

static int enable_cgm_mux(const struct s32cc_clkmux *mux,
			  const struct s32cc_clk_drv *drv)
{
	uintptr_t cgm_addr = UL(0x0);
	uint32_t mux_hw_clk;
	int ret;

	ret = get_base_addr(mux->module, drv, &cgm_addr);
	if (ret != 0) {
		return ret;
	}

	mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id);

	return cgm_mux_clk_config(cgm_addr, mux->index,
				  mux_hw_clk, false);
}

static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	struct s32cc_clk *clk;

	if (mux == NULL) {
		return NULL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return NULL;
	}

	return &clk->desc;
}

static int enable_mux(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	unsigned int ldepth = depth;
	const struct s32cc_clk *clk;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (mux == NULL) {
		return -EINVAL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return -EINVAL;
	}

	switch (mux->module) {
	/* PLL mux will be enabled by PLL setup */
	case S32CC_ARM_PLL:
	case S32CC_PERIPH_PLL:
	case S32CC_DDR_PLL:
		break;
	case S32CC_CGM1:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM0:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM5:
		ret = enable_cgm_mux(mux, drv);
		break;
	default:
		ERROR("Unknown mux parent type: %d\n", mux->module);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);

	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
	}

	return dfs->parent;
}
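
/*
 * Nothing has to be programmed at the DFS level itself: the DFS block runs
 * off its parent PLL and the per-port dividers are configured on demand by
 * enable_dfs_div(), so this callback only validates the recursion depth.
 */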
static int enable_dfs(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

static int get_dfs_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);
	unsigned int ldepth = depth;
	uintptr_t dfs_addr;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if (ret != 0) {
		ERROR("Failed to detect the DFS instance\n");
		return ret;
	}

	return get_module_rate(dfs->parent, drv, rate, ldepth);
}

static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div)
{
	const struct s32cc_clk_obj *parent = dfs_div->parent;

	if (parent->type != s32cc_dfs_t) {
		ERROR("DFS DIV doesn't have a DFS as parent\n");
		return NULL;
	}

	return s32cc_obj2dfs(parent);
}

static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div,
			   uint32_t *mfi, uint32_t *mfn)
{
	uint64_t factor64, tmp64, ofreq;
	uint32_t factor32;

	unsigned long in = dfs_freq;
	unsigned long out = dfs_div->freq;

	/**
	 * factor = (IN / OUT) / 2
	 * MFI = integer(factor)
	 * MFN = (factor - MFI) * 36
	 */
	factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL;
	tmp64 = factor64 / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	factor32 = (uint32_t)tmp64;
	*mfi = factor32;

	tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)tmp64;

	/* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */
	factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL;
	factor64 += ((uint64_t)*mfi) * FP_PRECISION;
	factor64 *= 2ULL;
	ofreq = (((uint64_t)in) * FP_PRECISION) / factor64;

	if (ofreq != dfs_div->freq) {
		ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n",
		      dfs_div->freq);
		ERROR("Nearest freq = %" PRIx64 "\n", ofreq);
		return -EINVAL;
	}

	return 0;
}

static int init_dfs_port(uintptr_t dfs_addr, uint32_t port,
			 uint32_t mfi, uint32_t mfn)
{
	uint32_t portsr, portolsr;
	uint32_t mask, old_mfi, old_mfn;
	uint32_t dvport;
	bool init_dfs;

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port));

	old_mfi = DFS_DVPORTn_MFI(dvport);
	old_mfn = DFS_DVPORTn_MFN(dvport);

	portsr = mmio_read_32(DFS_PORTSR(dfs_addr));
	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));

	/* Skip configuration if it's not needed */
	if (((portsr & BIT_32(port)) != 0U) &&
	    ((portolsr & BIT_32(port)) == 0U) &&
	    (mfi == old_mfi) && (mfn == old_mfn)) {
		return 0;
	}

	init_dfs = (portsr == 0U);

	if (init_dfs) {
		mask = DFS_PORTRESET_MASK;
	} else {
		mask = DFS_PORTRESET_SET(BIT_32(port));
	}

	mmio_write_32(DFS_PORTOLSR(dfs_addr), mask);
	mmio_write_32(DFS_PORTRESET(dfs_addr), mask);

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) {
	}

	if (init_dfs) {
		mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_write_32(DFS_DVPORTn(dfs_addr, port),
		      DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn));

	if (init_dfs) {
		/* DFS clk enable programming */
		mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}
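
	/*
	 * Releasing the port from reset lets its output divider start; the
	 * PORTSR poll below waits for the port to be reported as active and
	 * PORTOLSR is then checked for a loss-of-lock condition.
	 */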
	mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port));

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) {
	}

	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));
	if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) {
		ERROR("Failed to lock DFS divider\n");
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_dfs_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
	}

	return dfs_div->parent;
}

static int enable_dfs_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	unsigned int ldepth = depth;
	const struct s32cc_dfs *dfs;
	uintptr_t dfs_addr = 0UL;
	unsigned long dfs_freq;
	uint32_t mfi, mfn;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return -EINVAL;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if ((ret != 0) || (dfs_addr == 0UL)) {
		return -EINVAL;
	}

	ret = get_module_rate(&dfs->desc, drv, &dfs_freq, depth);
	if (ret != 0) {
		return ret;
	}

	ret = get_dfs_mfi_mfn(dfs_freq, dfs_div, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn);
}

typedef int (*enable_clk_t)(struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned int depth);

static int enable_part(struct s32cc_clk_obj *module,
		       const struct s32cc_clk_drv *drv,
		       unsigned int depth)
{
	const struct s32cc_part *part = s32cc_obj2part(module);
	uint32_t part_no = part->partition_id;

	if ((drv->mc_me == 0UL) || (drv->mc_rgm == 0UL) || (drv->rdc == 0UL)) {
		return -EINVAL;
	}

	return mc_me_enable_partition(drv->mc_me, drv->mc_rgm, drv->rdc, part_no);
}

static int enable_part_block(struct s32cc_clk_obj *module,
			     const struct s32cc_clk_drv *drv,
			     unsigned int depth)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);
	const struct s32cc_part *part = block->part;
	uint32_t part_no = part->partition_id;
	unsigned int ldepth = depth;
	uint32_t cofb;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if ((block->block >= s32cc_part_block0) &&
	    (block->block <= s32cc_part_block15)) {
		cofb = (uint32_t)block->block - (uint32_t)s32cc_part_block0;
		mc_me_enable_part_cofb(drv->mc_me, part_no, cofb, block->status);
	} else {
		ERROR("Unknown partition block type: %d\n", block->block);
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_part_block_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);

	return &block->part->desc;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth);
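
/*
 * A partition block link ties a clock to the MC_ME partition block that
 * gates it; enabling the link simply redirects the enable request to the
 * partition subtree so that the block (and its partition) are powered
 * before the clock is used.
 */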
static int enable_part_block_link(struct s32cc_clk_obj *module,
				  const struct s32cc_clk_drv *drv,
				  unsigned int depth)
{
	const struct s32cc_part_block_link *link =
		s32cc_obj2partblocklink(module);
	struct s32cc_part_block *block = link->block;
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* Move the enablement algorithm to the partition tree */
	return enable_module_with_refcount(&block->desc, drv, ldepth);
}

static struct s32cc_clk_obj *
get_part_block_link_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);

	return link->parent;
}

static int get_part_block_link_freq(const struct s32cc_clk_obj *module,
				    const struct s32cc_clk_drv *drv,
				    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_part_block_link *block = s32cc_obj2partblocklink(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	return get_module_rate(block->parent, drv, rate, ldepth);
}

static int no_enable(struct s32cc_clk_obj *module,
		     const struct s32cc_clk_drv *drv,
		     unsigned int depth)
{
	return 0;
}

static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod,
				 const struct s32cc_clk_drv *drv, bool leaf_node,
				 unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	if (mod == NULL) {
		return 0;
	}

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* The refcount is updated as part of the recursion */
	if (leaf_node) {
		return en_cb(mod, drv, ldepth);
	}

	if (mod->refcount == 0U) {
		ret = en_cb(mod, drv, ldepth);
	}

	if (ret == 0) {
		mod->refcount++;
	}

	return ret;
}

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module);

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth)
{
	struct s32cc_clk_obj *parent = get_module_parent(module);
	static const enable_clk_t enable_clbs[12] = {
		[s32cc_clk_t] = no_enable,
		[s32cc_osc_t] = enable_osc,
		[s32cc_pll_t] = enable_pll,
		[s32cc_pll_out_div_t] = enable_pll_div,
		[s32cc_clkmux_t] = enable_mux,
		[s32cc_shared_clkmux_t] = enable_mux,
		[s32cc_dfs_t] = enable_dfs,
		[s32cc_dfs_div_t] = enable_dfs_div,
		[s32cc_part_t] = enable_part,
		[s32cc_part_block_t] = enable_part_block,
		[s32cc_part_block_link_t] = enable_part_block_link,
	};
	unsigned int ldepth = depth;
	uint32_t index;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (drv == NULL) {
		return -EINVAL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(enable_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return -EINVAL;
	}

	if (enable_clbs[index] == NULL) {
		ERROR("Undefined callback for the clock type: %d\n",
		      module->type);
		return -EINVAL;
	}

	parent = get_module_parent(module);

	ret = exec_cb_with_refcount(enable_module, parent, drv,
				    false, ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = exec_cb_with_refcount(enable_clbs[index], module, drv,
				    true, ldepth);
	if (ret != 0) {
		return ret;
	}

	return ret;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth)
{
	return exec_cb_with_refcount(enable_module, module, drv, false, depth);
}

static int s32cc_clk_enable(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	struct s32cc_clk *clk;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	return enable_module_with_refcount(&clk->desc, drv, depth);
}

static void s32cc_clk_disable(unsigned long id)
{
}

static bool s32cc_clk_is_enabled(unsigned long id)
{
	return false;
}

static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_osc *osc = s32cc_obj2osc(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((osc->freq != 0UL) && (rate != osc->freq)) {
		ERROR("Already initialized oscillator. freq = %lu\n",
		      osc->freq);
		return -EINVAL;
	}

	osc->freq = rate;
	*orate = osc->freq;

	return 0;
}

static int get_osc_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (osc->freq == 0UL) {
		ERROR("Uninitialized oscillator\n");
		return -EINVAL;
	}

	*rate = osc->freq;

	return 0;
}

static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) &&
	    ((rate < clk->min_freq) || (rate > clk->max_freq))) {
		ERROR("%lu frequency is out of the allowed range: [%lu:%lu]\n",
		      rate, clk->min_freq, clk->max_freq);
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return set_module_rate(clk->module, rate, orate, depth);
	}

	if (clk->pclock != NULL) {
		return set_clk_freq(&clk->pclock->desc, rate, orate, depth);
	}

	return -EINVAL;
}

static int get_clk_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv, unsigned long *rate,
			unsigned int depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Invalid clock\n");
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return get_module_rate(clk->module, drv, rate, ldepth);
	}

	if (clk->pclock == NULL) {
		ERROR("Invalid clock parent\n");
		return -EINVAL;
	}

	return get_clk_freq(&clk->pclock->desc, drv, rate, ldepth);
}
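
/*
 * Like set_osc_freq() above, the setters below only cache the requested
 * rate in the clock tree description; the hardware is programmed later,
 * when the corresponding module is enabled and the cached values are
 * turned into MFI/MFN and divider settings.
 */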
static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll *pll = s32cc_obj2pll(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) {
		ERROR("PLL frequency was already set\n");
		return -EINVAL;
	}

	pll->vco_freq = rate;
	*orate = pll->vco_freq;

	return 0;
}

static int get_pll_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	const struct s32cc_clk *source;
	uint32_t mfi, mfn, rdiv, plldv;
	unsigned long prate, clk_src;
	unsigned int ldepth = depth;
	uintptr_t pll_addr = 0UL;
	uint64_t t1, t2;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	/* Disabled PLL */
	if (!is_pll_enabled(pll_addr)) {
		*rate = pll->vco_freq;
		return 0;
	}

	clk_src = mmio_read_32(PLLDIG_PLLCLKMUX(pll_addr));
	switch (clk_src) {
	case 0:
		clk_src = S32CC_CLK_FIRC;
		break;
	case 1:
		clk_src = S32CC_CLK_FXOSC;
		break;
	default:
		ERROR("Failed to identify PLL source id %" PRIu64 "\n", clk_src);
		return -EINVAL;
	}

	source = s32cc_get_arch_clk(clk_src);
	if (source == NULL) {
		ERROR("Failed to get PLL source clock\n");
		return -EINVAL;
	}

	ret = get_module_rate(&source->desc, drv, &prate, ldepth);
	if (ret != 0) {
		ERROR("Failed to get PLL's parent frequency\n");
		return ret;
	}

	plldv = mmio_read_32(PLLDIG_PLLDV(pll_addr));
	mfi = PLLDIG_PLLDV_MFI(plldv);
	rdiv = PLLDIG_PLLDV_RDIV(plldv);
	if (rdiv == 0U) {
		rdiv = 1;
	}

	/* Frac-N mode */
	mfn = PLLDIG_PLLFD_MFN_SET(mmio_read_32(PLLDIG_PLLFD(pll_addr)));

	/* PLL VCO frequency in Fractional mode when PLLDV[RDIV] is not 0 */
	t1 = prate / rdiv;
	t2 = (mfi * FP_PRECISION) + (mfn * FP_PRECISION / 18432U);

	*rate = t1 * t2 / FP_PRECISION;

	return 0;
}

static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	const struct s32cc_pll *pll;
	unsigned long prate, dc;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return -EINVAL;
	}

	pll = s32cc_obj2pll(pdiv->parent);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	prate = pll->vco_freq;

	/**
	 * The PLL is not initialized yet, so let's take a risk
	 * and accept the proposed rate.
	 */
	if (prate == 0UL) {
		pdiv->freq = rate;
		*orate = rate;
		return 0;
	}

	/* Decline in case the rate cannot fit PLL's requirements. */
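	/*
	 * Illustrative example (not from a board configuration): with a
	 * 2 GHz VCO, a 500 MHz request divides evenly (dc = 4) and is
	 * accepted, while an 800 MHz request truncates to dc = 2, would
	 * yield 1 GHz, and is therefore declined.
	 */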
	dc = prate / rate;
	if ((prate / dc) != rate) {
		return -EINVAL;
	}

	pdiv->freq = rate;
	*orate = pdiv->freq;

	return 0;
}

static int get_pll_div_freq(const struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	const struct s32cc_pll *pll;
	unsigned int ldepth = depth;
	uintptr_t pll_addr = 0UL;
	unsigned long pfreq;
	uint32_t pllodiv;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	pll = get_div_pll(pdiv);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return -EINVAL;
	}

	ret = get_module_rate(pdiv->parent, drv, &pfreq, ldepth);
	if (ret != 0) {
		ERROR("Failed to get the frequency of PLL %" PRIxPTR "\n",
		      pll_addr);
		return ret;
	}

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, pdiv->index));

	/* Disabled module */
	if ((pllodiv & PLLDIG_PLLODIV_DE) == 0U) {
		*rate = pdiv->freq;
		return 0;
	}

	dc = PLLDIG_PLLODIV_DIV(pllodiv);
	*rate = (pfreq * FP_PRECISION) / (dc + 1U) / FP_PRECISION;

	return 0;
}

static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			      unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (fdiv->parent == NULL) {
		ERROR("The divider doesn't have a valid parent\n");
		return -EINVAL;
	}

	ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth);

	/* Update the output rate based on the parent's rate */
	*orate /= fdiv->rate_div;

	return ret;
}

static int get_fixed_div_freq(const struct s32cc_clk_obj *module,
			      const struct s32cc_clk_drv *drv,
			      unsigned long *rate, unsigned int depth)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
	unsigned long pfreq;
	int ret;

	ret = get_module_rate(fdiv->parent, drv, &pfreq, depth);
	if (ret != 0) {
		return ret;
	}

	*rate = (pfreq * FP_PRECISION / fdiv->rate_div) / FP_PRECISION;
	return 0;
}

static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
		      mux->index, mux->source_id);
		return -EINVAL;
	}

	return set_module_rate(&clk->desc, rate, orate, depth);
}
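
/*
 * As with set_mux_freq() above, the mux rate getter below simply forwards
 * the request to whichever source clock the mux currently selects; the mux
 * itself adds no division.
 */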
static int get_mux_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
		      mux->index, mux->source_id);
		return -EINVAL;
	}

	return get_clk_freq(&clk->desc, drv, rate, ldepth);
}

static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	const struct s32cc_dfs *dfs;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
		return -EINVAL;
	}

	/* Sanity check */
	dfs = s32cc_obj2dfs(dfs_div->parent);
	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
		return -EINVAL;
	}

	if ((dfs_div->freq != 0U) && (dfs_div->freq != rate)) {
		ERROR("DFS DIV frequency was already set to %lu\n",
		      dfs_div->freq);
		return -EINVAL;
	}

	dfs_div->freq = rate;
	*orate = rate;

	return ret;
}

static unsigned long compute_dfs_div_freq(unsigned long pfreq, uint32_t mfi, uint32_t mfn)
{
	unsigned long freq;

	/**
	 * Formula for the input and output clocks of each port divider.
	 * See the 'Digital Frequency Synthesizer' chapter of the Reference Manual.
	 *
	 * freq = pfreq / (2 * (mfi + mfn / 36.0));
	 */
	freq = (mfi * FP_PRECISION) + (mfn * FP_PRECISION / 36UL);
	freq *= 2UL;
	freq = pfreq * FP_PRECISION / freq;

	return freq;
}

static int get_dfs_div_freq(const struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	unsigned int ldepth = depth;
	const struct s32cc_dfs *dfs;
	uint32_t dvport, mfi, mfn;
	uintptr_t dfs_addr = 0UL;
	unsigned long pfreq;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return -EINVAL;
	}

	ret = get_module_rate(dfs_div->parent, drv, &pfreq, ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if (ret != 0) {
		ERROR("Failed to detect the DFS instance\n");
		return ret;
	}

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, dfs_div->index));

	mfi = DFS_DVPORTn_MFI(dvport);
	mfn = DFS_DVPORTn_MFN(dvport);

	/* Disabled port */
	if ((mfi == 0U) && (mfn == 0U)) {
		*rate = dfs_div->freq;
		return 0;
	}

	*rate = compute_dfs_div_freq(pfreq, mfi, mfn);
	return 0;
}

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth)
{
	int ret = 0;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	ret = -EINVAL;

	switch (module->type) {
	case s32cc_clk_t:
		ret = set_clk_freq(module, rate, orate, depth);
		break;
	case s32cc_osc_t:
		ret = set_osc_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_t:
		ret = set_pll_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_out_div_t:
		ret = set_pll_div_freq(module, rate, orate, depth);
		break;
	case s32cc_fixed_div_t:
		ret = set_fixed_div_freq(module, rate, orate, depth);
		break;
	case s32cc_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_shared_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_dfs_t:
		ERROR("Setting the frequency of a DFS is not allowed!\n");
		break;
	case s32cc_dfs_div_t:
		ret = set_dfs_div_freq(module, rate, orate, depth);
		break;
	default:
		break;
	}

	return ret;
}

static int get_module_rate(const struct s32cc_clk_obj *module,
			   const struct s32cc_clk_drv *drv,
			   unsigned long *rate,
			   unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (module->type) {
	case s32cc_osc_t:
		ret = get_osc_freq(module, drv, rate, ldepth);
		break;
	case s32cc_clk_t:
		ret = get_clk_freq(module, drv, rate, ldepth);
		break;
	case s32cc_pll_t:
		ret = get_pll_freq(module, drv, rate, ldepth);
		break;
	case s32cc_dfs_t:
		ret = get_dfs_freq(module, drv, rate, ldepth);
		break;
	case s32cc_dfs_div_t:
		ret = get_dfs_div_freq(module, drv, rate, ldepth);
		break;
	case s32cc_fixed_div_t:
		ret = get_fixed_div_freq(module, drv, rate, ldepth);
		break;
	case s32cc_pll_out_div_t:
		ret = get_pll_div_freq(module, drv, rate, ldepth);
		break;
	case s32cc_clkmux_t:
		ret = get_mux_freq(module, drv, rate, ldepth);
		break;
	case s32cc_shared_clkmux_t:
		ret = get_mux_freq(module, drv, rate, ldepth);
		break;
	case s32cc_part_t:
		ERROR("s32cc_part_t cannot be used to get rate\n");
		break;
	case s32cc_part_block_t:
		ERROR("s32cc_part_block_t cannot be used to get rate\n");
		break;
	case s32cc_part_block_link_t:
		ret = get_part_block_link_freq(module, drv, rate, ldepth);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int s32cc_clk_set_rate(unsigned long id, unsigned long rate,
			      unsigned long *orate)
{
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	ret = set_module_rate(&clk->desc, rate, orate, &depth);
	if (ret != 0) {
		ERROR("Failed to set frequency (%lu Hz) for clock %lu\n",
		      rate, id);
	}

	return ret;
}

static unsigned long s32cc_clk_get_rate(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	unsigned long rate = 0UL;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return 0;
	}

	ret = get_module_rate(&clk->desc, drv, &rate, depth);
	if (ret != 0) {
		ERROR("Failed to get frequency (%lu Hz) for clock %lu\n",
		      rate, id);
		return 0;
	}

	return rate;
}

static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module)
{
	return NULL;
}

typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj);
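
/*
 * get_module_parent() mirrors the enable_clbs[] dispatch used by
 * enable_module(): both tables are indexed by the module type, so a new
 * s32cc_clk_obj type has to be added to both of them.
 */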
static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module)
{
	static const get_parent_clb_t parents_clbs[12] = {
		[s32cc_clk_t] = get_clk_parent,
		[s32cc_osc_t] = get_no_parent,
		[s32cc_pll_t] = get_pll_parent,
		[s32cc_pll_out_div_t] = get_pll_div_parent,
		[s32cc_clkmux_t] = get_mux_parent,
		[s32cc_shared_clkmux_t] = get_mux_parent,
		[s32cc_dfs_t] = get_dfs_parent,
		[s32cc_dfs_div_t] = get_dfs_div_parent,
		[s32cc_part_t] = get_no_parent,
		[s32cc_part_block_t] = get_part_block_parent,
		[s32cc_part_block_link_t] = get_part_block_link_parent,
	};
	uint32_t index;

	if (module == NULL) {
		return NULL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(parents_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return NULL;
	}

	if (parents_clbs[index] == NULL) {
		ERROR("Undefined parent getter for type: %d\n", module->type);
		return NULL;
	}

	return parents_clbs[index](module);
}

static int s32cc_clk_get_parent(unsigned long id)
{
	struct s32cc_clk *parent_clk;
	const struct s32cc_clk_obj *parent;
	const struct s32cc_clk *clk;
	unsigned long parent_id;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = get_module_parent(clk->module);
	if (parent == NULL) {
		return -EINVAL;
	}

	parent_clk = s32cc_obj2clk(parent);
	if (parent_clk == NULL) {
		return -EINVAL;
	}

	ret = s32cc_get_clk_id(parent_clk, &parent_id);
	if (ret != 0) {
		return ret;
	}

	if (parent_id > (unsigned long)INT_MAX) {
		return -E2BIG;
	}

	return (int)parent_id;
}

static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id)
{
	const struct s32cc_clk *parent;
	const struct s32cc_clk *clk;
	bool valid_source = false;
	struct s32cc_clkmux *mux;
	uint8_t i;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = s32cc_get_arch_clk(parent_id);
	if (parent == NULL) {
		return -EINVAL;
	}

	if (!is_s32cc_clk_mux(clk)) {
		ERROR("Clock %lu is not a mux\n", id);
		return -EINVAL;
	}

	mux = s32cc_clk2mux(clk);
	if (mux == NULL) {
		ERROR("Failed to cast clock %lu to clock mux\n", id);
		return -EINVAL;
	}

	for (i = 0; i < mux->nclks; i++) {
		if (mux->clkids[i] == parent_id) {
			valid_source = true;
			break;
		}
	}

	if (!valid_source) {
		ERROR("Clock %lu is not a valid source for mux %lu\n",
		      parent_id, id);
		return -EINVAL;
	}

	mux->source_id = parent_id;

	return 0;
}

static int s32cc_clk_mmap_regs(const struct s32cc_clk_drv *drv)
{
	const uintptr_t base_addrs[11] = {
		drv->fxosc_base,
		drv->armpll_base,
		drv->periphpll_base,
		drv->armdfs_base,
		drv->cgm0_base,
		drv->cgm1_base,
		drv->cgm5_base,
		drv->ddrpll_base,
		drv->mc_me,
		drv->mc_rgm,
		drv->rdc,
	};
	size_t i;
	int ret;

	for (i = 0U; i < ARRAY_SIZE(base_addrs); i++) {
		ret = mmap_add_dynamic_region(base_addrs[i], base_addrs[i],
					      PAGE_SIZE,
					      MT_DEVICE | MT_RW | MT_SECURE);
		if (ret != 0) {
			ERROR("Failed to map clock module 0x%" PRIxPTR "\n",
			      base_addrs[i]);
			return ret;
		}
	}

	return 0;
}
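
/*
 * Entry point used by the platform code: it registers the clock operations
 * with the generic clock framework and, when mmap_regs is set, maps the
 * clock-related register ranges (presumably for callers that run with the
 * MMU already configured).
 */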
int s32cc_clk_register_drv(bool mmap_regs)
{
	static const struct clk_ops s32cc_clk_ops = {
		.enable = s32cc_clk_enable,
		.disable = s32cc_clk_disable,
		.is_enabled = s32cc_clk_is_enabled,
		.get_rate = s32cc_clk_get_rate,
		.set_rate = s32cc_clk_set_rate,
		.get_parent = s32cc_clk_get_parent,
		.set_parent = s32cc_clk_set_parent,
	};
	const struct s32cc_clk_drv *drv;

	clk_register(&s32cc_clk_ops);

	drv = get_drv();
	if (drv == NULL) {
		return -EINVAL;
	}

	if (mmap_regs) {
		return s32cc_clk_mmap_regs(drv);
	}

	return 0;
}