/*
 * Copyright 2024-2025 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

#include <common/debug.h>
#include <drivers/clk.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <s32cc-clk-ids.h>
#include <s32cc-clk-modules.h>
#include <s32cc-clk-regs.h>
#include <s32cc-clk-utils.h>
#include <s32cc-mc-me.h>

#define MAX_STACK_DEPTH	(40U)

/* Fixed-point scaling factor used to emulate fractional arithmetic with integers. */
#define FP_PRECISION	(100000000UL)

struct s32cc_clk_drv {
	uintptr_t fxosc_base;
	uintptr_t armpll_base;
	uintptr_t periphpll_base;
	uintptr_t armdfs_base;
	uintptr_t cgm0_base;
	uintptr_t cgm1_base;
	uintptr_t cgm5_base;
	uintptr_t ddrpll_base;
	uintptr_t mc_me;
	uintptr_t mc_rgm;
	uintptr_t rdc;
};

static int update_stack_depth(unsigned int *depth)
{
	if (*depth == 0U) {
		return -ENOMEM;
	}

	(*depth)--;
	return 0;
}

static struct s32cc_clk_drv *get_drv(void)
{
	static struct s32cc_clk_drv driver = {
		.fxosc_base = FXOSC_BASE_ADDR,
		.armpll_base = ARMPLL_BASE_ADDR,
		.periphpll_base = PERIPHPLL_BASE_ADDR,
		.armdfs_base = ARM_DFS_BASE_ADDR,
		.cgm0_base = CGM0_BASE_ADDR,
		.cgm1_base = CGM1_BASE_ADDR,
		.cgm5_base = MC_CGM5_BASE_ADDR,
		.ddrpll_base = DDRPLL_BASE_ADDR,
		.mc_me = MC_ME_BASE_ADDR,
		.mc_rgm = MC_RGM_BASE_ADDR,
		.rdc = RDC_BASE_ADDR,
	};

	return &driver;
}

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth);

static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);

	if (clk->module != NULL) {
		return clk->module;
	}

	if (clk->pclock != NULL) {
		return &clk->pclock->desc;
	}

	return NULL;
}

static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv,
			 uintptr_t *base)
{
	int ret = 0;

	switch (id) {
	case S32CC_FXOSC:
		*base = drv->fxosc_base;
		break;
	case S32CC_ARM_PLL:
		*base = drv->armpll_base;
		break;
	case S32CC_PERIPH_PLL:
		*base = drv->periphpll_base;
		break;
	case S32CC_DDR_PLL:
		*base = drv->ddrpll_base;
		break;
	case S32CC_ARM_DFS:
		*base = drv->armdfs_base;
		break;
	case S32CC_CGM0:
		*base = drv->cgm0_base;
		break;
	case S32CC_CGM1:
		*base = drv->cgm1_base;
		break;
	case S32CC_CGM5:
		*base = drv->cgm5_base;
		break;
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret != 0) {
		ERROR("Unknown clock source id: %u\n", id);
	}

	return ret;
}

static void enable_fxosc(const struct s32cc_clk_drv *drv)
{
	uintptr_t fxosc_base = drv->fxosc_base;
	uint32_t ctrl;

	ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base));
	if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) {
		return;
	}

	ctrl = FXOSC_CTRL_COMP_EN;
	ctrl &= ~FXOSC_CTRL_OSC_BYP;
	ctrl |= FXOSC_CTRL_EOCV(0x1);
	ctrl |= FXOSC_CTRL_GM_SEL(0x7);
	mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl);

	/* Switch ON the crystal oscillator. */
	mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON);

	/* Wait until the clock is stable. */
	while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) {
	}
}

static int enable_osc(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (osc->source) {
	case S32CC_FXOSC:
		enable_fxosc(drv);
		break;
	/* FIRC and SIRC oscillators are enabled by default */
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ERROR("Invalid oscillator %d\n", osc->source);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);

	if (pll->source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
	}

	return pll->source;
}

static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq,
			   uint32_t *mfi, uint32_t *mfn)
{
	unsigned long vco;
	unsigned long mfn64;

	/* FRAC-N mode */
	*mfi = (uint32_t)(pll_vco / ref_freq);

	/* MFN formula: (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */
	mfn64 = pll_vco % ref_freq;
	mfn64 *= FP_PRECISION;
	mfn64 /= ref_freq;
	mfn64 *= 18432UL;
	mfn64 /= FP_PRECISION;

	if (mfn64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)mfn64;

	vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL;
	vco += (unsigned long)*mfi * FP_PRECISION;
	vco *= ref_freq;
	vco /= FP_PRECISION;

	if (vco != pll_vco) {
		ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n",
		      pll_vco, vco);
		return -EINVAL;
	}

	return 0;
}
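
/*
 * Illustrative example for get_pll_mfi_mfn(), using assumed values rather
 * than a particular board configuration: pll_vco = 1300 MHz and
 * ref_freq = 40 MHz give mfi = 1300000000 / 40000000 = 32, and the 20 MHz
 * remainder yields mfn = (20000000 * 18432) / 40000000 = 9216. The reverse
 * check then reconstructs vco = (32 + 9216 / 18432) * 40 MHz = 1300 MHz,
 * so the requested VCO frequency is accepted.
 */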

static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll)
{
	const struct s32cc_clk_obj *source = pll->source;
	const struct s32cc_clk *clk;

	if (source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
		return NULL;
	}

	if (source->type != s32cc_clk_t) {
		ERROR("The parent of the PLL isn't a clock\n");
		return NULL;
	}

	clk = s32cc_obj2clk(source);

	if (clk->module == NULL) {
		ERROR("The clock isn't connected to a module\n");
		return NULL;
	}

	source = clk->module;

	if ((source->type != s32cc_clkmux_t) &&
	    (source->type != s32cc_shared_clkmux_t)) {
		ERROR("The parent of the PLL isn't a MUX\n");
		return NULL;
	}

	return s32cc_obj2clkmux(source);
}

static void disable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void enable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		disable_odiv(pll_addr, i);
	}
}

static void enable_pll_hw(uintptr_t pll_addr)
{
	/* Enable the PLL. */
	mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0);

	/* Poll until PLL acquires lock. */
	while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) {
	}
}

static void disable_pll_hw(uintptr_t pll_addr)
{
	mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD);
}

static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr,
		       const struct s32cc_clk_drv *drv, uint32_t sclk_id,
		       unsigned long sclk_freq)
{
	uint32_t rdiv = 1, mfi, mfn;
	int ret;

	ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	/* Disable ODIVs */
	disable_odivs(pll_addr, pll->ndividers);

	/* Disable PLL */
	disable_pll_hw(pll_addr);

	/* Program PLLCLKMUX */
	mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id);

	/* Program VCO */
	mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr),
			   PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK,
			   PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi));

	mmio_write_32(PLLDIG_PLLFD(pll_addr),
		      PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN);

	enable_pll_hw(pll_addr);

	return ret;
}

static int enable_pll(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	const struct s32cc_clkmux *mux;
	uintptr_t pll_addr = UL(0x0);
	unsigned int ldepth = depth;
	unsigned long sclk_freq;
	uint32_t sclk_id;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	mux = get_pll_mux(pll);
	if (mux == NULL) {
		return -EINVAL;
	}

	if (pll->instance != mux->module) {
		ERROR("MUX type is not in sync with PLL ID\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	switch (mux->source_id) {
	case S32CC_CLK_FIRC:
		sclk_freq = 48U * MHZ;
		sclk_id = 0;
		break;
	case S32CC_CLK_FXOSC:
		sclk_freq = 40U * MHZ;
		sclk_id = 1;
		break;
	default:
		ERROR("Invalid source selection for PLL 0x%lx\n",
		      pll_addr);
		return -EINVAL;
	}

	return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq);
}

static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv)
{
	const struct s32cc_clk_obj *parent;

	parent = pdiv->parent;
	if (parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return NULL;
	}

	if (parent->type != s32cc_pll_t) {
		ERROR("The parent of the divider is not a PLL instance\n");
		return NULL;
	}

	return s32cc_obj2pll(parent);
}

static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc)
{
	uint32_t pllodiv;
	uint32_t pdiv;

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index));
	pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

	if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) {
		return;
	}

	if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
		disable_odiv(pll_addr, div_index);
	}

	pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U);
	mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv);

	enable_odiv(pll_addr, div_index);
}

static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL DIV's parent\n");
	}

	return pdiv->parent;
}

static int enable_pll_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	uintptr_t pll_addr = 0x0ULL;
	unsigned int ldepth = depth;
	const struct s32cc_pll *pll;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	pll = get_div_pll(pdiv);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return 0;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return -EINVAL;
	}

	dc = (uint32_t)(pll->vco_freq / pdiv->freq);

	config_pll_out_div(pll_addr, pdiv->index, dc);

	return 0;
}

static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source,
			      bool safe_clk)
{
	uint32_t css, csc;

	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));

	/* Already configured */
	if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) &&
	    (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
	    ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) {
		return 0;
	}

	/* Ongoing clock switch? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux));

	/* Clear previous source. */
	csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK);

	if (!safe_clk) {
		/* Select the clock source and trigger the clock switch. */
		csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW;
	} else {
		/* Switch to safe clock */
		csc |= MC_CGM_MUXn_CSC_SAFE_SW;
	}

	mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc);

	/* Wait for configuration bit to auto-clear. */
	while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) &
		MC_CGM_MUXn_CSC_CLK_SW) != 0U) {
	}

	/* Is the clock switch completed? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	/*
	 * Check if the switch succeeded.
	 * Check switch trigger cause and the source.
	 */
	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));
	if (!safe_clk) {
		if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
		    (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) {
			return 0;
		}

		ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n",
		      mux, source, cgm_addr);
	} else {
		if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) ||
		     (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) &&
		    ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) {
			return 0;
		}

		ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n",
		      mux, cgm_addr);
	}

	return -EINVAL;
}

static int enable_cgm_mux(const struct s32cc_clkmux *mux,
			  const struct s32cc_clk_drv *drv)
{
	uintptr_t cgm_addr = UL(0x0);
	uint32_t mux_hw_clk;
	int ret;

	ret = get_base_addr(mux->module, drv, &cgm_addr);
	if (ret != 0) {
		return ret;
	}

	mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id);

	return cgm_mux_clk_config(cgm_addr, mux->index,
				  mux_hw_clk, false);
}

static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	struct s32cc_clk *clk;

	if (mux == NULL) {
		return NULL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return NULL;
	}

	return &clk->desc;
}

static int enable_mux(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	unsigned int ldepth = depth;
	const struct s32cc_clk *clk;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (mux == NULL) {
		return -EINVAL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return -EINVAL;
	}

	switch (mux->module) {
	/* PLL mux will be enabled by PLL setup */
	case S32CC_ARM_PLL:
	case S32CC_PERIPH_PLL:
	case S32CC_DDR_PLL:
		break;
	case S32CC_CGM1:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM0:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM5:
		ret = enable_cgm_mux(mux, drv);
		break;
	default:
		ERROR("Unknown mux parent type: %d\n", mux->module);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);

	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
	}

	return dfs->parent;
}

static int enable_dfs(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div)
{
	const struct s32cc_clk_obj *parent = dfs_div->parent;

	if (parent->type != s32cc_dfs_t) {
		ERROR("DFS DIV doesn't have a DFS as parent\n");
		return NULL;
	}

	return s32cc_obj2dfs(parent);
}

static struct s32cc_pll *dfsdiv2pll(const struct s32cc_dfs_div *dfs_div)
{
	const struct s32cc_clk_obj *parent;
	const struct s32cc_dfs *dfs;

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return NULL;
	}

	parent = dfs->parent;
	if (parent->type != s32cc_pll_t) {
		return NULL;
	}

	return s32cc_obj2pll(parent);
}

static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div,
			   uint32_t *mfi, uint32_t *mfn)
{
	uint64_t factor64, tmp64, ofreq;
	uint32_t factor32;

	unsigned long in = dfs_freq;
	unsigned long out = dfs_div->freq;

	/**
	 * factor = (IN / OUT) / 2
	 * MFI = integer(factor)
	 * MFN = (factor - MFI) * 36
	 */
	factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL;
	tmp64 = factor64 / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	factor32 = (uint32_t)tmp64;
	*mfi = factor32;

	tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)tmp64;

	/* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */
	factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL;
	factor64 += ((uint64_t)*mfi) * FP_PRECISION;
	factor64 *= 2ULL;
	ofreq = (((uint64_t)in) * FP_PRECISION) / factor64;

	if (ofreq != dfs_div->freq) {
		ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n",
		      dfs_div->freq);
		ERROR("Nearest freq = %" PRIx64 "\n", ofreq);
		return -EINVAL;
	}

	return 0;
}
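
/*
 * Illustrative example for get_dfs_mfi_mfn(), using assumed values rather
 * than a particular board configuration: a DFS input of 2000 MHz and a
 * requested divider output of 800 MHz give factor = (2000 / 800) / 2 = 1.25,
 * hence mfi = 1 and mfn = 0.25 * 36 = 9. The reverse check computes
 * 2000 MHz / (2 * (1 + 9 / 36)) = 800 MHz, which matches the request.
 */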

static int init_dfs_port(uintptr_t dfs_addr, uint32_t port,
			 uint32_t mfi, uint32_t mfn)
{
	uint32_t portsr, portolsr;
	uint32_t mask, old_mfi, old_mfn;
	uint32_t dvport;
	bool init_dfs;

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port));

	old_mfi = DFS_DVPORTn_MFI(dvport);
	old_mfn = DFS_DVPORTn_MFN(dvport);

	portsr = mmio_read_32(DFS_PORTSR(dfs_addr));
	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));

	/* Skip configuration if it's not needed */
	if (((portsr & BIT_32(port)) != 0U) &&
	    ((portolsr & BIT_32(port)) == 0U) &&
	    (mfi == old_mfi) && (mfn == old_mfn)) {
		return 0;
	}

	init_dfs = (portsr == 0U);

	if (init_dfs) {
		mask = DFS_PORTRESET_MASK;
	} else {
		mask = DFS_PORTRESET_SET(BIT_32(port));
	}

	mmio_write_32(DFS_PORTOLSR(dfs_addr), mask);
	mmio_write_32(DFS_PORTRESET(dfs_addr), mask);

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) {
	}

	if (init_dfs) {
		mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_write_32(DFS_DVPORTn(dfs_addr, port),
		      DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn));

	if (init_dfs) {
		/* DFS clk enable programming */
		mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port));

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) {
	}

	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));
	if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) {
		ERROR("Failed to lock DFS divider\n");
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_dfs_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
	}

	return dfs_div->parent;
}

static int enable_dfs_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	unsigned int ldepth = depth;
	const struct s32cc_pll *pll;
	const struct s32cc_dfs *dfs;
	uintptr_t dfs_addr = 0UL;
	uint32_t mfi, mfn;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return -EINVAL;
	}

	pll = dfsdiv2pll(dfs_div);
	if (pll == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
		return -EINVAL;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if ((ret != 0) || (dfs_addr == 0UL)) {
		return -EINVAL;
	}

	ret = get_dfs_mfi_mfn(pll->vco_freq, dfs_div, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn);
}

typedef int (*enable_clk_t)(struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned int depth);

static int enable_part(struct s32cc_clk_obj *module,
		       const struct s32cc_clk_drv *drv,
		       unsigned int depth)
{
	const struct s32cc_part *part = s32cc_obj2part(module);
	uint32_t part_no = part->partition_id;

	if ((drv->mc_me == 0UL) || (drv->mc_rgm == 0UL) || (drv->rdc == 0UL)) {
		return -EINVAL;
	}

	return mc_me_enable_partition(drv->mc_me, drv->mc_rgm, drv->rdc, part_no);
}

static int enable_part_block(struct s32cc_clk_obj *module,
			     const struct s32cc_clk_drv *drv,
			     unsigned int depth)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);
	const struct s32cc_part *part = block->part;
	uint32_t part_no = part->partition_id;
	unsigned int ldepth = depth;
	uint32_t cofb;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if ((block->block >= s32cc_part_block0) &&
	    (block->block <= s32cc_part_block15)) {
		cofb = (uint32_t)block->block - (uint32_t)s32cc_part_block0;
		mc_me_enable_part_cofb(drv->mc_me, part_no, cofb, block->status);
	} else {
		ERROR("Unknown partition block type: %d\n", block->block);
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_part_block_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);

	return &block->part->desc;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth);

static int enable_part_block_link(struct s32cc_clk_obj *module,
				  const struct s32cc_clk_drv *drv,
				  unsigned int depth)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);
	struct s32cc_part_block *block = link->block;
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* Move the enablement algorithm to partition tree */
	return enable_module_with_refcount(&block->desc, drv, ldepth);
}

static struct s32cc_clk_obj *
get_part_block_link_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);

	return link->parent;
}

static int no_enable(struct s32cc_clk_obj *module,
		     const struct s32cc_clk_drv *drv,
		     unsigned int depth)
{
	return 0;
}

static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod,
				 const struct s32cc_clk_drv *drv, bool leaf_node,
				 unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	if (mod == NULL) {
		return 0;
	}

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* The refcount is updated as part of the recursive walk */
	if (leaf_node) {
		return en_cb(mod, drv, ldepth);
	}

	if (mod->refcount == 0U) {
		ret = en_cb(mod, drv, ldepth);
	}

	if (ret == 0) {
		mod->refcount++;
	}

	return ret;
}

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module);

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth)
{
	struct s32cc_clk_obj *parent = get_module_parent(module);
	static const enable_clk_t enable_clbs[12] = {
		[s32cc_clk_t] = no_enable,
		[s32cc_osc_t] = enable_osc,
		[s32cc_pll_t] = enable_pll,
		[s32cc_pll_out_div_t] = enable_pll_div,
		[s32cc_clkmux_t] = enable_mux,
		[s32cc_shared_clkmux_t] = enable_mux,
		[s32cc_dfs_t] = enable_dfs,
		[s32cc_dfs_div_t] = enable_dfs_div,
		[s32cc_part_t] = enable_part,
		[s32cc_part_block_t] = enable_part_block,
		[s32cc_part_block_link_t] = enable_part_block_link,
	};
	unsigned int ldepth = depth;
	uint32_t index;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (drv == NULL) {
		return -EINVAL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(enable_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return -EINVAL;
	}

	if (enable_clbs[index] == NULL) {
		ERROR("Undefined callback for the clock type: %d\n",
		      module->type);
		return -EINVAL;
	}

	/* Enable the parent chain first, then the module itself. */
	ret = exec_cb_with_refcount(enable_module, parent, drv,
				    false, ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = exec_cb_with_refcount(enable_clbs[index], module, drv,
				    true, ldepth);
	if (ret != 0) {
		return ret;
	}

	return ret;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth)
{
	return exec_cb_with_refcount(enable_module, module, drv, false, depth);
}

static int s32cc_clk_enable(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	struct s32cc_clk *clk;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	return enable_module_with_refcount(&clk->desc, drv, depth);
}

static void s32cc_clk_disable(unsigned long id)
{
}

static bool s32cc_clk_is_enabled(unsigned long id)
{
	return false;
}

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth);
static int get_module_rate(const struct s32cc_clk_obj *module,
			   const struct s32cc_clk_drv *drv,
			   unsigned long *rate,
			   unsigned int depth);

static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_osc *osc = s32cc_obj2osc(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((osc->freq != 0UL) && (rate != osc->freq)) {
		ERROR("Already initialized oscillator. freq = %lu\n",
		      osc->freq);
		return -EINVAL;
	}

	osc->freq = rate;
	*orate = osc->freq;

	return 0;
}

static int get_osc_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (osc->freq == 0UL) {
		ERROR("Uninitialized oscillator\n");
		return -EINVAL;
	}

	*rate = osc->freq;

	return 0;
}

static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) &&
	    ((rate < clk->min_freq) || (rate > clk->max_freq))) {
		ERROR("%lu frequency is out of the allowed range: [%lu:%lu]\n",
		      rate, clk->min_freq, clk->max_freq);
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return set_module_rate(clk->module, rate, orate, depth);
	}

	if (clk->pclock != NULL) {
		return set_clk_freq(&clk->pclock->desc, rate, orate, depth);
	}

	return -EINVAL;
}

static int get_clk_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv, unsigned long *rate,
			unsigned int depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Invalid clock\n");
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return get_module_rate(clk->module, drv, rate, ldepth);
	}

	if (clk->pclock == NULL) {
		ERROR("Invalid clock parent\n");
		return -EINVAL;
	}

	return get_clk_freq(&clk->pclock->desc, drv, rate, ldepth);
}

static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll *pll = s32cc_obj2pll(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) {
		ERROR("PLL frequency was already set\n");
		return -EINVAL;
	}

	pll->vco_freq = rate;
	*orate = pll->vco_freq;

	return 0;
}

static int get_pll_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	const struct s32cc_clk *source;
	uint32_t mfi, mfn, rdiv, plldv;
	unsigned long prate, clk_src;
	unsigned int ldepth = depth;
	uintptr_t pll_addr = 0UL;
	uint64_t t1, t2;
	uint32_t pllpd;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	/* Disabled PLL */
	pllpd = mmio_read_32(PLLDIG_PLLCR(pll_addr)) & PLLDIG_PLLCR_PLLPD;
	if (pllpd != 0U) {
		*rate = pll->vco_freq;
		return 0;
	}

	clk_src = mmio_read_32(PLLDIG_PLLCLKMUX(pll_addr));
	switch (clk_src) {
	case 0:
		clk_src = S32CC_CLK_FIRC;
		break;
	case 1:
		clk_src = S32CC_CLK_FXOSC;
		break;
	default:
		ERROR("Failed to identify PLL source id %" PRIu64 "\n", clk_src);
		return -EINVAL;
	}

	source = s32cc_get_arch_clk(clk_src);
	if (source == NULL) {
		ERROR("Failed to get PLL source clock\n");
		return -EINVAL;
	}

	ret = get_module_rate(&source->desc, drv, &prate, ldepth);
	if (ret != 0) {
		ERROR("Failed to get PLL's parent frequency\n");
		return ret;
	}

	plldv = mmio_read_32(PLLDIG_PLLDV(pll_addr));
	mfi = PLLDIG_PLLDV_MFI(plldv);
	rdiv = PLLDIG_PLLDV_RDIV(plldv);
	if (rdiv == 0U) {
		rdiv = 1;
	}

	/* Frac-N mode */
	mfn = PLLDIG_PLLFD_MFN_SET(mmio_read_32(PLLDIG_PLLFD(pll_addr)));

	/* PLL VCO frequency in Fractional mode when PLLDV[RDIV] is not 0 */
	t1 = prate / rdiv;
	t2 = (mfi * FP_PRECISION) + (mfn * FP_PRECISION / 18432U);

	*rate = t1 * t2 / FP_PRECISION;

	return 0;
}
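
/*
 * Illustrative readback example for get_pll_freq(), with assumed register
 * contents: prate = 40 MHz (FXOSC), RDIV = 1, MFI = 32 and MFN = 9216 give
 * *rate = (40 MHz / 1) * (32 + 9216 / 18432) = 1300 MHz, i.e. the same VCO
 * frequency that get_pll_mfi_mfn() would program for a 1300 MHz request.
 */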

static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	const struct s32cc_pll *pll;
	unsigned long prate, dc;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return -EINVAL;
	}

	pll = s32cc_obj2pll(pdiv->parent);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	prate = pll->vco_freq;

	/**
	 * The PLL is not initialized yet, so let's take a risk
	 * and accept the proposed rate.
	 */
	if (prate == 0UL) {
		pdiv->freq = rate;
		*orate = rate;
		return 0;
	}

	/* Decline in case the rate cannot fit PLL's requirements. */
	dc = prate / rate;
	if ((prate / dc) != rate) {
		return -EINVAL;
	}

	pdiv->freq = rate;
	*orate = pdiv->freq;

	return 0;
}

static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			      unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (fdiv->parent == NULL) {
		ERROR("The divider doesn't have a valid parent\n");
		return -EINVAL;
	}

	ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth);

	/* Update the output rate based on the parent's rate */
	*orate /= fdiv->rate_div;

	return ret;
}

static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
		      mux->index, mux->source_id);
		return -EINVAL;
	}

	return set_module_rate(&clk->desc, rate, orate, depth);
}

static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	const struct s32cc_dfs *dfs;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
		return -EINVAL;
	}

	/* Sanity check */
	dfs = s32cc_obj2dfs(dfs_div->parent);
	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
		return -EINVAL;
	}

	if ((dfs_div->freq != 0U) && (dfs_div->freq != rate)) {
		ERROR("DFS DIV frequency was already set to %lu\n",
		      dfs_div->freq);
		return -EINVAL;
	}

	dfs_div->freq = rate;
	*orate = rate;

	return ret;
}

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth)
{
	int ret = 0;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	ret = -EINVAL;

	switch (module->type) {
	case s32cc_clk_t:
		ret = set_clk_freq(module, rate, orate, depth);
		break;
	case s32cc_osc_t:
		ret = set_osc_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_t:
		ret = set_pll_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_out_div_t:
		ret = set_pll_div_freq(module, rate, orate, depth);
		break;
	case s32cc_fixed_div_t:
		ret = set_fixed_div_freq(module, rate, orate, depth);
		break;
	case s32cc_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_shared_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_dfs_t:
		ERROR("Setting the frequency of a DFS is not allowed!\n");
		break;
	case s32cc_dfs_div_t:
		ret = set_dfs_div_freq(module, rate, orate, depth);
		break;
	default:
		break;
	}

	return ret;
}

static int get_module_rate(const struct s32cc_clk_obj *module,
			   const struct s32cc_clk_drv *drv,
			   unsigned long *rate,
			   unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (module->type) {
	case s32cc_osc_t:
		ret = get_osc_freq(module, drv, rate, ldepth);
		break;
	case s32cc_clk_t:
		ret = get_clk_freq(module, drv, rate, ldepth);
		break;
	case s32cc_pll_t:
		ret = get_pll_freq(module, drv, rate, ldepth);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int s32cc_clk_set_rate(unsigned long id, unsigned long rate,
			      unsigned long *orate)
{
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	ret = set_module_rate(&clk->desc, rate, orate, &depth);
	if (ret != 0) {
		ERROR("Failed to set frequency (%lu Hz) for clock %lu\n",
		      rate, id);
	}

	return ret;
}

static unsigned long s32cc_clk_get_rate(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	unsigned long rate = 0UL;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return 0;
	}

	ret = get_module_rate(&clk->desc, drv, &rate, depth);
	if (ret != 0) {
		ERROR("Failed to get frequency (%lu Hz) for clock %lu\n",
		      rate, id);
		return 0;
	}

	return rate;
}

static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module)
{
	return NULL;
}

typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj);

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module)
{
	static const get_parent_clb_t parents_clbs[12] = {
		[s32cc_clk_t] = get_clk_parent,
		[s32cc_osc_t] = get_no_parent,
		[s32cc_pll_t] = get_pll_parent,
		[s32cc_pll_out_div_t] = get_pll_div_parent,
		[s32cc_clkmux_t] = get_mux_parent,
		[s32cc_shared_clkmux_t] = get_mux_parent,
		[s32cc_dfs_t] = get_dfs_parent,
		[s32cc_dfs_div_t] = get_dfs_div_parent,
		[s32cc_part_t] = get_no_parent,
		[s32cc_part_block_t] = get_part_block_parent,
		[s32cc_part_block_link_t] = get_part_block_link_parent,
	};
	uint32_t index;

	if (module == NULL) {
		return NULL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(parents_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return NULL;
	}

	if (parents_clbs[index] == NULL) {
		ERROR("Undefined parent getter for type: %d\n", module->type);
		return NULL;
	}

	return parents_clbs[index](module);
}

static int s32cc_clk_get_parent(unsigned long id)
{
	struct s32cc_clk *parent_clk;
	const struct s32cc_clk_obj *parent;
	const struct s32cc_clk *clk;
	unsigned long parent_id;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = get_module_parent(clk->module);
	if (parent == NULL) {
		return -EINVAL;
	}

	parent_clk = s32cc_obj2clk(parent);
	if (parent_clk == NULL) {
		return -EINVAL;
	}

	ret = s32cc_get_clk_id(parent_clk, &parent_id);
	if (ret != 0) {
		return ret;
	}

	if (parent_id > (unsigned long)INT_MAX) {
		return -E2BIG;
	}

	return (int)parent_id;
}

static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id)
{
	const struct s32cc_clk *parent;
	const struct s32cc_clk *clk;
	bool valid_source = false;
	struct s32cc_clkmux *mux;
	uint8_t i;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = s32cc_get_arch_clk(parent_id);
	if (parent == NULL) {
		return -EINVAL;
	}

	if (!is_s32cc_clk_mux(clk)) {
		ERROR("Clock %lu is not a mux\n", id);
		return -EINVAL;
	}

	mux = s32cc_clk2mux(clk);
	if (mux == NULL) {
		ERROR("Failed to cast clock %lu to clock mux\n", id);
		return -EINVAL;
	}

	for (i = 0; i < mux->nclks; i++) {
		if (mux->clkids[i] == parent_id) {
			valid_source = true;
			break;
		}
	}

	if (!valid_source) {
		ERROR("Clock %lu is not a valid clock for mux %lu\n",
		      parent_id, id);
		return -EINVAL;
	}

	mux->source_id = parent_id;

	return 0;
}

static int s32cc_clk_mmap_regs(const struct s32cc_clk_drv *drv)
{
	const uintptr_t base_addrs[11] = {
		drv->fxosc_base,
		drv->armpll_base,
		drv->periphpll_base,
		drv->armdfs_base,
		drv->cgm0_base,
		drv->cgm1_base,
		drv->cgm5_base,
		drv->ddrpll_base,
		drv->mc_me,
		drv->mc_rgm,
		drv->rdc,
	};
	size_t i;
	int ret;

	for (i = 0U; i < ARRAY_SIZE(base_addrs); i++) {
		ret = mmap_add_dynamic_region(base_addrs[i], base_addrs[i],
					      PAGE_SIZE,
					      MT_DEVICE | MT_RW | MT_SECURE);
		if (ret != 0) {
			ERROR("Failed to map clock module 0x%" PRIxPTR "\n",
			      base_addrs[i]);
			return ret;
		}
	}

	return 0;
}

int s32cc_clk_register_drv(bool mmap_regs)
{
	static const struct clk_ops s32cc_clk_ops = {
		.enable = s32cc_clk_enable,
		.disable = s32cc_clk_disable,
		.is_enabled = s32cc_clk_is_enabled,
		.get_rate = s32cc_clk_get_rate,
		.set_rate = s32cc_clk_set_rate,
		.get_parent = s32cc_clk_get_parent,
		.set_parent = s32cc_clk_set_parent,
	};
	const struct s32cc_clk_drv *drv;

	clk_register(&s32cc_clk_ops);

	drv = get_drv();
	if (drv == NULL) {
		return -EINVAL;
	}

	if (mmap_regs) {
		return s32cc_clk_mmap_regs(drv);
	}

	return 0;
}