/*
 * Copyright 2024-2025 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>
#include <common/debug.h>
#include <drivers/clk.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <s32cc-clk-ids.h>
#include <s32cc-clk-modules.h>
#include <s32cc-clk-regs.h>
#include <s32cc-clk-utils.h>
#include <s32cc-mc-me.h>

#define MAX_STACK_DEPTH		(40U)

/* Scaling factor used to emulate floating-point precision in integer-only calculations. */
#define FP_PRECISION		(100000000UL)

struct s32cc_clk_drv {
	uintptr_t fxosc_base;
	uintptr_t armpll_base;
	uintptr_t periphpll_base;
	uintptr_t armdfs_base;
	uintptr_t periphdfs_base;
	uintptr_t cgm0_base;
	uintptr_t cgm1_base;
	uintptr_t cgm5_base;
	uintptr_t ddrpll_base;
	uintptr_t mc_me;
	uintptr_t mc_rgm;
	uintptr_t rdc;
};

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth);
static int get_module_rate(const struct s32cc_clk_obj *module,
			   const struct s32cc_clk_drv *drv,
			   unsigned long *rate,
			   unsigned int depth);

static int update_stack_depth(unsigned int *depth)
{
	if (*depth == 0U) {
		return -ENOMEM;
	}

	(*depth)--;
	return 0;
}

static struct s32cc_clk_drv *get_drv(void)
{
	static struct s32cc_clk_drv driver = {
		.fxosc_base = FXOSC_BASE_ADDR,
		.armpll_base = ARMPLL_BASE_ADDR,
		.periphpll_base = PERIPHPLL_BASE_ADDR,
		.armdfs_base = ARM_DFS_BASE_ADDR,
		.periphdfs_base = PERIPH_DFS_BASE_ADDR,
		.cgm0_base = CGM0_BASE_ADDR,
		.cgm1_base = CGM1_BASE_ADDR,
		.cgm5_base = MC_CGM5_BASE_ADDR,
		.ddrpll_base = DDRPLL_BASE_ADDR,
		.mc_me = MC_ME_BASE_ADDR,
		.mc_rgm = MC_RGM_BASE_ADDR,
		.rdc = RDC_BASE_ADDR,
	};

	return &driver;
}

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth);

static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);

	if (clk->module != NULL) {
		return clk->module;
	}

	if (clk->pclock != NULL) {
		return &clk->pclock->desc;
	}

	return NULL;
}

static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv,
			 uintptr_t *base)
{
	int ret = 0;

	switch (id) {
	case S32CC_FXOSC:
		*base = drv->fxosc_base;
		break;
	case S32CC_ARM_PLL:
		*base = drv->armpll_base;
		break;
	case S32CC_PERIPH_PLL:
		*base = drv->periphpll_base;
		break;
	case S32CC_DDR_PLL:
		*base = drv->ddrpll_base;
		break;
	case S32CC_ARM_DFS:
		*base = drv->armdfs_base;
		break;
	case S32CC_PERIPH_DFS:
		*base = drv->periphdfs_base;
		break;
	case S32CC_CGM0:
		*base = drv->cgm0_base;
		break;
	case S32CC_CGM1:
		*base = drv->cgm1_base;
		break;
	case S32CC_CGM5:
		*base = drv->cgm5_base;
		break;
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret != 0) {
		ERROR("Unknown clock source id: %u\n", id);
	}

	return ret;
}
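
/*
 * FXOSC bring-up sequence used below: enable the comparator, keep the bypass
 * disabled, program the EOCV and GM_SEL fields, switch the oscillator on
 * (OSCON) and poll FXOSC_STAT until the clock is reported as stable.
 */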

static void enable_fxosc(const struct s32cc_clk_drv *drv)
{
	uintptr_t fxosc_base = drv->fxosc_base;
	uint32_t ctrl;

	ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base));
	if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) {
		return;
	}

	ctrl = FXOSC_CTRL_COMP_EN;
	ctrl &= ~FXOSC_CTRL_OSC_BYP;
	ctrl |= FXOSC_CTRL_EOCV(0x1);
	ctrl |= FXOSC_CTRL_GM_SEL(0x7);
	mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl);

	/* Switch ON the crystal oscillator. */
	mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON);

	/* Wait until the clock is stable. */
	while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) {
	}
}

static int enable_osc(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (osc->source) {
	case S32CC_FXOSC:
		enable_fxosc(drv);
		break;
	/* FIRC and SIRC oscillators are enabled by default */
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ERROR("Invalid oscillator %d\n", osc->source);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);

	if (pll->source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
	}

	return pll->source;
}

static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq,
			   uint32_t *mfi, uint32_t *mfn)
{
	unsigned long vco;
	unsigned long mfn64;

	/* FRAC-N mode */
	*mfi = (uint32_t)(pll_vco / ref_freq);

	/* MFN formula : (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */
	mfn64 = pll_vco % ref_freq;
	mfn64 *= FP_PRECISION;
	mfn64 /= ref_freq;
	mfn64 *= 18432UL;
	mfn64 /= FP_PRECISION;

	if (mfn64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)mfn64;

	vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL;
	vco += (unsigned long)*mfi * FP_PRECISION;
	vco *= ref_freq;
	vco /= FP_PRECISION;

	if (vco != pll_vco) {
		ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n",
		      pll_vco, vco);
		return -EINVAL;
	}

	return 0;
}
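
/*
 * Worked example for get_pll_mfi_mfn(), using illustrative values only:
 * with ref_freq = 40 MHz (FXOSC) and pll_vco = 1300 MHz,
 *   MFI = 1300000000 / 40000000 = 32
 *   MFN = ((1300000000 % 40000000) / 40000000) * 18432 = 0.5 * 18432 = 9216
 * The reverse check then yields (32 + 9216 / 18432) * 40 MHz = 1300 MHz,
 * so the requested VCO frequency is accepted.
 */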

static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll)
{
	const struct s32cc_clk_obj *source = pll->source;
	const struct s32cc_clk *clk;

	if (source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
		return NULL;
	}

	if (source->type != s32cc_clk_t) {
		ERROR("The parent of the PLL isn't a clock\n");
		return NULL;
	}

	clk = s32cc_obj2clk(source);

	if (clk->module == NULL) {
		ERROR("The clock isn't connected to a module\n");
		return NULL;
	}

	source = clk->module;

	if ((source->type != s32cc_clkmux_t) &&
	    (source->type != s32cc_shared_clkmux_t)) {
		ERROR("The parent of the PLL isn't a MUX\n");
		return NULL;
	}

	return s32cc_obj2clkmux(source);
}

static void disable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void enable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void enable_odivs(uintptr_t pll_addr, uint32_t ndivs, uint32_t mask)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		if ((mask & BIT_32(i)) != 0U) {
			enable_odiv(pll_addr, i);
		}
	}
}

static int adjust_odiv_settings(const struct s32cc_pll *pll, uintptr_t pll_addr,
				uint32_t odivs_mask, unsigned long old_vco)
{
	uint64_t old_odiv_freq, odiv_freq;
	uint32_t i, pllodiv, pdiv;
	int ret = 0;

	if (old_vco == 0UL) {
		return 0;
	}

	for (i = 0; i < pll->ndividers; i++) {
		if ((odivs_mask & BIT_32(i)) == 0U) {
			continue;
		}

		pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, i));

		pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

		old_odiv_freq = ((old_vco * FP_PRECISION) / (pdiv + 1U)) / FP_PRECISION;
		pdiv = (uint32_t)(pll->vco_freq * FP_PRECISION / old_odiv_freq / FP_PRECISION);

		odiv_freq = pll->vco_freq * FP_PRECISION / pdiv / FP_PRECISION;

		if (old_odiv_freq != odiv_freq) {
			ERROR("Failed to adjust ODIV %" PRIu32 " to match previous frequency\n",
			      i);
		}

		pllodiv = PLLDIG_PLLODIV_DIV_SET(pdiv - 1U);
		mmio_write_32(PLLDIG_PLLODIV(pll_addr, i), pllodiv);
	}

	return ret;
}

static uint32_t get_enabled_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t mask = 0;
	uint32_t pllodiv;
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, i));
		if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
			mask |= BIT_32(i);
		}
	}

	return mask;
}

static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		disable_odiv(pll_addr, i);
	}
}

static void enable_pll_hw(uintptr_t pll_addr)
{
	/* Enable the PLL. */
	mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0);

	/* Poll until PLL acquires lock. */
	while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) {
	}
}

static void disable_pll_hw(uintptr_t pll_addr)
{
	mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD);
}

static bool is_pll_enabled(uintptr_t pll_base)
{
	uint32_t pllcr, pllsr;

	pllcr = mmio_read_32(PLLDIG_PLLCR(pll_base));
	pllsr = mmio_read_32(PLLDIG_PLLSR(pll_base));

	/* Enabled and locked PLL */
	if ((pllcr & PLLDIG_PLLCR_PLLPD) != 0U) {
		return false;
	}

	if ((pllsr & PLLDIG_PLLSR_LOCK) == 0U) {
		return false;
	}

	return true;
}
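
/*
 * Reprogramming sequence implemented by program_pll(): remember which output
 * dividers were enabled and, if the PLL was already running, its previous VCO
 * rate; disable the output dividers and the PLL; select the reference clock in
 * PLLCLKMUX; program RDIV/MFI and the fractional MFN; rescale the previously
 * enabled output dividers so their rates are preserved; finally re-enable the
 * PLL and the output dividers.
 */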

static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr,
		       const struct s32cc_clk_drv *drv, uint32_t sclk_id,
		       unsigned long sclk_freq, unsigned int depth)
{
	uint32_t rdiv = 1, mfi, mfn;
	unsigned long old_vco = 0UL;
	unsigned int ldepth = depth;
	uint32_t odivs_mask;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	odivs_mask = get_enabled_odivs(pll_addr, pll->ndividers);

	if (is_pll_enabled(pll_addr)) {
		ret = get_module_rate(&pll->desc, drv, &old_vco, ldepth);
		if (ret != 0) {
			return ret;
		}
	}

	/* Disable ODIVs */
	disable_odivs(pll_addr, pll->ndividers);

	/* Disable PLL */
	disable_pll_hw(pll_addr);

	/* Program PLLCLKMUX */
	mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id);

	/* Program VCO */
	mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr),
			   PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK,
			   PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi));

	mmio_write_32(PLLDIG_PLLFD(pll_addr),
		      PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN);

	ret = adjust_odiv_settings(pll, pll_addr, odivs_mask, old_vco);
	if (ret != 0) {
		return ret;
	}

	enable_pll_hw(pll_addr);

	/* Enable out dividers */
	enable_odivs(pll_addr, pll->ndividers, odivs_mask);

	return ret;
}

static int enable_pll(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	unsigned int clk_src, ldepth = depth;
	unsigned long sclk_freq, pll_vco;
	const struct s32cc_clkmux *mux;
	uintptr_t pll_addr = UL(0x0);
	bool pll_enabled;
	uint32_t sclk_id;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	mux = get_pll_mux(pll);
	if (mux == NULL) {
		return -EINVAL;
	}

	if (pll->instance != mux->module) {
		ERROR("MUX type is not in sync with PLL ID\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	switch (mux->source_id) {
	case S32CC_CLK_FIRC:
		sclk_freq = 48U * MHZ;
		sclk_id = 0;
		break;
	case S32CC_CLK_FXOSC:
		sclk_freq = 40U * MHZ;
		sclk_id = 1;
		break;
	default:
		ERROR("Invalid source selection for PLL 0x%lx\n",
		      pll_addr);
		return -EINVAL;
	}

	ret = get_module_rate(&pll->desc, drv, &pll_vco, depth);
	if (ret != 0) {
		return ret;
	}

	pll_enabled = is_pll_enabled(pll_addr);
	clk_src = mmio_read_32(PLLDIG_PLLCLKMUX(pll_addr));

	if ((clk_src == sclk_id) && pll_enabled &&
	    (pll_vco == pll->vco_freq)) {
		return 0;
	}

	return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq, ldepth);
}

static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv)
{
	const struct s32cc_clk_obj *parent;

	parent = pdiv->parent;
	if (parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return NULL;
	}

	if (parent->type != s32cc_pll_t) {
		ERROR("The parent of the divider is not a PLL instance\n");
		return NULL;
	}

	return s32cc_obj2pll(parent);
}

static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc)
{
	uint32_t pllodiv;
	uint32_t pdiv;

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index));
	pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

	if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) {
		return;
	}

	if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
		disable_odiv(pll_addr, div_index);
	}

	pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U);
	mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv);

	enable_odiv(pll_addr, div_index);
}

static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL DIV's parent\n");
	}

	return pdiv->parent;
}

static int enable_pll_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	uintptr_t pll_addr = 0x0ULL;
	unsigned int ldepth = depth;
	const struct s32cc_pll *pll;
	unsigned long pll_vco;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	pll = get_div_pll(pdiv);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return 0;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return -EINVAL;
	}

	ret = get_module_rate(&pll->desc, drv, &pll_vco, ldepth);
	if (ret != 0) {
		ERROR("Failed to enable the PLL due to unknown rate for 0x%" PRIxPTR "\n",
		      pll_addr);
		return ret;
	}

	dc = (uint32_t)(pll_vco / pdiv->freq);

	config_pll_out_div(pll_addr, pdiv->index, dc);

	return 0;
}
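
/*
 * MC_CGM software-controlled mux switch, as implemented below: if the
 * requested source is already selected and the previous switch completed,
 * nothing is done; otherwise wait for any switch in progress, select the new
 * source (or the safe clock), trigger the switch, wait for the trigger and
 * switch-in-progress bits to clear, and finally check the switch trigger
 * cause and the selected source to confirm that the switch succeeded.
 */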

static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source,
			      bool safe_clk)
{
	uint32_t css, csc;

	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));

	/* Already configured */
	if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) &&
	    (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
	    ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) {
		return 0;
	}

	/* Ongoing clock switch? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux));

	/* Clear previous source. */
	csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK);

	if (!safe_clk) {
		/* Select the clock source and trigger the clock switch. */
		csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW;
	} else {
		/* Switch to safe clock */
		csc |= MC_CGM_MUXn_CSC_SAFE_SW;
	}

	mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc);

	/* Wait for configuration bit to auto-clear. */
	while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) &
		MC_CGM_MUXn_CSC_CLK_SW) != 0U) {
	}

	/* Is the clock switch completed? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	/*
	 * Check if the switch succeeded.
	 * Check switch trigger cause and the source.
	 */
	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));
	if (!safe_clk) {
		if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
		    (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) {
			return 0;
		}

		ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n",
		      mux, source, cgm_addr);
	} else {
		if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) ||
		     (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) &&
		    ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) {
			return 0;
		}

		ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n",
		      mux, cgm_addr);
	}

	return -EINVAL;
}

static int enable_cgm_mux(const struct s32cc_clkmux *mux,
			  const struct s32cc_clk_drv *drv)
{
	uintptr_t cgm_addr = UL(0x0);
	uint32_t mux_hw_clk;
	int ret;

	ret = get_base_addr(mux->module, drv, &cgm_addr);
	if (ret != 0) {
		return ret;
	}

	mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id);

	return cgm_mux_clk_config(cgm_addr, mux->index,
				  mux_hw_clk, false);
}

static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	struct s32cc_clk *clk;

	if (mux == NULL) {
		return NULL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return NULL;
	}

	return &clk->desc;
}

static int enable_mux(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	unsigned int ldepth = depth;
	const struct s32cc_clk *clk;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (mux == NULL) {
		return -EINVAL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return -EINVAL;
	}

	switch (mux->module) {
	/* PLL mux will be enabled by PLL setup */
	case S32CC_ARM_PLL:
	case S32CC_PERIPH_PLL:
	case S32CC_DDR_PLL:
		break;
	case S32CC_CGM1:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM0:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM5:
		ret = enable_cgm_mux(mux, drv);
		break;
	default:
		ERROR("Unknown mux parent type: %d\n", mux->module);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);

	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
	}

	return dfs->parent;
}
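
/*
 * Enabling a DFS instance has no dedicated hardware sequence here: the ports
 * are programmed on demand by enable_dfs_div()/init_dfs_port(), so enable_dfs()
 * only accounts for the recursion depth.
 */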

static int enable_dfs(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

static int get_dfs_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);
	unsigned int ldepth = depth;
	uintptr_t dfs_addr;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if (ret != 0) {
		ERROR("Failed to detect the DFS instance\n");
		return ret;
	}

	return get_module_rate(dfs->parent, drv, rate, ldepth);
}

static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div)
{
	const struct s32cc_clk_obj *parent = dfs_div->parent;

	if (parent->type != s32cc_dfs_t) {
		ERROR("DFS DIV doesn't have a DFS as parent\n");
		return NULL;
	}

	return s32cc_obj2dfs(parent);
}

static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div,
			   uint32_t *mfi, uint32_t *mfn)
{
	uint64_t factor64, tmp64, ofreq;
	uint32_t factor32;

	unsigned long in = dfs_freq;
	unsigned long out = dfs_div->freq;

	/**
	 * factor = (IN / OUT) / 2
	 * MFI = integer(factor)
	 * MFN = (factor - MFI) * 36
	 */
	factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL;
	tmp64 = factor64 / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	factor32 = (uint32_t)tmp64;
	*mfi = factor32;

	tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)tmp64;

	/* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */
	factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL;
	factor64 += ((uint64_t)*mfi) * FP_PRECISION;
	factor64 *= 2ULL;
	ofreq = (((uint64_t)in) * FP_PRECISION) / factor64;

	if (ofreq != dfs_div->freq) {
		ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n",
		      dfs_div->freq);
		ERROR("Nearest freq = %" PRIu64 "\n", ofreq);
		return -EINVAL;
	}

	return 0;
}
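
/*
 * Worked example for get_dfs_mfi_mfn(), with purely illustrative frequencies:
 * for dfs_freq = 800 MHz and dfs_div->freq = 320 MHz,
 *   factor = (800 / 320) / 2 = 1.25, so MFI = 1 and MFN = 0.25 * 36 = 9.
 * The check at the end recomputes 800 MHz / (2 * (1 + 9 / 36)) = 320 MHz,
 * so the requested rate is accepted. Rates that cannot be represented exactly
 * with the 36-step fractional divider are rejected with -EINVAL.
 */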

static int init_dfs_port(uintptr_t dfs_addr, uint32_t port,
			 uint32_t mfi, uint32_t mfn)
{
	uint32_t portsr, portolsr;
	uint32_t mask, old_mfi, old_mfn;
	uint32_t dvport;
	bool init_dfs;

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port));

	old_mfi = DFS_DVPORTn_MFI(dvport);
	old_mfn = DFS_DVPORTn_MFN(dvport);

	portsr = mmio_read_32(DFS_PORTSR(dfs_addr));
	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));

	/* Skip configuration if it's not needed */
	if (((portsr & BIT_32(port)) != 0U) &&
	    ((portolsr & BIT_32(port)) == 0U) &&
	    (mfi == old_mfi) && (mfn == old_mfn)) {
		return 0;
	}

	init_dfs = (portsr == 0U);

	if (init_dfs) {
		mask = DFS_PORTRESET_MASK;
	} else {
		mask = DFS_PORTRESET_SET(BIT_32(port));
	}

	mmio_write_32(DFS_PORTOLSR(dfs_addr), mask);
	mmio_write_32(DFS_PORTRESET(dfs_addr), mask);

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) {
	}

	if (init_dfs) {
		mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_write_32(DFS_DVPORTn(dfs_addr, port),
		      DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn));

	if (init_dfs) {
		/* DFS clk enable programming */
		mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port));

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) {
	}

	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));
	if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) {
		ERROR("Failed to lock DFS divider\n");
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_dfs_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
	}

	return dfs_div->parent;
}

static int enable_dfs_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	unsigned int ldepth = depth;
	const struct s32cc_dfs *dfs;
	uintptr_t dfs_addr = 0UL;
	unsigned long dfs_freq;
	uint32_t mfi, mfn;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return -EINVAL;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if ((ret != 0) || (dfs_addr == 0UL)) {
		return -EINVAL;
	}

	ret = get_module_rate(&dfs->desc, drv, &dfs_freq, depth);
	if (ret != 0) {
		return ret;
	}

	ret = get_dfs_mfi_mfn(dfs_freq, dfs_div, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn);
}

typedef int (*enable_clk_t)(struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned int depth);
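
/*
 * Partition handling: a s32cc_part object stands for an MC_ME partition, a
 * s32cc_part_block for one of its COFB blocks, and a s32cc_part_block_link
 * ties a clock to such a block. Enabling a link walks the partition branch of
 * the tree (with reference counting), which ends up calling into the MC_ME
 * driver below.
 */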

static int enable_part(struct s32cc_clk_obj *module,
		       const struct s32cc_clk_drv *drv,
		       unsigned int depth)
{
	const struct s32cc_part *part = s32cc_obj2part(module);
	uint32_t part_no = part->partition_id;

	if ((drv->mc_me == 0UL) || (drv->mc_rgm == 0UL) || (drv->rdc == 0UL)) {
		return -EINVAL;
	}

	return mc_me_enable_partition(drv->mc_me, drv->mc_rgm, drv->rdc, part_no);
}

static int enable_part_block(struct s32cc_clk_obj *module,
			     const struct s32cc_clk_drv *drv,
			     unsigned int depth)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);
	const struct s32cc_part *part = block->part;
	uint32_t part_no = part->partition_id;
	unsigned int ldepth = depth;
	uint32_t cofb;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if ((block->block >= s32cc_part_block0) &&
	    (block->block <= s32cc_part_block15)) {
		cofb = (uint32_t)block->block - (uint32_t)s32cc_part_block0;
		mc_me_enable_part_cofb(drv->mc_me, part_no, cofb, block->status);
	} else {
		ERROR("Unknown partition block type: %d\n", block->block);
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_part_block_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);

	return &block->part->desc;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth);

static int enable_part_block_link(struct s32cc_clk_obj *module,
				  const struct s32cc_clk_drv *drv,
				  unsigned int depth)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);
	struct s32cc_part_block *block = link->block;
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* Move the enablement algorithm to partition tree */
	return enable_module_with_refcount(&block->desc, drv, ldepth);
}

static struct s32cc_clk_obj *
get_part_block_link_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);

	return link->parent;
}

static int get_part_block_link_freq(const struct s32cc_clk_obj *module,
				    const struct s32cc_clk_drv *drv,
				    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_part_block_link *block = s32cc_obj2partblocklink(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	return get_module_rate(block->parent, drv, rate, ldepth);
}

static void cgm_mux_div_config(uintptr_t cgm_addr, uint32_t mux,
			       uint32_t dc, uint32_t div_index)
{
	uint32_t updstat;
	uint32_t dc_val = mmio_read_32(MC_CGM_MUXn_DCm(cgm_addr, mux, div_index));

	dc_val &= (MC_CGM_MUXn_DCm_DIV_MASK | MC_CGM_MUXn_DCm_DE);

	if (dc_val == (MC_CGM_MUXn_DCm_DE | MC_CGM_MUXn_DCm_DIV_SET(dc))) {
		return;
	}

	/* Set the divider */
	mmio_write_32(MC_CGM_MUXn_DCm(cgm_addr, mux, div_index),
		      MC_CGM_MUXn_DCm_DE | MC_CGM_MUXn_DCm_DIV_SET(dc));

	/* Wait for divider to get updated */
	do {
		updstat = mmio_read_32(MC_CGM_MUXn_DIV_UPD_STAT(cgm_addr, mux));
	} while (MC_CGM_MUXn_DIV_UPD_STAT_DIVSTAT(updstat) != 0U);
}

static inline struct s32cc_clkmux *get_cgm_div_mux(const struct s32cc_cgm_div *cgm_div)
{
	const struct s32cc_clk_obj *parent = cgm_div->parent;
	const struct s32cc_clk_obj *mux_obj;
	const struct s32cc_clk *clk;

	if (parent == NULL) {
		ERROR("Failed to identify CGM DIV's parent\n");
		return NULL;
	}

	if (parent->type != s32cc_clk_t) {
		ERROR("The parent of the CGM DIV isn't a clock\n");
		return NULL;
	}

	clk = s32cc_obj2clk(parent);

	if (clk->module == NULL) {
		ERROR("The clock isn't connected to a module\n");
		return NULL;
	}

	mux_obj = clk->module;

	if ((mux_obj->type != s32cc_clkmux_t) &&
	    (mux_obj->type != s32cc_shared_clkmux_t)) {
		ERROR("The parent of the CGM DIV isn't a MUX\n");
		return NULL;
	}

	return s32cc_obj2clkmux(mux_obj);
}
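
/*
 * The divider computed by enable_cgm_div() is the ratio between the mux
 * frequency and the requested rate. As an illustration (values not taken from
 * a real clock tree): with a 800 MHz parent and a 200 MHz target, dc = 4 and
 * the hardware DIV field is programmed with dc - 1 = 3. Requests that do not
 * divide the parent rate exactly are rejected.
 */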

static int enable_cgm_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv, unsigned int depth)
{
	const struct s32cc_cgm_div *cgm_div = s32cc_obj2cgmdiv(module);
	const struct s32cc_clkmux *mux;
	unsigned int ldepth = depth;
	uintptr_t cgm_addr = 0ULL;
	uint64_t pfreq, dc64;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (cgm_div->parent == NULL) {
		ERROR("Failed to identify CGM divider's parent\n");
		return -EINVAL;
	}

	if (cgm_div->freq == 0U) {
		ERROR("The frequency of the divider %" PRIu32 " is not set\n",
		      cgm_div->index);
		return -EINVAL;
	}

	mux = get_cgm_div_mux(cgm_div);
	if (mux == NULL) {
		return -EINVAL;
	}

	ret = get_base_addr(mux->module, drv, &cgm_addr);
	if (ret != 0) {
		ERROR("Failed to get CGM base address of the MUX module %d\n",
		      mux->module);
		return ret;
	}

	ret = get_module_rate(cgm_div->parent, drv, &pfreq, ldepth);
	if (ret != 0) {
		ERROR("Failed to enable the div due to unknown frequency of "
		      "the CGM MUX %" PRIu8 "(CGM=%" PRIxPTR ")\n",
		      mux->index, cgm_addr);
		return -EINVAL;
	}

	dc64 = ((pfreq * FP_PRECISION) / cgm_div->freq) / FP_PRECISION;
	dc = (uint32_t)dc64;

	if ((pfreq / dc64) != cgm_div->freq) {
		ERROR("Cannot set CGM divider (mux:%" PRIu8 ", div:%" PRIu32
		      ") for input = %lu & output = %lu, Nearest freq = %lu\n",
		      mux->index, cgm_div->index, (unsigned long)pfreq,
		      cgm_div->freq, (unsigned long)(pfreq / dc));
		return -EINVAL;
	}

	cgm_mux_div_config(cgm_addr, mux->index, dc - 1U, cgm_div->index);
	return 0;
}

static int set_cgm_div_freq(const struct s32cc_clk_obj *module,
			    unsigned long rate, unsigned long *orate,
			    unsigned int *depth)
{
	struct s32cc_cgm_div *cgm_div = s32cc_obj2cgmdiv(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (cgm_div->parent == NULL) {
		ERROR("Failed to identify the CGM divider's parent\n");
		return -EINVAL;
	}

	cgm_div->freq = rate;
	*orate = rate;

	return 0;
}

static inline bool is_cgm_div_enabled(uintptr_t cgm_addr, uint32_t mux,
				      uint32_t div_index)
{
	uint32_t dc_val;

	dc_val = mmio_read_32(MC_CGM_MUXn_DCm(cgm_addr, mux, div_index));

	return ((dc_val & MC_CGM_MUXn_DCm_DE) != 0U);
}

static unsigned long calc_cgm_div_freq(uintptr_t cgm_addr, uint32_t mux,
				       uint32_t div_index, unsigned long pfreq)
{
	uint32_t dc_val;
	uint32_t dc_div;

	dc_val = mmio_read_32(MC_CGM_MUXn_DCm(cgm_addr, mux, div_index));
	dc_div = MC_CGM_MUXn_DCm_DIV(dc_val) + 1U;

	return pfreq * FP_PRECISION / dc_div / FP_PRECISION;
}

static int get_cgm_div_freq(const struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_cgm_div *cgm_div = s32cc_obj2cgmdiv(module);
	const struct s32cc_clkmux *mux;
	unsigned int ldepth = depth;
	uintptr_t cgm_addr = 0ULL;
	unsigned long pfreq;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (cgm_div->parent == NULL) {
		ERROR("Failed to identify CGM divider's parent\n");
		return -EINVAL;
	}

	mux = get_cgm_div_mux(cgm_div);
	if (mux == NULL) {
		return -EINVAL;
	}

	ret = get_base_addr(mux->module, drv, &cgm_addr);
	if (ret != 0) {
		ERROR("Failed to get CGM base address of the MUX module %d\n",
		      mux->module);
		return ret;
	}

	if (!is_cgm_div_enabled(cgm_addr, mux->index, cgm_div->index)) {
		*rate = cgm_div->freq;
		return 0;
	}

	ret = get_module_rate(cgm_div->parent, drv, &pfreq, ldepth);
	if (ret != 0) {
		ERROR("Failed to get the frequency of CGM MUX %" PRIu8 "(CGM=0x%" PRIxPTR ")\n",
		      mux->index, cgm_addr);
		return ret;
	}

	*rate = calc_cgm_div_freq(cgm_addr, mux->index, cgm_div->index, pfreq);

	return 0;
}

static int no_enable(struct s32cc_clk_obj *module,
		     const struct s32cc_clk_drv *drv,
		     unsigned int depth)
{
	return 0;
}

static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod,
				 const struct s32cc_clk_drv *drv, bool leaf_node,
				 unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	if (mod == NULL) {
		return 0;
	}

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* Refcount will be updated as part of the recursion */
	if (leaf_node) {
		return en_cb(mod, drv, ldepth);
	}

	if (mod->refcount == 0U) {
		ret = en_cb(mod, drv, ldepth);
	}

	if (ret == 0) {
		mod->refcount++;
	}

	return ret;
}

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module);
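
/*
 * enable_module() walks the clock tree from the requested module towards its
 * root: the parent chain is enabled first through exec_cb_with_refcount(),
 * which increments a reference count and only invokes the callback the first
 * time a module is enabled, and then the module's own enable callback runs as
 * a leaf node. MAX_STACK_DEPTH bounds this recursion.
 */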

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth)
{
	struct s32cc_clk_obj *parent = get_module_parent(module);
	static const enable_clk_t enable_clbs[13] = {
		[s32cc_clk_t] = no_enable,
		[s32cc_osc_t] = enable_osc,
		[s32cc_pll_t] = enable_pll,
		[s32cc_pll_out_div_t] = enable_pll_div,
		[s32cc_clkmux_t] = enable_mux,
		[s32cc_shared_clkmux_t] = enable_mux,
		[s32cc_dfs_t] = enable_dfs,
		[s32cc_dfs_div_t] = enable_dfs_div,
		[s32cc_part_t] = enable_part,
		[s32cc_part_block_t] = enable_part_block,
		[s32cc_part_block_link_t] = enable_part_block_link,
		[s32cc_cgm_div_t] = enable_cgm_div,
	};
	unsigned int ldepth = depth;
	uint32_t index;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (drv == NULL) {
		return -EINVAL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(enable_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return -EINVAL;
	}

	if (enable_clbs[index] == NULL) {
		ERROR("Undefined callback for the clock type: %d\n",
		      module->type);
		return -EINVAL;
	}

	parent = get_module_parent(module);

	ret = exec_cb_with_refcount(enable_module, parent, drv,
				    false, ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = exec_cb_with_refcount(enable_clbs[index], module, drv,
				    true, ldepth);
	if (ret != 0) {
		return ret;
	}

	return ret;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth)
{
	return exec_cb_with_refcount(enable_module, module, drv, false, depth);
}

static int s32cc_clk_enable(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	struct s32cc_clk *clk;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	return enable_module_with_refcount(&clk->desc, drv, depth);
}

static void s32cc_clk_disable(unsigned long id)
{
}

static bool s32cc_clk_is_enabled(unsigned long id)
{
	return false;
}
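
/*
 * The set_*_freq() helpers below do not touch the hardware. They only record
 * and validate the requested rates in the clock objects; the values are
 * committed to registers later, when the clock is enabled.
 */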

static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_osc *osc = s32cc_obj2osc(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((osc->freq != 0UL) && (rate != osc->freq)) {
		ERROR("Already initialized oscillator. freq = %lu\n",
		      osc->freq);
		return -EINVAL;
	}

	osc->freq = rate;
	*orate = osc->freq;

	return 0;
}

static int get_osc_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (osc->freq == 0UL) {
		ERROR("Uninitialized oscillator\n");
		return -EINVAL;
	}

	*rate = osc->freq;

	return 0;
}

static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) &&
	    ((rate < clk->min_freq) || (rate > clk->max_freq))) {
		ERROR("%lu frequency is out of the allowed range: [%lu:%lu]\n",
		      rate, clk->min_freq, clk->max_freq);
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return set_module_rate(clk->module, rate, orate, depth);
	}

	if (clk->pclock != NULL) {
		return set_clk_freq(&clk->pclock->desc, rate, orate, depth);
	}

	return -EINVAL;
}

static int get_clk_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv, unsigned long *rate,
			unsigned int depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Invalid clock\n");
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return get_module_rate(clk->module, drv, rate, ldepth);
	}

	if (clk->pclock == NULL) {
		ERROR("Invalid clock parent\n");
		return -EINVAL;
	}

	return get_clk_freq(&clk->pclock->desc, drv, rate, ldepth);
}

static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll *pll = s32cc_obj2pll(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) {
		ERROR("PLL frequency was already set\n");
		return -EINVAL;
	}

	pll->vco_freq = rate;
	*orate = pll->vco_freq;

	return 0;
}
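
/*
 * get_pll_freq() reconstructs the VCO rate from the PLLDV/PLLFD registers:
 * rate = (parent / RDIV) * (MFI + MFN / 18432). Using the same illustrative
 * values as above (40 MHz reference, RDIV = 1, MFI = 32, MFN = 9216), this
 * evaluates to 40 MHz * 32.5 = 1300 MHz.
 */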

static int get_pll_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	const struct s32cc_clk *source;
	uint32_t mfi, mfn, rdiv, plldv;
	unsigned long prate, clk_src;
	unsigned int ldepth = depth;
	uintptr_t pll_addr = 0UL;
	uint64_t t1, t2;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	/* Disabled PLL */
	if (!is_pll_enabled(pll_addr)) {
		*rate = pll->vco_freq;
		return 0;
	}

	clk_src = mmio_read_32(PLLDIG_PLLCLKMUX(pll_addr));
	switch (clk_src) {
	case 0:
		clk_src = S32CC_CLK_FIRC;
		break;
	case 1:
		clk_src = S32CC_CLK_FXOSC;
		break;
	default:
		ERROR("Failed to identify PLL source id %" PRIu64 "\n", clk_src);
		return -EINVAL;
	}

	source = s32cc_get_arch_clk(clk_src);
	if (source == NULL) {
		ERROR("Failed to get PLL source clock\n");
		return -EINVAL;
	}

	ret = get_module_rate(&source->desc, drv, &prate, ldepth);
	if (ret != 0) {
		ERROR("Failed to get PLL's parent frequency\n");
		return ret;
	}

	plldv = mmio_read_32(PLLDIG_PLLDV(pll_addr));
	mfi = PLLDIG_PLLDV_MFI(plldv);
	rdiv = PLLDIG_PLLDV_RDIV(plldv);
	if (rdiv == 0U) {
		rdiv = 1;
	}

	/* Frac-N mode */
	mfn = PLLDIG_PLLFD_MFN_SET(mmio_read_32(PLLDIG_PLLFD(pll_addr)));

	/* PLL VCO frequency in Fractional mode when PLLDV[RDIV] is not 0 */
	t1 = prate / rdiv;
	t2 = (mfi * FP_PRECISION) + (mfn * FP_PRECISION / 18432U);

	*rate = t1 * t2 / FP_PRECISION;

	return 0;
}

static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	const struct s32cc_pll *pll;
	unsigned long prate, dc;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return -EINVAL;
	}

	pll = s32cc_obj2pll(pdiv->parent);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	prate = pll->vco_freq;

	/**
	 * The PLL is not initialized yet, so let's take a risk
	 * and accept the proposed rate.
	 */
	if (prate == 0UL) {
		pdiv->freq = rate;
		*orate = rate;
		return 0;
	}

	/* Decline the rate if it cannot be derived exactly from the PLL frequency. */
	dc = prate / rate;
	if ((prate / dc) != rate) {
		return -EINVAL;
	}

	pdiv->freq = rate;
	*orate = pdiv->freq;

	return 0;
}

static int get_pll_div_freq(const struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	const struct s32cc_pll *pll;
	unsigned int ldepth = depth;
	uintptr_t pll_addr = 0UL;
	unsigned long pfreq;
	uint32_t pllodiv;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	pll = get_div_pll(pdiv);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return -EINVAL;
	}

	ret = get_module_rate(pdiv->parent, drv, &pfreq, ldepth);
	if (ret != 0) {
		ERROR("Failed to get the frequency of PLL %" PRIxPTR "\n",
		      pll_addr);
		return ret;
	}

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, pdiv->index));

	/* Disabled module */
	if ((pllodiv & PLLDIG_PLLODIV_DE) == 0U) {
		*rate = pdiv->freq;
		return 0;
	}

	dc = PLLDIG_PLLODIV_DIV(pllodiv);
	*rate = (pfreq * FP_PRECISION) / (dc + 1U) / FP_PRECISION;

	return 0;
}

static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			      unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (fdiv->parent == NULL) {
		ERROR("The divider doesn't have a valid parent\n");
		return -EINVAL;
	}

	ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth);

	/* Update the output rate based on the parent's rate */
	*orate /= fdiv->rate_div;

	return ret;
}

static int get_fixed_div_freq(const struct s32cc_clk_obj *module,
			      const struct s32cc_clk_drv *drv,
			      unsigned long *rate, unsigned int depth)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
	unsigned long pfreq;
	int ret;

	ret = get_module_rate(fdiv->parent, drv, &pfreq, depth);
	if (ret != 0) {
		return ret;
	}

	*rate = (pfreq * FP_PRECISION / fdiv->rate_div) / FP_PRECISION;
	return 0;
}

static inline struct s32cc_clk_obj *get_fixed_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);

	return fdiv->parent;
}

static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
		      mux->index, mux->source_id);
		return -EINVAL;
	}

	return set_module_rate(&clk->desc, rate, orate, depth);
}

static int get_mux_freq(const struct s32cc_clk_obj *module,
			const struct s32cc_clk_drv *drv,
			unsigned long *rate, unsigned int depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
		      mux->index, mux->source_id);
		return -EINVAL;
	}

	return get_clk_freq(&clk->desc, drv, rate, ldepth);
}

static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	const struct s32cc_dfs *dfs;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
		return -EINVAL;
	}

	/* Sanity check */
	dfs = s32cc_obj2dfs(dfs_div->parent);
	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
		return -EINVAL;
	}

	if ((dfs_div->freq != 0U) && (dfs_div->freq != rate)) {
		ERROR("DFS DIV frequency was already set to %lu\n",
		      dfs_div->freq);
		return -EINVAL;
	}

	dfs_div->freq = rate;
	*orate = rate;

	return ret;
}

static unsigned long compute_dfs_div_freq(unsigned long pfreq, uint32_t mfi, uint32_t mfn)
{
	unsigned long freq;

	/**
	 * Formula for input and output clocks of each port divider.
	 * See 'Digital Frequency Synthesizer' chapter from Reference Manual.
	 *
	 * freq = pfreq / (2 * (mfi + mfn / 36.0));
	 */
	freq = (mfi * FP_PRECISION) + (mfn * FP_PRECISION / 36UL);
	freq *= 2UL;
	freq = pfreq * FP_PRECISION / freq;

	return freq;
}
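
/*
 * For example (illustrative values, matching the ones used for
 * get_dfs_mfi_mfn() above): with pfreq = 800 MHz, mfi = 1 and mfn = 9,
 * compute_dfs_div_freq() returns 800 MHz / (2 * (1 + 9 / 36)) = 320 MHz.
 */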

static int get_dfs_div_freq(const struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned long *rate, unsigned int depth)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	unsigned int ldepth = depth;
	const struct s32cc_dfs *dfs;
	uint32_t dvport, mfi, mfn;
	uintptr_t dfs_addr = 0UL;
	unsigned long pfreq;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return -EINVAL;
	}

	ret = get_module_rate(dfs_div->parent, drv, &pfreq, ldepth);
	if (ret != 0) {
		return ret;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if (ret != 0) {
		ERROR("Failed to detect the DFS instance\n");
		return ret;
	}

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, dfs_div->index));

	mfi = DFS_DVPORTn_MFI(dvport);
	mfn = DFS_DVPORTn_MFN(dvport);

	/* Disabled port */
	if ((mfi == 0U) && (mfn == 0U)) {
		*rate = dfs_div->freq;
		return 0;
	}

	*rate = compute_dfs_div_freq(pfreq, mfi, mfn);
	return 0;
}

static int set_part_block_link_freq(const struct s32cc_clk_obj *module,
				    unsigned long rate, unsigned long *orate,
				    const unsigned int *depth)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);
	const struct s32cc_clk_obj *parent = link->parent;
	unsigned int ldepth = *depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (parent == NULL) {
		ERROR("Partition block link with no parent\n");
		return -EINVAL;
	}

	return set_module_rate(parent, rate, orate, &ldepth);
}

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth)
{
	int ret = 0;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	ret = -EINVAL;

	switch (module->type) {
	case s32cc_clk_t:
		ret = set_clk_freq(module, rate, orate, depth);
		break;
	case s32cc_osc_t:
		ret = set_osc_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_t:
		ret = set_pll_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_out_div_t:
		ret = set_pll_div_freq(module, rate, orate, depth);
		break;
	case s32cc_fixed_div_t:
		ret = set_fixed_div_freq(module, rate, orate, depth);
		break;
	case s32cc_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_shared_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_cgm_div_t:
		ret = set_cgm_div_freq(module, rate, orate, depth);
		break;
	case s32cc_dfs_t:
		ERROR("Setting the frequency of a DFS is not allowed!\n");
		break;
	case s32cc_dfs_div_t:
		ret = set_dfs_div_freq(module, rate, orate, depth);
		break;
	case s32cc_part_block_link_t:
		ret = set_part_block_link_freq(module, rate, orate, depth);
		break;
	case s32cc_part_t:
		ERROR("It's not allowed to set the frequency of a partition!\n");
		break;
	case s32cc_part_block_t:
		ERROR("It's not allowed to set the frequency of a partition block!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int get_module_rate(const struct s32cc_clk_obj *module,
			   const struct s32cc_clk_drv *drv,
			   unsigned long *rate,
			   unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (module->type) {
	case s32cc_osc_t:
		ret = get_osc_freq(module, drv, rate, ldepth);
		break;
	case s32cc_clk_t:
		ret = get_clk_freq(module, drv, rate, ldepth);
		break;
	case s32cc_pll_t:
		ret = get_pll_freq(module, drv, rate, ldepth);
		break;
	case s32cc_dfs_t:
		ret = get_dfs_freq(module, drv, rate, ldepth);
		break;
	case s32cc_dfs_div_t:
		ret = get_dfs_div_freq(module, drv, rate, ldepth);
		break;
	case s32cc_fixed_div_t:
		ret = get_fixed_div_freq(module, drv, rate, ldepth);
		break;
	case s32cc_pll_out_div_t:
		ret = get_pll_div_freq(module, drv, rate, ldepth);
		break;
	case s32cc_clkmux_t:
		ret = get_mux_freq(module, drv, rate, ldepth);
		break;
	case s32cc_shared_clkmux_t:
		ret = get_mux_freq(module, drv, rate, ldepth);
		break;
	case s32cc_part_t:
		ERROR("s32cc_part_t cannot be used to get rate\n");
		break;
	case s32cc_part_block_t:
		ERROR("s32cc_part_block_t cannot be used to get rate\n");
		break;
	case s32cc_part_block_link_t:
		ret = get_part_block_link_freq(module, drv, rate, ldepth);
		break;
	case s32cc_cgm_div_t:
		ret = get_cgm_div_freq(module, drv, rate, ldepth);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int s32cc_clk_set_rate(unsigned long id, unsigned long rate,
			      unsigned long *orate)
{
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	ret = set_module_rate(&clk->desc, rate, orate, &depth);
	if (ret != 0) {
		ERROR("Failed to set frequency (%lu Hz) for clock %lu\n",
		      rate, id);
	}

	return ret;
}

static unsigned long s32cc_clk_get_rate(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	unsigned long rate = 0UL;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return 0;
	}

	ret = get_module_rate(&clk->desc, drv, &rate, depth);
	if (ret != 0) {
		ERROR("Failed to get frequency (%lu Hz) for clock %lu\n",
		      rate, id);
		return 0;
	}

	return rate;
}

static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module)
{
	return NULL;
}

typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj);

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module)
{
	static const get_parent_clb_t parents_clbs[13] = {
		[s32cc_clk_t] = get_clk_parent,
		[s32cc_osc_t] = get_no_parent,
		[s32cc_pll_t] = get_pll_parent,
		[s32cc_pll_out_div_t] = get_pll_div_parent,
		[s32cc_clkmux_t] = get_mux_parent,
		[s32cc_shared_clkmux_t] = get_mux_parent,
		[s32cc_dfs_t] = get_dfs_parent,
		[s32cc_dfs_div_t] = get_dfs_div_parent,
		[s32cc_part_t] = get_no_parent,
		[s32cc_fixed_div_t] = get_fixed_div_parent,
		[s32cc_part_block_t] = get_part_block_parent,
		[s32cc_part_block_link_t] = get_part_block_link_parent,
	};
	uint32_t index;

	if (module == NULL) {
		return NULL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(parents_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return NULL;
	}

	if (parents_clbs[index] == NULL) {
		ERROR("Undefined parent getter for type: %d\n", module->type);
		return NULL;
	}

	return parents_clbs[index](module);
}

static int s32cc_clk_get_parent(unsigned long id)
{
	struct s32cc_clk *parent_clk;
	const struct s32cc_clk_obj *parent;
	const struct s32cc_clk *clk;
	unsigned long parent_id;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = get_module_parent(clk->module);
	if (parent == NULL) {
		return -EINVAL;
	}

	parent_clk = s32cc_obj2clk(parent);
	if (parent_clk == NULL) {
		return -EINVAL;
	}

	ret = s32cc_get_clk_id(parent_clk, &parent_id);
	if (ret != 0) {
		return ret;
	}

	if (parent_id > (unsigned long)INT_MAX) {
		return -E2BIG;
	}

	return (int)parent_id;
}

static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id)
{
	const struct s32cc_clk *parent;
	const struct s32cc_clk *clk;
	bool valid_source = false;
	struct s32cc_clkmux *mux;
	uint8_t i;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = s32cc_get_arch_clk(parent_id);
	if (parent == NULL) {
		return -EINVAL;
	}

	if (!is_s32cc_clk_mux(clk)) {
		ERROR("Clock %lu is not a mux\n", id);
		return -EINVAL;
	}

	mux = s32cc_clk2mux(clk);
	if (mux == NULL) {
		ERROR("Failed to cast clock %lu to clock mux\n", id);
		return -EINVAL;
	}

	for (i = 0; i < mux->nclks; i++) {
		if (mux->clkids[i] == parent_id) {
			valid_source = true;
			break;
		}
	}

	if (!valid_source) {
		ERROR("Clock %lu is not a valid clock for mux %lu\n",
		      parent_id, id);
		return -EINVAL;
	}

	mux->source_id = parent_id;

	return 0;
}

static int s32cc_clk_mmap_regs(const struct s32cc_clk_drv *drv)
{
	const uintptr_t base_addrs[12] = {
		drv->fxosc_base,
		drv->armpll_base,
		drv->periphpll_base,
		drv->armdfs_base,
		drv->periphdfs_base,
		drv->cgm0_base,
		drv->cgm1_base,
		drv->cgm5_base,
		drv->ddrpll_base,
		drv->mc_me,
		drv->mc_rgm,
		drv->rdc,
	};
	size_t i;
	int ret;

	for (i = 0U; i < ARRAY_SIZE(base_addrs); i++) {
		ret = mmap_add_dynamic_region(base_addrs[i], base_addrs[i],
					      PAGE_SIZE,
					      MT_DEVICE | MT_RW | MT_SECURE);
		if (ret != 0) {
			ERROR("Failed to map clock module 0x%" PRIxPTR "\n",
			      base_addrs[i]);
			return ret;
		}
	}

	return 0;
}

int s32cc_clk_register_drv(bool mmap_regs)
{
	static const struct clk_ops s32cc_clk_ops = {
		.enable = s32cc_clk_enable,
		.disable = s32cc_clk_disable,
		.is_enabled = s32cc_clk_is_enabled,
		.get_rate = s32cc_clk_get_rate,
		.set_rate = s32cc_clk_set_rate,
		.get_parent = s32cc_clk_get_parent,
		.set_parent = s32cc_clk_set_parent,
	};
	const struct s32cc_clk_drv *drv;

	clk_register(&s32cc_clk_ops);

	drv = get_drv();
	if (drv == NULL) {
		return -EINVAL;
	}

	if (mmap_regs) {
		return s32cc_clk_mmap_regs(drv);
	}

	return 0;
}