1 /* 2 * Copyright 2024 NXP 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 #include <errno.h> 7 8 #include <s32cc-clk-regs.h> 9 10 #include <common/debug.h> 11 #include <drivers/clk.h> 12 #include <lib/mmio.h> 13 #include <s32cc-clk-ids.h> 14 #include <s32cc-clk-modules.h> 15 #include <s32cc-clk-utils.h> 16 17 #define MAX_STACK_DEPTH (40U) 18 19 /* This is used for floating-point precision calculations. */ 20 #define FP_PRECISION (100000000UL) 21 22 struct s32cc_clk_drv { 23 uintptr_t fxosc_base; 24 uintptr_t armpll_base; 25 uintptr_t periphpll_base; 26 uintptr_t armdfs_base; 27 uintptr_t cgm0_base; 28 uintptr_t cgm1_base; 29 uintptr_t ddrpll_base; 30 }; 31 32 static int update_stack_depth(unsigned int *depth) 33 { 34 if (*depth == 0U) { 35 return -ENOMEM; 36 } 37 38 (*depth)--; 39 return 0; 40 } 41 42 static struct s32cc_clk_drv *get_drv(void) 43 { 44 static struct s32cc_clk_drv driver = { 45 .fxosc_base = FXOSC_BASE_ADDR, 46 .armpll_base = ARMPLL_BASE_ADDR, 47 .periphpll_base = PERIPHPLL_BASE_ADDR, 48 .armdfs_base = ARM_DFS_BASE_ADDR, 49 .cgm0_base = CGM0_BASE_ADDR, 50 .cgm1_base = CGM1_BASE_ADDR, 51 .ddrpll_base = DDRPLL_BASE_ADDR, 52 }; 53 54 return &driver; 55 } 56 57 static int enable_module(struct s32cc_clk_obj *module, 58 const struct s32cc_clk_drv *drv, 59 unsigned int depth); 60 61 static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module) 62 { 63 const struct s32cc_clk *clk = s32cc_obj2clk(module); 64 65 if (clk->module != NULL) { 66 return clk->module; 67 } 68 69 if (clk->pclock != NULL) { 70 return &clk->pclock->desc; 71 } 72 73 return NULL; 74 } 75 76 static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv, 77 uintptr_t *base) 78 { 79 int ret = 0; 80 81 switch (id) { 82 case S32CC_FXOSC: 83 *base = drv->fxosc_base; 84 break; 85 case S32CC_ARM_PLL: 86 *base = drv->armpll_base; 87 break; 88 case S32CC_PERIPH_PLL: 89 *base = drv->periphpll_base; 90 break; 91 case S32CC_DDR_PLL: 92 *base = drv->ddrpll_base; 93 break; 94 case S32CC_ARM_DFS: 95 *base = drv->armdfs_base; 96 break; 97 case S32CC_CGM0: 98 *base = drv->cgm0_base; 99 break; 100 case S32CC_CGM1: 101 *base = drv->cgm1_base; 102 break; 103 case S32CC_FIRC: 104 break; 105 case S32CC_SIRC: 106 break; 107 default: 108 ret = -EINVAL; 109 break; 110 } 111 112 if (ret != 0) { 113 ERROR("Unknown clock source id: %u\n", id); 114 } 115 116 return ret; 117 } 118 119 static void enable_fxosc(const struct s32cc_clk_drv *drv) 120 { 121 uintptr_t fxosc_base = drv->fxosc_base; 122 uint32_t ctrl; 123 124 ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base)); 125 if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) { 126 return; 127 } 128 129 ctrl = FXOSC_CTRL_COMP_EN; 130 ctrl &= ~FXOSC_CTRL_OSC_BYP; 131 ctrl |= FXOSC_CTRL_EOCV(0x1); 132 ctrl |= FXOSC_CTRL_GM_SEL(0x7); 133 mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl); 134 135 /* Switch ON the crystal oscillator. */ 136 mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON); 137 138 /* Wait until the clock is stable. */ 139 while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) { 140 } 141 } 142 143 static int enable_osc(struct s32cc_clk_obj *module, 144 const struct s32cc_clk_drv *drv, 145 unsigned int depth) 146 { 147 const struct s32cc_osc *osc = s32cc_obj2osc(module); 148 int ret = 0; 149 150 ret = update_stack_depth(&depth); 151 if (ret != 0) { 152 return ret; 153 } 154 155 switch (osc->source) { 156 case S32CC_FXOSC: 157 enable_fxosc(drv); 158 break; 159 /* FIRC and SIRC oscillators are enabled by default */ 160 case S32CC_FIRC: 161 break; 162 case S32CC_SIRC: 163 break; 164 default: 165 ERROR("Invalid oscillator %d\n", osc->source); 166 ret = -EINVAL; 167 break; 168 }; 169 170 return ret; 171 } 172 173 static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module) 174 { 175 const struct s32cc_pll *pll = s32cc_obj2pll(module); 176 177 if (pll->source == NULL) { 178 ERROR("Failed to identify PLL's parent\n"); 179 } 180 181 return pll->source; 182 } 183 184 static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq, 185 uint32_t *mfi, uint32_t *mfn) 186 187 { 188 unsigned long vco; 189 unsigned long mfn64; 190 191 /* FRAC-N mode */ 192 *mfi = (uint32_t)(pll_vco / ref_freq); 193 194 /* MFN formula : (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */ 195 mfn64 = pll_vco % ref_freq; 196 mfn64 *= FP_PRECISION; 197 mfn64 /= ref_freq; 198 mfn64 *= 18432UL; 199 mfn64 /= FP_PRECISION; 200 201 if (mfn64 > UINT32_MAX) { 202 return -EINVAL; 203 } 204 205 *mfn = (uint32_t)mfn64; 206 207 vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL; 208 vco += (unsigned long)*mfi * FP_PRECISION; 209 vco *= ref_freq; 210 vco /= FP_PRECISION; 211 212 if (vco != pll_vco) { 213 ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n", 214 pll_vco, vco); 215 return -EINVAL; 216 } 217 218 return 0; 219 } 220 221 static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll) 222 { 223 const struct s32cc_clk_obj *source = pll->source; 224 const struct s32cc_clk *clk; 225 226 if (source == NULL) { 227 ERROR("Failed to identify PLL's parent\n"); 228 return NULL; 229 } 230 231 if (source->type != s32cc_clk_t) { 232 ERROR("The parent of the PLL isn't a clock\n"); 233 return NULL; 234 } 235 236 clk = s32cc_obj2clk(source); 237 238 if (clk->module == NULL) { 239 ERROR("The clock isn't connected to a module\n"); 240 return NULL; 241 } 242 243 source = clk->module; 244 245 if ((source->type != s32cc_clkmux_t) && 246 (source->type != s32cc_shared_clkmux_t)) { 247 ERROR("The parent of the PLL isn't a MUX\n"); 248 return NULL; 249 } 250 251 return s32cc_obj2clkmux(source); 252 } 253 254 static void disable_odiv(uintptr_t pll_addr, uint32_t div_index) 255 { 256 mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE); 257 } 258 259 static void enable_odiv(uintptr_t pll_addr, uint32_t div_index) 260 { 261 mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE); 262 } 263 264 static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs) 265 { 266 uint32_t i; 267 268 for (i = 0; i < ndivs; i++) { 269 disable_odiv(pll_addr, i); 270 } 271 } 272 273 static void enable_pll_hw(uintptr_t pll_addr) 274 { 275 /* Enable the PLL. */ 276 mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0); 277 278 /* Poll until PLL acquires lock. */ 279 while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) { 280 } 281 } 282 283 static void disable_pll_hw(uintptr_t pll_addr) 284 { 285 mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD); 286 } 287 288 static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr, 289 const struct s32cc_clk_drv *drv, uint32_t sclk_id, 290 unsigned long sclk_freq) 291 { 292 uint32_t rdiv = 1, mfi, mfn; 293 int ret; 294 295 ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn); 296 if (ret != 0) { 297 return -EINVAL; 298 } 299 300 /* Disable ODIVs*/ 301 disable_odivs(pll_addr, pll->ndividers); 302 303 /* Disable PLL */ 304 disable_pll_hw(pll_addr); 305 306 /* Program PLLCLKMUX */ 307 mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id); 308 309 /* Program VCO */ 310 mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr), 311 PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK, 312 PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi)); 313 314 mmio_write_32(PLLDIG_PLLFD(pll_addr), 315 PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN); 316 317 enable_pll_hw(pll_addr); 318 319 return ret; 320 } 321 322 static int enable_pll(struct s32cc_clk_obj *module, 323 const struct s32cc_clk_drv *drv, 324 unsigned int depth) 325 { 326 const struct s32cc_pll *pll = s32cc_obj2pll(module); 327 const struct s32cc_clkmux *mux; 328 uintptr_t pll_addr = UL(0x0); 329 unsigned long sclk_freq; 330 uint32_t sclk_id; 331 int ret; 332 333 ret = update_stack_depth(&depth); 334 if (ret != 0) { 335 return ret; 336 } 337 338 mux = get_pll_mux(pll); 339 if (mux == NULL) { 340 return -EINVAL; 341 } 342 343 if (pll->instance != mux->module) { 344 ERROR("MUX type is not in sync with PLL ID\n"); 345 return -EINVAL; 346 } 347 348 ret = get_base_addr(pll->instance, drv, &pll_addr); 349 if (ret != 0) { 350 ERROR("Failed to detect PLL instance\n"); 351 return ret; 352 } 353 354 switch (mux->source_id) { 355 case S32CC_CLK_FIRC: 356 sclk_freq = 48U * MHZ; 357 sclk_id = 0; 358 break; 359 case S32CC_CLK_FXOSC: 360 sclk_freq = 40U * MHZ; 361 sclk_id = 1; 362 break; 363 default: 364 ERROR("Invalid source selection for PLL 0x%lx\n", 365 pll_addr); 366 return -EINVAL; 367 }; 368 369 return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq); 370 } 371 372 static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv) 373 { 374 const struct s32cc_clk_obj *parent; 375 376 parent = pdiv->parent; 377 if (parent == NULL) { 378 ERROR("Failed to identify PLL divider's parent\n"); 379 return NULL; 380 } 381 382 if (parent->type != s32cc_pll_t) { 383 ERROR("The parent of the divider is not a PLL instance\n"); 384 return NULL; 385 } 386 387 return s32cc_obj2pll(parent); 388 } 389 390 static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc) 391 { 392 uint32_t pllodiv; 393 uint32_t pdiv; 394 395 pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index)); 396 pdiv = PLLDIG_PLLODIV_DIV(pllodiv); 397 398 if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) { 399 return; 400 } 401 402 if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) { 403 disable_odiv(pll_addr, div_index); 404 } 405 406 pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U); 407 mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv); 408 409 enable_odiv(pll_addr, div_index); 410 } 411 412 static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module) 413 { 414 const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module); 415 416 if (pdiv->parent == NULL) { 417 ERROR("Failed to identify PLL DIV's parent\n"); 418 } 419 420 return pdiv->parent; 421 } 422 423 static int enable_pll_div(struct s32cc_clk_obj *module, 424 const struct s32cc_clk_drv *drv, 425 unsigned int depth) 426 { 427 const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module); 428 uintptr_t pll_addr = 0x0ULL; 429 const struct s32cc_pll *pll; 430 uint32_t dc; 431 int ret; 432 433 ret = update_stack_depth(&depth); 434 if (ret != 0) { 435 return ret; 436 } 437 438 pll = get_div_pll(pdiv); 439 if (pll == NULL) { 440 ERROR("The parent of the PLL DIV is invalid\n"); 441 return 0; 442 } 443 444 ret = get_base_addr(pll->instance, drv, &pll_addr); 445 if (ret != 0) { 446 ERROR("Failed to detect PLL instance\n"); 447 return -EINVAL; 448 } 449 450 dc = (uint32_t)(pll->vco_freq / pdiv->freq); 451 452 config_pll_out_div(pll_addr, pdiv->index, dc); 453 454 return 0; 455 } 456 457 static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source, 458 bool safe_clk) 459 { 460 uint32_t css, csc; 461 462 css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)); 463 464 /* Already configured */ 465 if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) && 466 (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) && 467 ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) { 468 return 0; 469 } 470 471 /* Ongoing clock switch? */ 472 while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) & 473 MC_CGM_MUXn_CSS_SWIP) != 0U) { 474 } 475 476 csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)); 477 478 /* Clear previous source. */ 479 csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK); 480 481 if (!safe_clk) { 482 /* Select the clock source and trigger the clock switch. */ 483 csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW; 484 } else { 485 /* Switch to safe clock */ 486 csc |= MC_CGM_MUXn_CSC_SAFE_SW; 487 } 488 489 mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc); 490 491 /* Wait for configuration bit to auto-clear. */ 492 while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) & 493 MC_CGM_MUXn_CSC_CLK_SW) != 0U) { 494 } 495 496 /* Is the clock switch completed? */ 497 while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) & 498 MC_CGM_MUXn_CSS_SWIP) != 0U) { 499 } 500 501 /* 502 * Check if the switch succeeded. 503 * Check switch trigger cause and the source. 504 */ 505 css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)); 506 if (!safe_clk) { 507 if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) && 508 (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) { 509 return 0; 510 } 511 512 ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n", 513 mux, source, cgm_addr); 514 } else { 515 if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) || 516 (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) && 517 ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) { 518 return 0; 519 } 520 521 ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n", 522 mux, cgm_addr); 523 } 524 525 return -EINVAL; 526 } 527 528 static int enable_cgm_mux(const struct s32cc_clkmux *mux, 529 const struct s32cc_clk_drv *drv) 530 { 531 uintptr_t cgm_addr = UL(0x0); 532 uint32_t mux_hw_clk; 533 int ret; 534 535 ret = get_base_addr(mux->module, drv, &cgm_addr); 536 if (ret != 0) { 537 return ret; 538 } 539 540 mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id); 541 542 return cgm_mux_clk_config(cgm_addr, mux->index, 543 mux_hw_clk, false); 544 } 545 546 static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module) 547 { 548 const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module); 549 struct s32cc_clk *clk; 550 551 if (mux == NULL) { 552 return NULL; 553 } 554 555 clk = s32cc_get_arch_clk(mux->source_id); 556 if (clk == NULL) { 557 ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n", 558 mux->source_id, mux->index); 559 return NULL; 560 } 561 562 return &clk->desc; 563 } 564 565 static int enable_mux(struct s32cc_clk_obj *module, 566 const struct s32cc_clk_drv *drv, 567 unsigned int depth) 568 { 569 const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module); 570 const struct s32cc_clk *clk; 571 int ret = 0; 572 573 ret = update_stack_depth(&depth); 574 if (ret != 0) { 575 return ret; 576 } 577 578 if (mux == NULL) { 579 return -EINVAL; 580 } 581 582 clk = s32cc_get_arch_clk(mux->source_id); 583 if (clk == NULL) { 584 ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n", 585 mux->source_id, mux->index); 586 return -EINVAL; 587 } 588 589 switch (mux->module) { 590 /* PLL mux will be enabled by PLL setup */ 591 case S32CC_ARM_PLL: 592 case S32CC_PERIPH_PLL: 593 case S32CC_DDR_PLL: 594 break; 595 case S32CC_CGM1: 596 ret = enable_cgm_mux(mux, drv); 597 break; 598 case S32CC_CGM0: 599 ret = enable_cgm_mux(mux, drv); 600 break; 601 default: 602 ERROR("Unknown mux parent type: %d\n", mux->module); 603 ret = -EINVAL; 604 break; 605 }; 606 607 return ret; 608 } 609 610 static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module) 611 { 612 const struct s32cc_dfs *dfs = s32cc_obj2dfs(module); 613 614 if (dfs->parent == NULL) { 615 ERROR("Failed to identify DFS's parent\n"); 616 } 617 618 return dfs->parent; 619 } 620 621 static int enable_dfs(struct s32cc_clk_obj *module, 622 const struct s32cc_clk_drv *drv, 623 unsigned int depth) 624 { 625 int ret = 0; 626 627 ret = update_stack_depth(&depth); 628 if (ret != 0) { 629 return ret; 630 } 631 632 return 0; 633 } 634 635 static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div) 636 { 637 const struct s32cc_clk_obj *parent = dfs_div->parent; 638 639 if (parent->type != s32cc_dfs_t) { 640 ERROR("DFS DIV doesn't have a DFS as parent\n"); 641 return NULL; 642 } 643 644 return s32cc_obj2dfs(parent); 645 } 646 647 static struct s32cc_pll *dfsdiv2pll(const struct s32cc_dfs_div *dfs_div) 648 { 649 const struct s32cc_clk_obj *parent; 650 const struct s32cc_dfs *dfs; 651 652 dfs = get_div_dfs(dfs_div); 653 if (dfs == NULL) { 654 return NULL; 655 } 656 657 parent = dfs->parent; 658 if (parent->type != s32cc_pll_t) { 659 return NULL; 660 } 661 662 return s32cc_obj2pll(parent); 663 } 664 665 static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div, 666 uint32_t *mfi, uint32_t *mfn) 667 { 668 uint64_t factor64, tmp64, ofreq; 669 uint32_t factor32; 670 671 unsigned long in = dfs_freq; 672 unsigned long out = dfs_div->freq; 673 674 /** 675 * factor = (IN / OUT) / 2 676 * MFI = integer(factor) 677 * MFN = (factor - MFI) * 36 678 */ 679 factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL; 680 tmp64 = factor64 / FP_PRECISION; 681 if (tmp64 > UINT32_MAX) { 682 return -EINVAL; 683 } 684 685 factor32 = (uint32_t)tmp64; 686 *mfi = factor32; 687 688 tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION; 689 if (tmp64 > UINT32_MAX) { 690 return -EINVAL; 691 } 692 693 *mfn = (uint32_t)tmp64; 694 695 /* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */ 696 factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL; 697 factor64 += ((uint64_t)*mfi) * FP_PRECISION; 698 factor64 *= 2ULL; 699 ofreq = (((uint64_t)in) * FP_PRECISION) / factor64; 700 701 if (ofreq != dfs_div->freq) { 702 ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n", 703 dfs_div->freq); 704 ERROR("Nearest freq = %" PRIx64 "\n", ofreq); 705 return -EINVAL; 706 } 707 708 return 0; 709 } 710 711 static int init_dfs_port(uintptr_t dfs_addr, uint32_t port, 712 uint32_t mfi, uint32_t mfn) 713 { 714 uint32_t portsr, portolsr; 715 uint32_t mask, old_mfi, old_mfn; 716 uint32_t dvport; 717 bool init_dfs; 718 719 dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port)); 720 721 old_mfi = DFS_DVPORTn_MFI(dvport); 722 old_mfn = DFS_DVPORTn_MFN(dvport); 723 724 portsr = mmio_read_32(DFS_PORTSR(dfs_addr)); 725 portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr)); 726 727 /* Skip configuration if it's not needed */ 728 if (((portsr & BIT_32(port)) != 0U) && 729 ((portolsr & BIT_32(port)) == 0U) && 730 (mfi == old_mfi) && (mfn == old_mfn)) { 731 return 0; 732 } 733 734 init_dfs = (portsr == 0U); 735 736 if (init_dfs) { 737 mask = DFS_PORTRESET_MASK; 738 } else { 739 mask = DFS_PORTRESET_SET(BIT_32(port)); 740 } 741 742 mmio_write_32(DFS_PORTOLSR(dfs_addr), mask); 743 mmio_write_32(DFS_PORTRESET(dfs_addr), mask); 744 745 while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) { 746 } 747 748 if (init_dfs) { 749 mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET); 750 } 751 752 mmio_write_32(DFS_DVPORTn(dfs_addr, port), 753 DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn)); 754 755 if (init_dfs) { 756 /* DFS clk enable programming */ 757 mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET); 758 } 759 760 mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port)); 761 762 while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) { 763 } 764 765 portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr)); 766 if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) { 767 ERROR("Failed to lock DFS divider\n"); 768 return -EINVAL; 769 } 770 771 return 0; 772 } 773 774 static struct s32cc_clk_obj * 775 get_dfs_div_parent(const struct s32cc_clk_obj *module) 776 { 777 const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module); 778 779 if (dfs_div->parent == NULL) { 780 ERROR("Failed to identify DFS divider's parent\n"); 781 } 782 783 return dfs_div->parent; 784 } 785 786 static int enable_dfs_div(struct s32cc_clk_obj *module, 787 const struct s32cc_clk_drv *drv, 788 unsigned int depth) 789 { 790 const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module); 791 const struct s32cc_pll *pll; 792 const struct s32cc_dfs *dfs; 793 uintptr_t dfs_addr = 0UL; 794 uint32_t mfi, mfn; 795 int ret = 0; 796 797 ret = update_stack_depth(&depth); 798 if (ret != 0) { 799 return ret; 800 } 801 802 dfs = get_div_dfs(dfs_div); 803 if (dfs == NULL) { 804 return -EINVAL; 805 } 806 807 pll = dfsdiv2pll(dfs_div); 808 if (pll == NULL) { 809 ERROR("Failed to identify DFS divider's parent\n"); 810 return -EINVAL; 811 } 812 813 ret = get_base_addr(dfs->instance, drv, &dfs_addr); 814 if ((ret != 0) || (dfs_addr == 0UL)) { 815 return -EINVAL; 816 } 817 818 ret = get_dfs_mfi_mfn(pll->vco_freq, dfs_div, &mfi, &mfn); 819 if (ret != 0) { 820 return -EINVAL; 821 } 822 823 return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn); 824 } 825 826 typedef int (*enable_clk_t)(struct s32cc_clk_obj *module, 827 const struct s32cc_clk_drv *drv, 828 unsigned int depth); 829 830 static int no_enable(struct s32cc_clk_obj *module, 831 const struct s32cc_clk_drv *drv, 832 unsigned int depth) 833 { 834 return 0; 835 } 836 837 static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod, 838 const struct s32cc_clk_drv *drv, bool leaf_node, 839 unsigned int depth) 840 { 841 int ret = 0; 842 843 if (mod == NULL) { 844 return 0; 845 } 846 847 ret = update_stack_depth(&depth); 848 if (ret != 0) { 849 return ret; 850 } 851 852 /* Refcount will be updated as part of the recursivity */ 853 if (leaf_node) { 854 return en_cb(mod, drv, depth); 855 } 856 857 if (mod->refcount == 0U) { 858 ret = en_cb(mod, drv, depth); 859 } 860 861 if (ret == 0) { 862 mod->refcount++; 863 } 864 865 return ret; 866 } 867 868 static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module); 869 870 static int enable_module(struct s32cc_clk_obj *module, 871 const struct s32cc_clk_drv *drv, 872 unsigned int depth) 873 { 874 struct s32cc_clk_obj *parent = get_module_parent(module); 875 static const enable_clk_t enable_clbs[8] = { 876 [s32cc_clk_t] = no_enable, 877 [s32cc_osc_t] = enable_osc, 878 [s32cc_pll_t] = enable_pll, 879 [s32cc_pll_out_div_t] = enable_pll_div, 880 [s32cc_clkmux_t] = enable_mux, 881 [s32cc_shared_clkmux_t] = enable_mux, 882 [s32cc_dfs_t] = enable_dfs, 883 [s32cc_dfs_div_t] = enable_dfs_div, 884 }; 885 uint32_t index; 886 int ret = 0; 887 888 ret = update_stack_depth(&depth); 889 if (ret != 0) { 890 return ret; 891 } 892 893 if (drv == NULL) { 894 return -EINVAL; 895 } 896 897 index = (uint32_t)module->type; 898 899 if (index >= ARRAY_SIZE(enable_clbs)) { 900 ERROR("Undefined module type: %d\n", module->type); 901 return -EINVAL; 902 } 903 904 if (enable_clbs[index] == NULL) { 905 ERROR("Undefined callback for the clock type: %d\n", 906 module->type); 907 return -EINVAL; 908 } 909 910 parent = get_module_parent(module); 911 912 ret = exec_cb_with_refcount(enable_module, parent, drv, 913 false, depth); 914 if (ret != 0) { 915 return ret; 916 } 917 918 ret = exec_cb_with_refcount(enable_clbs[index], module, drv, 919 true, depth); 920 if (ret != 0) { 921 return ret; 922 } 923 924 return ret; 925 } 926 927 static int enable_module_with_refcount(struct s32cc_clk_obj *module, 928 const struct s32cc_clk_drv *drv, 929 unsigned int depth) 930 { 931 return exec_cb_with_refcount(enable_module, module, drv, false, depth); 932 } 933 934 static int s32cc_clk_enable(unsigned long id) 935 { 936 const struct s32cc_clk_drv *drv = get_drv(); 937 unsigned int depth = MAX_STACK_DEPTH; 938 struct s32cc_clk *clk; 939 940 clk = s32cc_get_arch_clk(id); 941 if (clk == NULL) { 942 return -EINVAL; 943 } 944 945 return enable_module_with_refcount(&clk->desc, drv, depth); 946 } 947 948 static void s32cc_clk_disable(unsigned long id) 949 { 950 } 951 952 static bool s32cc_clk_is_enabled(unsigned long id) 953 { 954 return false; 955 } 956 957 static unsigned long s32cc_clk_get_rate(unsigned long id) 958 { 959 return 0; 960 } 961 962 static int set_module_rate(const struct s32cc_clk_obj *module, 963 unsigned long rate, unsigned long *orate, 964 unsigned int *depth); 965 966 static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate, 967 unsigned long *orate, unsigned int *depth) 968 { 969 struct s32cc_osc *osc = s32cc_obj2osc(module); 970 int ret; 971 972 ret = update_stack_depth(depth); 973 if (ret != 0) { 974 return ret; 975 } 976 977 if ((osc->freq != 0UL) && (rate != osc->freq)) { 978 ERROR("Already initialized oscillator. freq = %lu\n", 979 osc->freq); 980 return -EINVAL; 981 } 982 983 osc->freq = rate; 984 *orate = osc->freq; 985 986 return 0; 987 } 988 989 static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate, 990 unsigned long *orate, unsigned int *depth) 991 { 992 const struct s32cc_clk *clk = s32cc_obj2clk(module); 993 int ret; 994 995 ret = update_stack_depth(depth); 996 if (ret != 0) { 997 return ret; 998 } 999 1000 if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) && 1001 ((rate < clk->min_freq) || (rate > clk->max_freq))) { 1002 ERROR("%lu frequency is out of the allowed range: [%lu:%lu]\n", 1003 rate, clk->min_freq, clk->max_freq); 1004 return -EINVAL; 1005 } 1006 1007 if (clk->module != NULL) { 1008 return set_module_rate(clk->module, rate, orate, depth); 1009 } 1010 1011 if (clk->pclock != NULL) { 1012 return set_clk_freq(&clk->pclock->desc, rate, orate, depth); 1013 } 1014 1015 return -EINVAL; 1016 } 1017 1018 static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1019 unsigned long *orate, unsigned int *depth) 1020 { 1021 struct s32cc_pll *pll = s32cc_obj2pll(module); 1022 int ret; 1023 1024 ret = update_stack_depth(depth); 1025 if (ret != 0) { 1026 return ret; 1027 } 1028 1029 if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) { 1030 ERROR("PLL frequency was already set\n"); 1031 return -EINVAL; 1032 } 1033 1034 pll->vco_freq = rate; 1035 *orate = pll->vco_freq; 1036 1037 return 0; 1038 } 1039 1040 static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1041 unsigned long *orate, unsigned int *depth) 1042 { 1043 struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module); 1044 const struct s32cc_pll *pll; 1045 unsigned long prate, dc; 1046 int ret; 1047 1048 ret = update_stack_depth(depth); 1049 if (ret != 0) { 1050 return ret; 1051 } 1052 1053 if (pdiv->parent == NULL) { 1054 ERROR("Failed to identify PLL divider's parent\n"); 1055 return -EINVAL; 1056 } 1057 1058 pll = s32cc_obj2pll(pdiv->parent); 1059 if (pll == NULL) { 1060 ERROR("The parent of the PLL DIV is invalid\n"); 1061 return -EINVAL; 1062 } 1063 1064 prate = pll->vco_freq; 1065 1066 /** 1067 * The PLL is not initialized yet, so let's take a risk 1068 * and accept the proposed rate. 1069 */ 1070 if (prate == 0UL) { 1071 pdiv->freq = rate; 1072 *orate = rate; 1073 return 0; 1074 } 1075 1076 /* Decline in case the rate cannot fit PLL's requirements. */ 1077 dc = prate / rate; 1078 if ((prate / dc) != rate) { 1079 return -EINVAL; 1080 } 1081 1082 pdiv->freq = rate; 1083 *orate = pdiv->freq; 1084 1085 return 0; 1086 } 1087 1088 static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1089 unsigned long *orate, unsigned int *depth) 1090 { 1091 const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module); 1092 int ret; 1093 1094 ret = update_stack_depth(depth); 1095 if (ret != 0) { 1096 return ret; 1097 } 1098 1099 if (fdiv->parent == NULL) { 1100 ERROR("The divider doesn't have a valid parent\b"); 1101 return -EINVAL; 1102 } 1103 1104 ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth); 1105 1106 /* Update the output rate based on the parent's rate */ 1107 *orate /= fdiv->rate_div; 1108 1109 return ret; 1110 } 1111 1112 static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1113 unsigned long *orate, unsigned int *depth) 1114 { 1115 const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module); 1116 const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id); 1117 int ret; 1118 1119 ret = update_stack_depth(depth); 1120 if (ret != 0) { 1121 return ret; 1122 } 1123 1124 if (clk == NULL) { 1125 ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n", 1126 mux->index, mux->source_id); 1127 return -EINVAL; 1128 } 1129 1130 return set_module_rate(&clk->desc, rate, orate, depth); 1131 } 1132 1133 static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1134 unsigned long *orate, unsigned int *depth) 1135 { 1136 struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module); 1137 const struct s32cc_dfs *dfs; 1138 int ret; 1139 1140 ret = update_stack_depth(depth); 1141 if (ret != 0) { 1142 return ret; 1143 } 1144 1145 if (dfs_div->parent == NULL) { 1146 ERROR("Failed to identify DFS divider's parent\n"); 1147 return -EINVAL; 1148 } 1149 1150 /* Sanity check */ 1151 dfs = s32cc_obj2dfs(dfs_div->parent); 1152 if (dfs->parent == NULL) { 1153 ERROR("Failed to identify DFS's parent\n"); 1154 return -EINVAL; 1155 } 1156 1157 if ((dfs_div->freq != 0U) && (dfs_div->freq != rate)) { 1158 ERROR("DFS DIV frequency was already set to %lu\n", 1159 dfs_div->freq); 1160 return -EINVAL; 1161 } 1162 1163 dfs_div->freq = rate; 1164 *orate = rate; 1165 1166 return ret; 1167 } 1168 1169 static int set_module_rate(const struct s32cc_clk_obj *module, 1170 unsigned long rate, unsigned long *orate, 1171 unsigned int *depth) 1172 { 1173 int ret = 0; 1174 1175 ret = update_stack_depth(depth); 1176 if (ret != 0) { 1177 return ret; 1178 } 1179 1180 ret = -EINVAL; 1181 1182 switch (module->type) { 1183 case s32cc_clk_t: 1184 ret = set_clk_freq(module, rate, orate, depth); 1185 break; 1186 case s32cc_osc_t: 1187 ret = set_osc_freq(module, rate, orate, depth); 1188 break; 1189 case s32cc_pll_t: 1190 ret = set_pll_freq(module, rate, orate, depth); 1191 break; 1192 case s32cc_pll_out_div_t: 1193 ret = set_pll_div_freq(module, rate, orate, depth); 1194 break; 1195 case s32cc_fixed_div_t: 1196 ret = set_fixed_div_freq(module, rate, orate, depth); 1197 break; 1198 case s32cc_clkmux_t: 1199 ret = set_mux_freq(module, rate, orate, depth); 1200 break; 1201 case s32cc_shared_clkmux_t: 1202 ret = set_mux_freq(module, rate, orate, depth); 1203 break; 1204 case s32cc_dfs_t: 1205 ERROR("Setting the frequency of a DFS is not allowed!"); 1206 break; 1207 case s32cc_dfs_div_t: 1208 ret = set_dfs_div_freq(module, rate, orate, depth); 1209 break; 1210 default: 1211 break; 1212 } 1213 1214 return ret; 1215 } 1216 1217 static int s32cc_clk_set_rate(unsigned long id, unsigned long rate, 1218 unsigned long *orate) 1219 { 1220 unsigned int depth = MAX_STACK_DEPTH; 1221 const struct s32cc_clk *clk; 1222 int ret; 1223 1224 clk = s32cc_get_arch_clk(id); 1225 if (clk == NULL) { 1226 return -EINVAL; 1227 } 1228 1229 ret = set_module_rate(&clk->desc, rate, orate, &depth); 1230 if (ret != 0) { 1231 ERROR("Failed to set frequency (%lu MHz) for clock %lu\n", 1232 rate, id); 1233 } 1234 1235 return ret; 1236 } 1237 1238 static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module) 1239 { 1240 return NULL; 1241 } 1242 1243 typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj); 1244 1245 static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module) 1246 { 1247 static const get_parent_clb_t parents_clbs[8] = { 1248 [s32cc_clk_t] = get_clk_parent, 1249 [s32cc_osc_t] = get_no_parent, 1250 [s32cc_pll_t] = get_pll_parent, 1251 [s32cc_pll_out_div_t] = get_pll_div_parent, 1252 [s32cc_clkmux_t] = get_mux_parent, 1253 [s32cc_shared_clkmux_t] = get_mux_parent, 1254 [s32cc_dfs_t] = get_dfs_parent, 1255 [s32cc_dfs_div_t] = get_dfs_div_parent, 1256 }; 1257 uint32_t index; 1258 1259 if (module == NULL) { 1260 return NULL; 1261 } 1262 1263 index = (uint32_t)module->type; 1264 1265 if (index >= ARRAY_SIZE(parents_clbs)) { 1266 ERROR("Undefined module type: %d\n", module->type); 1267 return NULL; 1268 } 1269 1270 if (parents_clbs[index] == NULL) { 1271 ERROR("Undefined parent getter for type: %d\n", module->type); 1272 return NULL; 1273 } 1274 1275 return parents_clbs[index](module); 1276 } 1277 1278 static int s32cc_clk_get_parent(unsigned long id) 1279 { 1280 struct s32cc_clk *parent_clk; 1281 const struct s32cc_clk_obj *parent; 1282 const struct s32cc_clk *clk; 1283 unsigned long parent_id; 1284 int ret; 1285 1286 clk = s32cc_get_arch_clk(id); 1287 if (clk == NULL) { 1288 return -EINVAL; 1289 } 1290 1291 parent = get_module_parent(clk->module); 1292 if (parent == NULL) { 1293 return -EINVAL; 1294 } 1295 1296 parent_clk = s32cc_obj2clk(parent); 1297 if (parent_clk == NULL) { 1298 return -EINVAL; 1299 } 1300 1301 ret = s32cc_get_clk_id(parent_clk, &parent_id); 1302 if (ret != 0) { 1303 return ret; 1304 } 1305 1306 if (parent_id > (unsigned long)INT_MAX) { 1307 return -E2BIG; 1308 } 1309 1310 return (int)parent_id; 1311 } 1312 1313 static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id) 1314 { 1315 const struct s32cc_clk *parent; 1316 const struct s32cc_clk *clk; 1317 bool valid_source = false; 1318 struct s32cc_clkmux *mux; 1319 uint8_t i; 1320 1321 clk = s32cc_get_arch_clk(id); 1322 if (clk == NULL) { 1323 return -EINVAL; 1324 } 1325 1326 parent = s32cc_get_arch_clk(parent_id); 1327 if (parent == NULL) { 1328 return -EINVAL; 1329 } 1330 1331 if (!is_s32cc_clk_mux(clk)) { 1332 ERROR("Clock %lu is not a mux\n", id); 1333 return -EINVAL; 1334 } 1335 1336 mux = s32cc_clk2mux(clk); 1337 if (mux == NULL) { 1338 ERROR("Failed to cast clock %lu to clock mux\n", id); 1339 return -EINVAL; 1340 } 1341 1342 for (i = 0; i < mux->nclks; i++) { 1343 if (mux->clkids[i] == parent_id) { 1344 valid_source = true; 1345 break; 1346 } 1347 } 1348 1349 if (!valid_source) { 1350 ERROR("Clock %lu is not a valid clock for mux %lu\n", 1351 parent_id, id); 1352 return -EINVAL; 1353 } 1354 1355 mux->source_id = parent_id; 1356 1357 return 0; 1358 } 1359 1360 void s32cc_clk_register_drv(void) 1361 { 1362 static const struct clk_ops s32cc_clk_ops = { 1363 .enable = s32cc_clk_enable, 1364 .disable = s32cc_clk_disable, 1365 .is_enabled = s32cc_clk_is_enabled, 1366 .get_rate = s32cc_clk_get_rate, 1367 .set_rate = s32cc_clk_set_rate, 1368 .get_parent = s32cc_clk_get_parent, 1369 .set_parent = s32cc_clk_set_parent, 1370 }; 1371 1372 clk_register(&s32cc_clk_ops); 1373 } 1374 1375