/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>
#include <common/debug.h>
#include <drivers/clk.h>
#include <lib/mmio.h>
#include <s32cc-clk-ids.h>
#include <s32cc-clk-modules.h>
#include <s32cc-clk-regs.h>
#include <s32cc-clk-utils.h>
#include <s32cc-mc-me.h>

/* Maximum recursion depth allowed while walking the clock-module tree. */
#define MAX_STACK_DEPTH		(40U)

/* Scaling factor used to emulate fractional arithmetic with integers. */
#define FP_PRECISION		(100000000UL)

struct s32cc_clk_drv {
	uintptr_t fxosc_base;
	uintptr_t armpll_base;
	uintptr_t periphpll_base;
	uintptr_t armdfs_base;
	uintptr_t cgm0_base;
	uintptr_t cgm1_base;
	uintptr_t cgm5_base;
	uintptr_t ddrpll_base;
	uintptr_t mc_me;
	uintptr_t mc_rgm;
	uintptr_t rdc;
};

static int update_stack_depth(unsigned int *depth)
{
	if (*depth == 0U) {
		return -ENOMEM;
	}

	(*depth)--;
	return 0;
}

static struct s32cc_clk_drv *get_drv(void)
{
	static struct s32cc_clk_drv driver = {
		.fxosc_base = FXOSC_BASE_ADDR,
		.armpll_base = ARMPLL_BASE_ADDR,
		.periphpll_base = PERIPHPLL_BASE_ADDR,
		.armdfs_base = ARM_DFS_BASE_ADDR,
		.cgm0_base = CGM0_BASE_ADDR,
		.cgm1_base = CGM1_BASE_ADDR,
		.cgm5_base = MC_CGM5_BASE_ADDR,
		.ddrpll_base = DDRPLL_BASE_ADDR,
		.mc_me = MC_ME_BASE_ADDR,
		.mc_rgm = MC_RGM_BASE_ADDR,
		.rdc = RDC_BASE_ADDR,
	};

	return &driver;
}

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth);

static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);

	if (clk->module != NULL) {
		return clk->module;
	}

	if (clk->pclock != NULL) {
		return &clk->pclock->desc;
	}

	return NULL;
}

static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv,
			 uintptr_t *base)
{
	int ret = 0;

	switch (id) {
	case S32CC_FXOSC:
		*base = drv->fxosc_base;
		break;
	case S32CC_ARM_PLL:
		*base = drv->armpll_base;
		break;
	case S32CC_PERIPH_PLL:
		*base = drv->periphpll_base;
		break;
	case S32CC_DDR_PLL:
		*base = drv->ddrpll_base;
		break;
	case S32CC_ARM_DFS:
		*base = drv->armdfs_base;
		break;
	case S32CC_CGM0:
		*base = drv->cgm0_base;
		break;
	case S32CC_CGM1:
		*base = drv->cgm1_base;
		break;
	case S32CC_CGM5:
		*base = drv->cgm5_base;
		break;
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret != 0) {
		ERROR("Unknown clock source id: %u\n", id);
	}

	return ret;
}

static void enable_fxosc(const struct s32cc_clk_drv *drv)
{
	uintptr_t fxosc_base = drv->fxosc_base;
	uint32_t ctrl;

	ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base));
	if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) {
		return;
	}

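	/*
	 * Program the control register before switching the oscillator on:
	 * keep the crystal bypass disabled, enable the comparator, and set
	 * the EOCV (end-of-count, i.e. stabilization delay) and GM_SEL
	 * (crystal gain) fields. The 0x1/0x7 values below are the ones this
	 * driver already uses and are assumed to follow the S32CC reference
	 * manual guidance.
	 */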
	ctrl = FXOSC_CTRL_COMP_EN;
	ctrl &= ~FXOSC_CTRL_OSC_BYP;
	ctrl |= FXOSC_CTRL_EOCV(0x1);
	ctrl |= FXOSC_CTRL_GM_SEL(0x7);
	mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl);

	/* Switch ON the crystal oscillator. */
	mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON);

	/* Wait until the clock is stable. */
	while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) {
	}
}

static int enable_osc(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_osc *osc = s32cc_obj2osc(module);
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	switch (osc->source) {
	case S32CC_FXOSC:
		enable_fxosc(drv);
		break;
	/* FIRC and SIRC oscillators are enabled by default */
	case S32CC_FIRC:
		break;
	case S32CC_SIRC:
		break;
	default:
		ERROR("Invalid oscillator %d\n", osc->source);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);

	if (pll->source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
	}

	return pll->source;
}

static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq,
			   uint32_t *mfi, uint32_t *mfn)
{
	unsigned long vco;
	unsigned long mfn64;

	/* FRAC-N mode */
	*mfi = (uint32_t)(pll_vco / ref_freq);

	/* MFN formula: (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */
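	/*
	 * Worked example (illustrative values): with pll_vco = 2 GHz and
	 * ref_freq = 40 MHz (FXOSC), MFI = 2000000000 / 40000000 = 50 and
	 * MFN = 0, since the division is exact. Any fractional part is
	 * scaled by FP_PRECISION so the computation stays in integer
	 * arithmetic; the check further below rejects VCO rates that the
	 * resulting MFI/MFN pair cannot reproduce exactly.
	 */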
	mfn64 = pll_vco % ref_freq;
	mfn64 *= FP_PRECISION;
	mfn64 /= ref_freq;
	mfn64 *= 18432UL;
	mfn64 /= FP_PRECISION;

	if (mfn64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)mfn64;

	vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL;
	vco += (unsigned long)*mfi * FP_PRECISION;
	vco *= ref_freq;
	vco /= FP_PRECISION;

	if (vco != pll_vco) {
		ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n",
		      pll_vco, vco);
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll)
{
	const struct s32cc_clk_obj *source = pll->source;
	const struct s32cc_clk *clk;

	if (source == NULL) {
		ERROR("Failed to identify PLL's parent\n");
		return NULL;
	}

	if (source->type != s32cc_clk_t) {
		ERROR("The parent of the PLL isn't a clock\n");
		return NULL;
	}

	clk = s32cc_obj2clk(source);

	if (clk->module == NULL) {
		ERROR("The clock isn't connected to a module\n");
		return NULL;
	}

	source = clk->module;

	if ((source->type != s32cc_clkmux_t) &&
	    (source->type != s32cc_shared_clkmux_t)) {
		ERROR("The parent of the PLL isn't a MUX\n");
		return NULL;
	}

	return s32cc_obj2clkmux(source);
}

static void disable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void enable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		disable_odiv(pll_addr, i);
	}
}

static void enable_pll_hw(uintptr_t pll_addr)
{
	/* Enable the PLL. */
	mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0);

	/* Poll until PLL acquires lock. */
	while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) {
	}
}

static void disable_pll_hw(uintptr_t pll_addr)
{
	mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD);
}

static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr,
		       const struct s32cc_clk_drv *drv, uint32_t sclk_id,
		       unsigned long sclk_freq)
{
	uint32_t rdiv = 1, mfi, mfn;
	int ret;

	ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	/* Disable ODIVs */
	disable_odivs(pll_addr, pll->ndividers);

	/* Disable PLL */
	disable_pll_hw(pll_addr);

	/* Program PLLCLKMUX */
	mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id);

	/* Program VCO */
	mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr),
			   PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK,
			   PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi));

	mmio_write_32(PLLDIG_PLLFD(pll_addr),
		      PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN);

	enable_pll_hw(pll_addr);

	return ret;
}

static int enable_pll(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_pll *pll = s32cc_obj2pll(module);
	const struct s32cc_clkmux *mux;
	uintptr_t pll_addr = UL(0x0);
	unsigned int ldepth = depth;
	unsigned long sclk_freq;
	uint32_t sclk_id;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	mux = get_pll_mux(pll);
	if (mux == NULL) {
		return -EINVAL;
	}

	if (pll->instance != mux->module) {
		ERROR("MUX type is not in sync with PLL ID\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return ret;
	}

	switch (mux->source_id) {
	case S32CC_CLK_FIRC:
		sclk_freq = 48U * MHZ;
		sclk_id = 0;
		break;
	case S32CC_CLK_FXOSC:
		sclk_freq = 40U * MHZ;
		sclk_id = 1;
		break;
	default:
		ERROR("Invalid source selection for PLL 0x%lx\n",
		      pll_addr);
		return -EINVAL;
	}

	return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq);
}

static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv)
{
	const struct s32cc_clk_obj *parent;

	parent = pdiv->parent;
	if (parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return NULL;
	}

	if (parent->type != s32cc_pll_t) {
		ERROR("The parent of the divider is not a PLL instance\n");
		return NULL;
	}

	return s32cc_obj2pll(parent);
}

static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc)
{
	uint32_t pllodiv;
	uint32_t pdiv;

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index));
	pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

	if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) {
		return;
	}

	if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
		disable_odiv(pll_addr, div_index);
	}

	pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U);
	mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv);

	enable_odiv(pll_addr, div_index);
}

static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL DIV's parent\n");
	}

	return pdiv->parent;
}

static int enable_pll_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	uintptr_t pll_addr = 0x0ULL;
	unsigned int ldepth = depth;
	const struct s32cc_pll *pll;
	uint32_t dc;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	pll = get_div_pll(pdiv);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	ret = get_base_addr(pll->instance, drv, &pll_addr);
	if (ret != 0) {
		ERROR("Failed to detect PLL instance\n");
		return -EINVAL;
	}

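	/*
	 * The output divider is the integer ratio between the PLL VCO rate
	 * and the requested output rate; config_pll_out_div() stores it as
	 * (dc - 1) in the PLLODIV register field. Illustrative example: a
	 * 2 GHz VCO with an 80 MHz output gives dc = 25, programmed as 24.
	 */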
	dc = (uint32_t)(pll->vco_freq / pdiv->freq);

	config_pll_out_div(pll_addr, pdiv->index, dc);

	return 0;
}

static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source,
			      bool safe_clk)
{
	uint32_t css, csc;

	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));

	/* Already configured */
	if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) &&
	    (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
	    ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) {
		return 0;
	}

	/* Ongoing clock switch? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux));

	/* Clear previous source. */
	csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK);

	if (!safe_clk) {
		/* Select the clock source and trigger the clock switch. */
		csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW;
	} else {
		/* Switch to safe clock */
		csc |= MC_CGM_MUXn_CSC_SAFE_SW;
	}

	mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc);

	/* Wait for configuration bit to auto-clear. */
	while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) &
		MC_CGM_MUXn_CSC_CLK_SW) != 0U) {
	}

	/* Is the clock switch completed? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	/*
	 * Check if the switch succeeded.
	 * Check switch trigger cause and the source.
	 */
	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));
	if (!safe_clk) {
		if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
		    (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) {
			return 0;
		}

		ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n",
		      mux, source, cgm_addr);
	} else {
		if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) ||
		     (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) &&
		    ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) {
			return 0;
		}

		ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n",
		      mux, cgm_addr);
	}

	return -EINVAL;
}

static int enable_cgm_mux(const struct s32cc_clkmux *mux,
			  const struct s32cc_clk_drv *drv)
{
	uintptr_t cgm_addr = UL(0x0);
	uint32_t mux_hw_clk;
	int ret;

	ret = get_base_addr(mux->module, drv, &cgm_addr);
	if (ret != 0) {
		return ret;
	}

	mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id);

	return cgm_mux_clk_config(cgm_addr, mux->index,
				  mux_hw_clk, false);
}

static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	struct s32cc_clk *clk;

	if (mux == NULL) {
		return NULL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return NULL;
	}

	return &clk->desc;
}

static int enable_mux(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	unsigned int ldepth = depth;
	const struct s32cc_clk *clk;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (mux == NULL) {
		return -EINVAL;
	}

	clk = s32cc_get_arch_clk(mux->source_id);
	if (clk == NULL) {
		ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
		      mux->source_id, mux->index);
		return -EINVAL;
	}

	switch (mux->module) {
	/* PLL mux will be enabled by PLL setup */
	case S32CC_ARM_PLL:
	case S32CC_PERIPH_PLL:
	case S32CC_DDR_PLL:
		break;
	case S32CC_CGM1:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM0:
		ret = enable_cgm_mux(mux, drv);
		break;
	case S32CC_CGM5:
		ret = enable_cgm_mux(mux, drv);
		break;
	default:
		ERROR("Unknown mux parent type: %d\n", mux->module);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);

	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
	}

	return dfs->parent;
}

static int enable_dfs(struct s32cc_clk_obj *module,
		      const struct s32cc_clk_drv *drv,
		      unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div)
{
	const struct s32cc_clk_obj *parent = dfs_div->parent;

	if (parent->type != s32cc_dfs_t) {
		ERROR("DFS DIV doesn't have a DFS as parent\n");
		return NULL;
	}

	return s32cc_obj2dfs(parent);
}

static struct s32cc_pll *dfsdiv2pll(const struct s32cc_dfs_div *dfs_div)
{
	const struct s32cc_clk_obj *parent;
	const struct s32cc_dfs *dfs;

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return NULL;
	}

	parent = dfs->parent;
	if (parent->type != s32cc_pll_t) {
		return NULL;
	}

	return s32cc_obj2pll(parent);
}

static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div,
			   uint32_t *mfi, uint32_t *mfn)
{
	uint64_t factor64, tmp64, ofreq;
	uint32_t factor32;

	unsigned long in = dfs_freq;
	unsigned long out = dfs_div->freq;

	/*
	 * factor = (IN / OUT) / 2
	 * MFI = integer(factor)
	 * MFN = (factor - MFI) * 36
	 */
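	/*
	 * Worked example (illustrative values): for a 2 GHz DFS input and an
	 * 800 MHz divider output, factor = (2000 / 800) / 2 = 1.25, so
	 * MFI = 1 and MFN = 0.25 * 36 = 9. As with the PLL case, the
	 * arithmetic below uses FP_PRECISION scaling and the result is
	 * cross-checked against the requested rate.
	 */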
	factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL;
	tmp64 = factor64 / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	factor32 = (uint32_t)tmp64;
	*mfi = factor32;

	tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION;
	if (tmp64 > UINT32_MAX) {
		return -EINVAL;
	}

	*mfn = (uint32_t)tmp64;

	/* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */
	factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL;
	factor64 += ((uint64_t)*mfi) * FP_PRECISION;
	factor64 *= 2ULL;
	ofreq = (((uint64_t)in) * FP_PRECISION) / factor64;

	if (ofreq != dfs_div->freq) {
		ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n",
		      dfs_div->freq);
		ERROR("Nearest freq = %" PRIu64 "\n", ofreq);
		return -EINVAL;
	}

	return 0;
}

static int init_dfs_port(uintptr_t dfs_addr, uint32_t port,
			 uint32_t mfi, uint32_t mfn)
{
	uint32_t portsr, portolsr;
	uint32_t mask, old_mfi, old_mfn;
	uint32_t dvport;
	bool init_dfs;

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port));

	old_mfi = DFS_DVPORTn_MFI(dvport);
	old_mfn = DFS_DVPORTn_MFN(dvport);

	portsr = mmio_read_32(DFS_PORTSR(dfs_addr));
	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));

	/* Skip configuration if it's not needed */
	if (((portsr & BIT_32(port)) != 0U) &&
	    ((portolsr & BIT_32(port)) == 0U) &&
	    (mfi == old_mfi) && (mfn == old_mfn)) {
		return 0;
	}

	init_dfs = (portsr == 0U);

	if (init_dfs) {
		mask = DFS_PORTRESET_MASK;
	} else {
		mask = DFS_PORTRESET_SET(BIT_32(port));
	}

	mmio_write_32(DFS_PORTOLSR(dfs_addr), mask);
	mmio_write_32(DFS_PORTRESET(dfs_addr), mask);

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) {
	}

	if (init_dfs) {
		mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_write_32(DFS_DVPORTn(dfs_addr, port),
		      DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn));

	if (init_dfs) {
		/* DFS clk enable programming */
		mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port));

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) {
	}

	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));
	if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) {
		ERROR("Failed to lock DFS divider\n");
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_dfs_div_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
	}

	return dfs_div->parent;
}

static int enable_dfs_div(struct s32cc_clk_obj *module,
			  const struct s32cc_clk_drv *drv,
			  unsigned int depth)
{
	const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	unsigned int ldepth = depth;
	const struct s32cc_pll *pll;
	const struct s32cc_dfs *dfs;
	uintptr_t dfs_addr = 0UL;
	uint32_t mfi, mfn;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	dfs = get_div_dfs(dfs_div);
	if (dfs == NULL) {
		return -EINVAL;
	}

	pll = dfsdiv2pll(dfs_div);
	if (pll == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
		return -EINVAL;
	}

	ret = get_base_addr(dfs->instance, drv, &dfs_addr);
	if ((ret != 0) || (dfs_addr == 0UL)) {
		return -EINVAL;
	}

	ret = get_dfs_mfi_mfn(pll->vco_freq, dfs_div, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn);
}

typedef int (*enable_clk_t)(struct s32cc_clk_obj *module,
			    const struct s32cc_clk_drv *drv,
			    unsigned int depth);

static int enable_part(struct s32cc_clk_obj *module,
		       const struct s32cc_clk_drv *drv,
		       unsigned int depth)
{
	const struct s32cc_part *part = s32cc_obj2part(module);
	uint32_t part_no = part->partition_id;

	if ((drv->mc_me == 0UL) || (drv->mc_rgm == 0UL) || (drv->rdc == 0UL)) {
		return -EINVAL;
	}

	return mc_me_enable_partition(drv->mc_me, drv->mc_rgm, drv->rdc, part_no);
}

static int enable_part_block(struct s32cc_clk_obj *module,
			     const struct s32cc_clk_drv *drv,
			     unsigned int depth)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);
	const struct s32cc_part *part = block->part;
	uint32_t part_no = part->partition_id;
	unsigned int ldepth = depth;
	uint32_t cofb;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if ((block->block >= s32cc_part_block0) &&
	    (block->block <= s32cc_part_block15)) {
		cofb = (uint32_t)block->block - (uint32_t)s32cc_part_block0;
		mc_me_enable_part_cofb(drv->mc_me, part_no, cofb, block->status);
	} else {
		ERROR("Unknown partition block type: %d\n", block->block);
		return -EINVAL;
	}

	return 0;
}

static struct s32cc_clk_obj *
get_part_block_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block *block = s32cc_obj2partblock(module);

	return &block->part->desc;
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth);

static int enable_part_block_link(struct s32cc_clk_obj *module,
				  const struct s32cc_clk_drv *drv,
				  unsigned int depth)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);
	struct s32cc_part_block *block = link->block;
	unsigned int ldepth = depth;
	int ret;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* Move the enablement algorithm to partition tree */
	return enable_module_with_refcount(&block->desc, drv, ldepth);
}

static struct s32cc_clk_obj *
get_part_block_link_parent(const struct s32cc_clk_obj *module)
{
	const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);

	return link->parent;
}

static int no_enable(struct s32cc_clk_obj *module,
		     const struct s32cc_clk_drv *drv,
		     unsigned int depth)
{
	return 0;
}

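/*
 * Helper for the recursive enable walk: non-leaf (parent) modules are
 * reference counted and their callback runs only on the first request,
 * while the leaf module's type-specific callback is always invoked; the
 * leaf's own reference count is handled by the caller's recursion.
 */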
static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod,
				 const struct s32cc_clk_drv *drv, bool leaf_node,
				 unsigned int depth)
{
	unsigned int ldepth = depth;
	int ret = 0;

	if (mod == NULL) {
		return 0;
	}

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	/* The refcount is updated as part of the recursion */
	if (leaf_node) {
		return en_cb(mod, drv, ldepth);
	}

	if (mod->refcount == 0U) {
		ret = en_cb(mod, drv, ldepth);
	}

	if (ret == 0) {
		mod->refcount++;
	}

	return ret;
}

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module);

static int enable_module(struct s32cc_clk_obj *module,
			 const struct s32cc_clk_drv *drv,
			 unsigned int depth)
{
	static const enable_clk_t enable_clbs[12] = {
		[s32cc_clk_t] = no_enable,
		[s32cc_osc_t] = enable_osc,
		[s32cc_pll_t] = enable_pll,
		[s32cc_pll_out_div_t] = enable_pll_div,
		[s32cc_clkmux_t] = enable_mux,
		[s32cc_shared_clkmux_t] = enable_mux,
		[s32cc_dfs_t] = enable_dfs,
		[s32cc_dfs_div_t] = enable_dfs_div,
		[s32cc_part_t] = enable_part,
		[s32cc_part_block_t] = enable_part_block,
		[s32cc_part_block_link_t] = enable_part_block_link,
	};
	struct s32cc_clk_obj *parent;
	unsigned int ldepth = depth;
	uint32_t index;
	int ret = 0;

	ret = update_stack_depth(&ldepth);
	if (ret != 0) {
		return ret;
	}

	if (drv == NULL) {
		return -EINVAL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(enable_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return -EINVAL;
	}

	if (enable_clbs[index] == NULL) {
		ERROR("Undefined callback for the clock type: %d\n",
		      module->type);
		return -EINVAL;
	}

	/* Enable the parent chain first, then the module itself. */
	parent = get_module_parent(module);

	ret = exec_cb_with_refcount(enable_module, parent, drv,
				    false, ldepth);
	if (ret != 0) {
		return ret;
	}

	return exec_cb_with_refcount(enable_clbs[index], module, drv,
				     true, ldepth);
}

static int enable_module_with_refcount(struct s32cc_clk_obj *module,
				       const struct s32cc_clk_drv *drv,
				       unsigned int depth)
{
	return exec_cb_with_refcount(enable_module, module, drv, false, depth);
}

static int s32cc_clk_enable(unsigned long id)
{
	const struct s32cc_clk_drv *drv = get_drv();
	unsigned int depth = MAX_STACK_DEPTH;
	struct s32cc_clk *clk;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	return enable_module_with_refcount(&clk->desc, drv, depth);
}

static void s32cc_clk_disable(unsigned long id)
{
}

static bool s32cc_clk_is_enabled(unsigned long id)
{
	return false;
}

static unsigned long s32cc_clk_get_rate(unsigned long id)
{
	return 0;
}

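/*
 * The set_*_freq() helpers below do not touch hardware. They only record the
 * requested rates in the clock-module tree, propagating each request towards
 * the parent modules; the recorded rates are applied when the clock is
 * enabled. On success, *orate holds the rate that was actually accepted.
 */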
static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth);

static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_osc *osc = s32cc_obj2osc(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((osc->freq != 0UL) && (rate != osc->freq)) {
		ERROR("The oscillator frequency was already set to %lu\n",
		      osc->freq);
		return -EINVAL;
	}

	osc->freq = rate;
	*orate = osc->freq;

	return 0;
}

static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clk *clk = s32cc_obj2clk(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) &&
	    ((rate < clk->min_freq) || (rate > clk->max_freq))) {
		ERROR("Frequency %lu is out of the allowed range: [%lu:%lu]\n",
		      rate, clk->min_freq, clk->max_freq);
		return -EINVAL;
	}

	if (clk->module != NULL) {
		return set_module_rate(clk->module, rate, orate, depth);
	}

	if (clk->pclock != NULL) {
		return set_clk_freq(&clk->pclock->desc, rate, orate, depth);
	}

	return -EINVAL;
}

static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll *pll = s32cc_obj2pll(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) {
		ERROR("PLL frequency was already set\n");
		return -EINVAL;
	}

	pll->vco_freq = rate;
	*orate = pll->vco_freq;

	return 0;
}

static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
	const struct s32cc_pll *pll;
	unsigned long prate, dc;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (pdiv->parent == NULL) {
		ERROR("Failed to identify PLL divider's parent\n");
		return -EINVAL;
	}

	pll = s32cc_obj2pll(pdiv->parent);
	if (pll == NULL) {
		ERROR("The parent of the PLL DIV is invalid\n");
		return -EINVAL;
	}

	prate = pll->vco_freq;

	/*
	 * The PLL is not initialized yet, so let's take a risk
	 * and accept the proposed rate.
	 */
	if (prate == 0UL) {
		pdiv->freq = rate;
		*orate = rate;
		return 0;
	}

	/* Decline in case the rate cannot fit the PLL's requirements. */
	dc = prate / rate;
	if ((prate / dc) != rate) {
		return -EINVAL;
	}

	pdiv->freq = rate;
	*orate = pdiv->freq;

	return 0;
}

static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			      unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (fdiv->parent == NULL) {
		ERROR("The divider doesn't have a valid parent\n");
		return -EINVAL;
	}

	ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth);

	/* Update the output rate based on the parent's rate */
	*orate /= fdiv->rate_div;

	return ret;
}

static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			unsigned long *orate, unsigned int *depth)
{
	const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
	const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (clk == NULL) {
		ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
		      mux->index, mux->source_id);
		return -EINVAL;
	}

	return set_module_rate(&clk->desc, rate, orate, depth);
}

static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
			    unsigned long *orate, unsigned int *depth)
{
	struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
	const struct s32cc_dfs *dfs;
	int ret;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	if (dfs_div->parent == NULL) {
		ERROR("Failed to identify DFS divider's parent\n");
		return -EINVAL;
	}

	/* Sanity check */
	dfs = s32cc_obj2dfs(dfs_div->parent);
	if (dfs->parent == NULL) {
		ERROR("Failed to identify DFS's parent\n");
		return -EINVAL;
	}

	if ((dfs_div->freq != 0UL) && (dfs_div->freq != rate)) {
		ERROR("DFS DIV frequency was already set to %lu\n",
		      dfs_div->freq);
		return -EINVAL;
	}

	dfs_div->freq = rate;
	*orate = rate;

	return ret;
}

static int set_module_rate(const struct s32cc_clk_obj *module,
			   unsigned long rate, unsigned long *orate,
			   unsigned int *depth)
{
	int ret = 0;

	ret = update_stack_depth(depth);
	if (ret != 0) {
		return ret;
	}

	ret = -EINVAL;

	switch (module->type) {
	case s32cc_clk_t:
		ret = set_clk_freq(module, rate, orate, depth);
		break;
	case s32cc_osc_t:
		ret = set_osc_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_t:
		ret = set_pll_freq(module, rate, orate, depth);
		break;
	case s32cc_pll_out_div_t:
		ret = set_pll_div_freq(module, rate, orate, depth);
		break;
	case s32cc_fixed_div_t:
		ret = set_fixed_div_freq(module, rate, orate, depth);
		break;
	case s32cc_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_shared_clkmux_t:
		ret = set_mux_freq(module, rate, orate, depth);
		break;
	case s32cc_dfs_t:
		ERROR("Setting the frequency of a DFS is not allowed!\n");
		break;
	case s32cc_dfs_div_t:
		ret = set_dfs_div_freq(module, rate, orate, depth);
		break;
	default:
		break;
	}

	return ret;
}

static int s32cc_clk_set_rate(unsigned long id, unsigned long rate,
			      unsigned long *orate)
{
	unsigned int depth = MAX_STACK_DEPTH;
	const struct s32cc_clk *clk;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	ret = set_module_rate(&clk->desc, rate, orate, &depth);
	if (ret != 0) {
		ERROR("Failed to set frequency (%lu Hz) for clock %lu\n",
		      rate, id);
	}

	return ret;
}

static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module)
{
	return NULL;
}

typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj);

static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module)
{
	static const get_parent_clb_t parents_clbs[12] = {
		[s32cc_clk_t] = get_clk_parent,
		[s32cc_osc_t] = get_no_parent,
		[s32cc_pll_t] = get_pll_parent,
		[s32cc_pll_out_div_t] = get_pll_div_parent,
		[s32cc_clkmux_t] = get_mux_parent,
		[s32cc_shared_clkmux_t] = get_mux_parent,
		[s32cc_dfs_t] = get_dfs_parent,
		[s32cc_dfs_div_t] = get_dfs_div_parent,
		[s32cc_part_t] = get_no_parent,
		[s32cc_part_block_t] = get_part_block_parent,
		[s32cc_part_block_link_t] = get_part_block_link_parent,
	};
	uint32_t index;

	if (module == NULL) {
		return NULL;
	}

	index = (uint32_t)module->type;

	if (index >= ARRAY_SIZE(parents_clbs)) {
		ERROR("Undefined module type: %d\n", module->type);
		return NULL;
	}

	if (parents_clbs[index] == NULL) {
		ERROR("Undefined parent getter for type: %d\n", module->type);
		return NULL;
	}

	return parents_clbs[index](module);
}

static int s32cc_clk_get_parent(unsigned long id)
{
	struct s32cc_clk *parent_clk;
	const struct s32cc_clk_obj *parent;
	const struct s32cc_clk *clk;
	unsigned long parent_id;
	int ret;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = get_module_parent(clk->module);
	if (parent == NULL) {
		return -EINVAL;
	}

	parent_clk = s32cc_obj2clk(parent);
	if (parent_clk == NULL) {
		return -EINVAL;
	}

	ret = s32cc_get_clk_id(parent_clk, &parent_id);
	if (ret != 0) {
		return ret;
	}

	if (parent_id > (unsigned long)INT_MAX) {
		return -E2BIG;
	}

	return (int)parent_id;
}

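/*
 * Changing the parent only updates the mux bookkeeping (source_id); the new
 * source is programmed into the hardware the next time the mux is enabled.
 */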
static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id)
{
	const struct s32cc_clk *parent;
	const struct s32cc_clk *clk;
	bool valid_source = false;
	struct s32cc_clkmux *mux;
	uint8_t i;

	clk = s32cc_get_arch_clk(id);
	if (clk == NULL) {
		return -EINVAL;
	}

	parent = s32cc_get_arch_clk(parent_id);
	if (parent == NULL) {
		return -EINVAL;
	}

	if (!is_s32cc_clk_mux(clk)) {
		ERROR("Clock %lu is not a mux\n", id);
		return -EINVAL;
	}

	mux = s32cc_clk2mux(clk);
	if (mux == NULL) {
		ERROR("Failed to cast clock %lu to a clock mux\n", id);
		return -EINVAL;
	}

	for (i = 0; i < mux->nclks; i++) {
		if (mux->clkids[i] == parent_id) {
			valid_source = true;
			break;
		}
	}

	if (!valid_source) {
		ERROR("Clock %lu is not a valid source for mux %lu\n",
		      parent_id, id);
		return -EINVAL;
	}

	mux->source_id = parent_id;

	return 0;
}

void s32cc_clk_register_drv(void)
{
	static const struct clk_ops s32cc_clk_ops = {
		.enable = s32cc_clk_enable,
		.disable = s32cc_clk_disable,
		.is_enabled = s32cc_clk_is_enabled,
		.get_rate = s32cc_clk_get_rate,
		.set_rate = s32cc_clk_set_rate,
		.get_parent = s32cc_clk_get_parent,
		.set_parent = s32cc_clk_set_parent,
	};

	clk_register(&s32cc_clk_ops);
}