1 /* 2 * Copyright 2024 NXP 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 #include <errno.h> 7 #include <common/debug.h> 8 #include <drivers/clk.h> 9 #include <lib/mmio.h> 10 #include <s32cc-clk-ids.h> 11 #include <s32cc-clk-modules.h> 12 #include <s32cc-clk-regs.h> 13 #include <s32cc-clk-utils.h> 14 #include <s32cc-mc-me.h> 15 16 #define MAX_STACK_DEPTH (40U) 17 18 /* This is used for floating-point precision calculations. */ 19 #define FP_PRECISION (100000000UL) 20 21 struct s32cc_clk_drv { 22 uintptr_t fxosc_base; 23 uintptr_t armpll_base; 24 uintptr_t periphpll_base; 25 uintptr_t armdfs_base; 26 uintptr_t cgm0_base; 27 uintptr_t cgm1_base; 28 uintptr_t cgm5_base; 29 uintptr_t ddrpll_base; 30 uintptr_t mc_me; 31 uintptr_t mc_rgm; 32 uintptr_t rdc; 33 }; 34 35 static int update_stack_depth(unsigned int *depth) 36 { 37 if (*depth == 0U) { 38 return -ENOMEM; 39 } 40 41 (*depth)--; 42 return 0; 43 } 44 45 static struct s32cc_clk_drv *get_drv(void) 46 { 47 static struct s32cc_clk_drv driver = { 48 .fxosc_base = FXOSC_BASE_ADDR, 49 .armpll_base = ARMPLL_BASE_ADDR, 50 .periphpll_base = PERIPHPLL_BASE_ADDR, 51 .armdfs_base = ARM_DFS_BASE_ADDR, 52 .cgm0_base = CGM0_BASE_ADDR, 53 .cgm1_base = CGM1_BASE_ADDR, 54 .cgm5_base = MC_CGM5_BASE_ADDR, 55 .ddrpll_base = DDRPLL_BASE_ADDR, 56 .mc_me = MC_ME_BASE_ADDR, 57 .mc_rgm = MC_RGM_BASE_ADDR, 58 .rdc = RDC_BASE_ADDR, 59 }; 60 61 return &driver; 62 } 63 64 static int enable_module(struct s32cc_clk_obj *module, 65 const struct s32cc_clk_drv *drv, 66 unsigned int depth); 67 68 static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module) 69 { 70 const struct s32cc_clk *clk = s32cc_obj2clk(module); 71 72 if (clk->module != NULL) { 73 return clk->module; 74 } 75 76 if (clk->pclock != NULL) { 77 return &clk->pclock->desc; 78 } 79 80 return NULL; 81 } 82 83 static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv, 84 uintptr_t *base) 85 { 86 int ret = 0; 87 88 switch (id) { 89 case S32CC_FXOSC: 90 *base = drv->fxosc_base; 91 break; 92 case S32CC_ARM_PLL: 93 *base = drv->armpll_base; 94 break; 95 case S32CC_PERIPH_PLL: 96 *base = drv->periphpll_base; 97 break; 98 case S32CC_DDR_PLL: 99 *base = drv->ddrpll_base; 100 break; 101 case S32CC_ARM_DFS: 102 *base = drv->armdfs_base; 103 break; 104 case S32CC_CGM0: 105 *base = drv->cgm0_base; 106 break; 107 case S32CC_CGM1: 108 *base = drv->cgm1_base; 109 break; 110 case S32CC_CGM5: 111 *base = drv->cgm5_base; 112 break; 113 case S32CC_FIRC: 114 break; 115 case S32CC_SIRC: 116 break; 117 default: 118 ret = -EINVAL; 119 break; 120 } 121 122 if (ret != 0) { 123 ERROR("Unknown clock source id: %u\n", id); 124 } 125 126 return ret; 127 } 128 129 static void enable_fxosc(const struct s32cc_clk_drv *drv) 130 { 131 uintptr_t fxosc_base = drv->fxosc_base; 132 uint32_t ctrl; 133 134 ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base)); 135 if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) { 136 return; 137 } 138 139 ctrl = FXOSC_CTRL_COMP_EN; 140 ctrl &= ~FXOSC_CTRL_OSC_BYP; 141 ctrl |= FXOSC_CTRL_EOCV(0x1); 142 ctrl |= FXOSC_CTRL_GM_SEL(0x7); 143 mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl); 144 145 /* Switch ON the crystal oscillator. */ 146 mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON); 147 148 /* Wait until the clock is stable. */ 149 while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) { 150 } 151 } 152 153 static int enable_osc(struct s32cc_clk_obj *module, 154 const struct s32cc_clk_drv *drv, 155 unsigned int depth) 156 { 157 const struct s32cc_osc *osc = s32cc_obj2osc(module); 158 int ret = 0; 159 160 ret = update_stack_depth(&depth); 161 if (ret != 0) { 162 return ret; 163 } 164 165 switch (osc->source) { 166 case S32CC_FXOSC: 167 enable_fxosc(drv); 168 break; 169 /* FIRC and SIRC oscillators are enabled by default */ 170 case S32CC_FIRC: 171 break; 172 case S32CC_SIRC: 173 break; 174 default: 175 ERROR("Invalid oscillator %d\n", osc->source); 176 ret = -EINVAL; 177 break; 178 }; 179 180 return ret; 181 } 182 183 static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module) 184 { 185 const struct s32cc_pll *pll = s32cc_obj2pll(module); 186 187 if (pll->source == NULL) { 188 ERROR("Failed to identify PLL's parent\n"); 189 } 190 191 return pll->source; 192 } 193 194 static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq, 195 uint32_t *mfi, uint32_t *mfn) 196 197 { 198 unsigned long vco; 199 unsigned long mfn64; 200 201 /* FRAC-N mode */ 202 *mfi = (uint32_t)(pll_vco / ref_freq); 203 204 /* MFN formula : (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */ 205 mfn64 = pll_vco % ref_freq; 206 mfn64 *= FP_PRECISION; 207 mfn64 /= ref_freq; 208 mfn64 *= 18432UL; 209 mfn64 /= FP_PRECISION; 210 211 if (mfn64 > UINT32_MAX) { 212 return -EINVAL; 213 } 214 215 *mfn = (uint32_t)mfn64; 216 217 vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL; 218 vco += (unsigned long)*mfi * FP_PRECISION; 219 vco *= ref_freq; 220 vco /= FP_PRECISION; 221 222 if (vco != pll_vco) { 223 ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n", 224 pll_vco, vco); 225 return -EINVAL; 226 } 227 228 return 0; 229 } 230 231 static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll) 232 { 233 const struct s32cc_clk_obj *source = pll->source; 234 const struct s32cc_clk *clk; 235 236 if (source == NULL) { 237 ERROR("Failed to identify PLL's parent\n"); 238 return NULL; 239 } 240 241 if (source->type != s32cc_clk_t) { 242 ERROR("The parent of the PLL isn't a clock\n"); 243 return NULL; 244 } 245 246 clk = s32cc_obj2clk(source); 247 248 if (clk->module == NULL) { 249 ERROR("The clock isn't connected to a module\n"); 250 return NULL; 251 } 252 253 source = clk->module; 254 255 if ((source->type != s32cc_clkmux_t) && 256 (source->type != s32cc_shared_clkmux_t)) { 257 ERROR("The parent of the PLL isn't a MUX\n"); 258 return NULL; 259 } 260 261 return s32cc_obj2clkmux(source); 262 } 263 264 static void disable_odiv(uintptr_t pll_addr, uint32_t div_index) 265 { 266 mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE); 267 } 268 269 static void enable_odiv(uintptr_t pll_addr, uint32_t div_index) 270 { 271 mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE); 272 } 273 274 static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs) 275 { 276 uint32_t i; 277 278 for (i = 0; i < ndivs; i++) { 279 disable_odiv(pll_addr, i); 280 } 281 } 282 283 static void enable_pll_hw(uintptr_t pll_addr) 284 { 285 /* Enable the PLL. */ 286 mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0); 287 288 /* Poll until PLL acquires lock. */ 289 while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) { 290 } 291 } 292 293 static void disable_pll_hw(uintptr_t pll_addr) 294 { 295 mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD); 296 } 297 298 static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr, 299 const struct s32cc_clk_drv *drv, uint32_t sclk_id, 300 unsigned long sclk_freq) 301 { 302 uint32_t rdiv = 1, mfi, mfn; 303 int ret; 304 305 ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn); 306 if (ret != 0) { 307 return -EINVAL; 308 } 309 310 /* Disable ODIVs*/ 311 disable_odivs(pll_addr, pll->ndividers); 312 313 /* Disable PLL */ 314 disable_pll_hw(pll_addr); 315 316 /* Program PLLCLKMUX */ 317 mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id); 318 319 /* Program VCO */ 320 mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr), 321 PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK, 322 PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi)); 323 324 mmio_write_32(PLLDIG_PLLFD(pll_addr), 325 PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN); 326 327 enable_pll_hw(pll_addr); 328 329 return ret; 330 } 331 332 static int enable_pll(struct s32cc_clk_obj *module, 333 const struct s32cc_clk_drv *drv, 334 unsigned int depth) 335 { 336 const struct s32cc_pll *pll = s32cc_obj2pll(module); 337 const struct s32cc_clkmux *mux; 338 uintptr_t pll_addr = UL(0x0); 339 unsigned long sclk_freq; 340 uint32_t sclk_id; 341 int ret; 342 343 ret = update_stack_depth(&depth); 344 if (ret != 0) { 345 return ret; 346 } 347 348 mux = get_pll_mux(pll); 349 if (mux == NULL) { 350 return -EINVAL; 351 } 352 353 if (pll->instance != mux->module) { 354 ERROR("MUX type is not in sync with PLL ID\n"); 355 return -EINVAL; 356 } 357 358 ret = get_base_addr(pll->instance, drv, &pll_addr); 359 if (ret != 0) { 360 ERROR("Failed to detect PLL instance\n"); 361 return ret; 362 } 363 364 switch (mux->source_id) { 365 case S32CC_CLK_FIRC: 366 sclk_freq = 48U * MHZ; 367 sclk_id = 0; 368 break; 369 case S32CC_CLK_FXOSC: 370 sclk_freq = 40U * MHZ; 371 sclk_id = 1; 372 break; 373 default: 374 ERROR("Invalid source selection for PLL 0x%lx\n", 375 pll_addr); 376 return -EINVAL; 377 }; 378 379 return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq); 380 } 381 382 static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv) 383 { 384 const struct s32cc_clk_obj *parent; 385 386 parent = pdiv->parent; 387 if (parent == NULL) { 388 ERROR("Failed to identify PLL divider's parent\n"); 389 return NULL; 390 } 391 392 if (parent->type != s32cc_pll_t) { 393 ERROR("The parent of the divider is not a PLL instance\n"); 394 return NULL; 395 } 396 397 return s32cc_obj2pll(parent); 398 } 399 400 static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc) 401 { 402 uint32_t pllodiv; 403 uint32_t pdiv; 404 405 pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index)); 406 pdiv = PLLDIG_PLLODIV_DIV(pllodiv); 407 408 if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) { 409 return; 410 } 411 412 if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) { 413 disable_odiv(pll_addr, div_index); 414 } 415 416 pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U); 417 mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv); 418 419 enable_odiv(pll_addr, div_index); 420 } 421 422 static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module) 423 { 424 const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module); 425 426 if (pdiv->parent == NULL) { 427 ERROR("Failed to identify PLL DIV's parent\n"); 428 } 429 430 return pdiv->parent; 431 } 432 433 static int enable_pll_div(struct s32cc_clk_obj *module, 434 const struct s32cc_clk_drv *drv, 435 unsigned int depth) 436 { 437 const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module); 438 uintptr_t pll_addr = 0x0ULL; 439 const struct s32cc_pll *pll; 440 uint32_t dc; 441 int ret; 442 443 ret = update_stack_depth(&depth); 444 if (ret != 0) { 445 return ret; 446 } 447 448 pll = get_div_pll(pdiv); 449 if (pll == NULL) { 450 ERROR("The parent of the PLL DIV is invalid\n"); 451 return 0; 452 } 453 454 ret = get_base_addr(pll->instance, drv, &pll_addr); 455 if (ret != 0) { 456 ERROR("Failed to detect PLL instance\n"); 457 return -EINVAL; 458 } 459 460 dc = (uint32_t)(pll->vco_freq / pdiv->freq); 461 462 config_pll_out_div(pll_addr, pdiv->index, dc); 463 464 return 0; 465 } 466 467 static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source, 468 bool safe_clk) 469 { 470 uint32_t css, csc; 471 472 css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)); 473 474 /* Already configured */ 475 if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) && 476 (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) && 477 ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) { 478 return 0; 479 } 480 481 /* Ongoing clock switch? */ 482 while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) & 483 MC_CGM_MUXn_CSS_SWIP) != 0U) { 484 } 485 486 csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)); 487 488 /* Clear previous source. */ 489 csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK); 490 491 if (!safe_clk) { 492 /* Select the clock source and trigger the clock switch. */ 493 csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW; 494 } else { 495 /* Switch to safe clock */ 496 csc |= MC_CGM_MUXn_CSC_SAFE_SW; 497 } 498 499 mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc); 500 501 /* Wait for configuration bit to auto-clear. */ 502 while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) & 503 MC_CGM_MUXn_CSC_CLK_SW) != 0U) { 504 } 505 506 /* Is the clock switch completed? */ 507 while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) & 508 MC_CGM_MUXn_CSS_SWIP) != 0U) { 509 } 510 511 /* 512 * Check if the switch succeeded. 513 * Check switch trigger cause and the source. 514 */ 515 css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)); 516 if (!safe_clk) { 517 if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) && 518 (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) { 519 return 0; 520 } 521 522 ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n", 523 mux, source, cgm_addr); 524 } else { 525 if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) || 526 (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) && 527 ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) { 528 return 0; 529 } 530 531 ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n", 532 mux, cgm_addr); 533 } 534 535 return -EINVAL; 536 } 537 538 static int enable_cgm_mux(const struct s32cc_clkmux *mux, 539 const struct s32cc_clk_drv *drv) 540 { 541 uintptr_t cgm_addr = UL(0x0); 542 uint32_t mux_hw_clk; 543 int ret; 544 545 ret = get_base_addr(mux->module, drv, &cgm_addr); 546 if (ret != 0) { 547 return ret; 548 } 549 550 mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id); 551 552 return cgm_mux_clk_config(cgm_addr, mux->index, 553 mux_hw_clk, false); 554 } 555 556 static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module) 557 { 558 const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module); 559 struct s32cc_clk *clk; 560 561 if (mux == NULL) { 562 return NULL; 563 } 564 565 clk = s32cc_get_arch_clk(mux->source_id); 566 if (clk == NULL) { 567 ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n", 568 mux->source_id, mux->index); 569 return NULL; 570 } 571 572 return &clk->desc; 573 } 574 575 static int enable_mux(struct s32cc_clk_obj *module, 576 const struct s32cc_clk_drv *drv, 577 unsigned int depth) 578 { 579 const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module); 580 const struct s32cc_clk *clk; 581 int ret = 0; 582 583 ret = update_stack_depth(&depth); 584 if (ret != 0) { 585 return ret; 586 } 587 588 if (mux == NULL) { 589 return -EINVAL; 590 } 591 592 clk = s32cc_get_arch_clk(mux->source_id); 593 if (clk == NULL) { 594 ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n", 595 mux->source_id, mux->index); 596 return -EINVAL; 597 } 598 599 switch (mux->module) { 600 /* PLL mux will be enabled by PLL setup */ 601 case S32CC_ARM_PLL: 602 case S32CC_PERIPH_PLL: 603 case S32CC_DDR_PLL: 604 break; 605 case S32CC_CGM1: 606 ret = enable_cgm_mux(mux, drv); 607 break; 608 case S32CC_CGM0: 609 ret = enable_cgm_mux(mux, drv); 610 break; 611 case S32CC_CGM5: 612 ret = enable_cgm_mux(mux, drv); 613 break; 614 default: 615 ERROR("Unknown mux parent type: %d\n", mux->module); 616 ret = -EINVAL; 617 break; 618 }; 619 620 return ret; 621 } 622 623 static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module) 624 { 625 const struct s32cc_dfs *dfs = s32cc_obj2dfs(module); 626 627 if (dfs->parent == NULL) { 628 ERROR("Failed to identify DFS's parent\n"); 629 } 630 631 return dfs->parent; 632 } 633 634 static int enable_dfs(struct s32cc_clk_obj *module, 635 const struct s32cc_clk_drv *drv, 636 unsigned int depth) 637 { 638 int ret = 0; 639 640 ret = update_stack_depth(&depth); 641 if (ret != 0) { 642 return ret; 643 } 644 645 return 0; 646 } 647 648 static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div) 649 { 650 const struct s32cc_clk_obj *parent = dfs_div->parent; 651 652 if (parent->type != s32cc_dfs_t) { 653 ERROR("DFS DIV doesn't have a DFS as parent\n"); 654 return NULL; 655 } 656 657 return s32cc_obj2dfs(parent); 658 } 659 660 static struct s32cc_pll *dfsdiv2pll(const struct s32cc_dfs_div *dfs_div) 661 { 662 const struct s32cc_clk_obj *parent; 663 const struct s32cc_dfs *dfs; 664 665 dfs = get_div_dfs(dfs_div); 666 if (dfs == NULL) { 667 return NULL; 668 } 669 670 parent = dfs->parent; 671 if (parent->type != s32cc_pll_t) { 672 return NULL; 673 } 674 675 return s32cc_obj2pll(parent); 676 } 677 678 static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div, 679 uint32_t *mfi, uint32_t *mfn) 680 { 681 uint64_t factor64, tmp64, ofreq; 682 uint32_t factor32; 683 684 unsigned long in = dfs_freq; 685 unsigned long out = dfs_div->freq; 686 687 /** 688 * factor = (IN / OUT) / 2 689 * MFI = integer(factor) 690 * MFN = (factor - MFI) * 36 691 */ 692 factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL; 693 tmp64 = factor64 / FP_PRECISION; 694 if (tmp64 > UINT32_MAX) { 695 return -EINVAL; 696 } 697 698 factor32 = (uint32_t)tmp64; 699 *mfi = factor32; 700 701 tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION; 702 if (tmp64 > UINT32_MAX) { 703 return -EINVAL; 704 } 705 706 *mfn = (uint32_t)tmp64; 707 708 /* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */ 709 factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL; 710 factor64 += ((uint64_t)*mfi) * FP_PRECISION; 711 factor64 *= 2ULL; 712 ofreq = (((uint64_t)in) * FP_PRECISION) / factor64; 713 714 if (ofreq != dfs_div->freq) { 715 ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n", 716 dfs_div->freq); 717 ERROR("Nearest freq = %" PRIx64 "\n", ofreq); 718 return -EINVAL; 719 } 720 721 return 0; 722 } 723 724 static int init_dfs_port(uintptr_t dfs_addr, uint32_t port, 725 uint32_t mfi, uint32_t mfn) 726 { 727 uint32_t portsr, portolsr; 728 uint32_t mask, old_mfi, old_mfn; 729 uint32_t dvport; 730 bool init_dfs; 731 732 dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port)); 733 734 old_mfi = DFS_DVPORTn_MFI(dvport); 735 old_mfn = DFS_DVPORTn_MFN(dvport); 736 737 portsr = mmio_read_32(DFS_PORTSR(dfs_addr)); 738 portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr)); 739 740 /* Skip configuration if it's not needed */ 741 if (((portsr & BIT_32(port)) != 0U) && 742 ((portolsr & BIT_32(port)) == 0U) && 743 (mfi == old_mfi) && (mfn == old_mfn)) { 744 return 0; 745 } 746 747 init_dfs = (portsr == 0U); 748 749 if (init_dfs) { 750 mask = DFS_PORTRESET_MASK; 751 } else { 752 mask = DFS_PORTRESET_SET(BIT_32(port)); 753 } 754 755 mmio_write_32(DFS_PORTOLSR(dfs_addr), mask); 756 mmio_write_32(DFS_PORTRESET(dfs_addr), mask); 757 758 while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) { 759 } 760 761 if (init_dfs) { 762 mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET); 763 } 764 765 mmio_write_32(DFS_DVPORTn(dfs_addr, port), 766 DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn)); 767 768 if (init_dfs) { 769 /* DFS clk enable programming */ 770 mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET); 771 } 772 773 mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port)); 774 775 while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) { 776 } 777 778 portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr)); 779 if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) { 780 ERROR("Failed to lock DFS divider\n"); 781 return -EINVAL; 782 } 783 784 return 0; 785 } 786 787 static struct s32cc_clk_obj * 788 get_dfs_div_parent(const struct s32cc_clk_obj *module) 789 { 790 const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module); 791 792 if (dfs_div->parent == NULL) { 793 ERROR("Failed to identify DFS divider's parent\n"); 794 } 795 796 return dfs_div->parent; 797 } 798 799 static int enable_dfs_div(struct s32cc_clk_obj *module, 800 const struct s32cc_clk_drv *drv, 801 unsigned int depth) 802 { 803 const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module); 804 const struct s32cc_pll *pll; 805 const struct s32cc_dfs *dfs; 806 uintptr_t dfs_addr = 0UL; 807 uint32_t mfi, mfn; 808 int ret = 0; 809 810 ret = update_stack_depth(&depth); 811 if (ret != 0) { 812 return ret; 813 } 814 815 dfs = get_div_dfs(dfs_div); 816 if (dfs == NULL) { 817 return -EINVAL; 818 } 819 820 pll = dfsdiv2pll(dfs_div); 821 if (pll == NULL) { 822 ERROR("Failed to identify DFS divider's parent\n"); 823 return -EINVAL; 824 } 825 826 ret = get_base_addr(dfs->instance, drv, &dfs_addr); 827 if ((ret != 0) || (dfs_addr == 0UL)) { 828 return -EINVAL; 829 } 830 831 ret = get_dfs_mfi_mfn(pll->vco_freq, dfs_div, &mfi, &mfn); 832 if (ret != 0) { 833 return -EINVAL; 834 } 835 836 return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn); 837 } 838 839 typedef int (*enable_clk_t)(struct s32cc_clk_obj *module, 840 const struct s32cc_clk_drv *drv, 841 unsigned int depth); 842 843 static int enable_part(struct s32cc_clk_obj *module, 844 const struct s32cc_clk_drv *drv, 845 unsigned int depth) 846 { 847 const struct s32cc_part *part = s32cc_obj2part(module); 848 uint32_t part_no = part->partition_id; 849 850 if ((drv->mc_me == 0UL) || (drv->mc_rgm == 0UL) || (drv->rdc == 0UL)) { 851 return -EINVAL; 852 } 853 854 return mc_me_enable_partition(drv->mc_me, drv->mc_rgm, drv->rdc, part_no); 855 } 856 857 static int enable_part_block(struct s32cc_clk_obj *module, 858 const struct s32cc_clk_drv *drv, 859 unsigned int depth) 860 { 861 const struct s32cc_part_block *block = s32cc_obj2partblock(module); 862 const struct s32cc_part *part = block->part; 863 uint32_t part_no = part->partition_id; 864 unsigned int ldepth = depth; 865 uint32_t cofb; 866 int ret; 867 868 ret = update_stack_depth(&ldepth); 869 if (ret != 0) { 870 return ret; 871 } 872 873 if ((block->block >= s32cc_part_block0) && 874 (block->block <= s32cc_part_block15)) { 875 cofb = (uint32_t)block->block - (uint32_t)s32cc_part_block0; 876 mc_me_enable_part_cofb(drv->mc_me, part_no, cofb, block->status); 877 } else { 878 ERROR("Unknown partition block type: %d\n", block->block); 879 return -EINVAL; 880 } 881 882 return 0; 883 } 884 885 static struct s32cc_clk_obj * 886 get_part_block_parent(const struct s32cc_clk_obj *module) 887 { 888 const struct s32cc_part_block *block = s32cc_obj2partblock(module); 889 890 return &block->part->desc; 891 } 892 893 static int enable_module_with_refcount(struct s32cc_clk_obj *module, 894 const struct s32cc_clk_drv *drv, 895 unsigned int depth); 896 897 static int enable_part_block_link(struct s32cc_clk_obj *module, 898 const struct s32cc_clk_drv *drv, 899 unsigned int depth) 900 { 901 const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module); 902 struct s32cc_part_block *block = link->block; 903 unsigned int ldepth = depth; 904 int ret; 905 906 ret = update_stack_depth(&ldepth); 907 if (ret != 0) { 908 return ret; 909 } 910 911 /* Move the enablement algorithm to partition tree */ 912 return enable_module_with_refcount(&block->desc, drv, ldepth); 913 } 914 915 static struct s32cc_clk_obj * 916 get_part_block_link_parent(const struct s32cc_clk_obj *module) 917 { 918 const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module); 919 920 return link->parent; 921 } 922 923 static int no_enable(struct s32cc_clk_obj *module, 924 const struct s32cc_clk_drv *drv, 925 unsigned int depth) 926 { 927 return 0; 928 } 929 930 static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod, 931 const struct s32cc_clk_drv *drv, bool leaf_node, 932 unsigned int depth) 933 { 934 int ret = 0; 935 936 if (mod == NULL) { 937 return 0; 938 } 939 940 ret = update_stack_depth(&depth); 941 if (ret != 0) { 942 return ret; 943 } 944 945 /* Refcount will be updated as part of the recursivity */ 946 if (leaf_node) { 947 return en_cb(mod, drv, depth); 948 } 949 950 if (mod->refcount == 0U) { 951 ret = en_cb(mod, drv, depth); 952 } 953 954 if (ret == 0) { 955 mod->refcount++; 956 } 957 958 return ret; 959 } 960 961 static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module); 962 963 static int enable_module(struct s32cc_clk_obj *module, 964 const struct s32cc_clk_drv *drv, 965 unsigned int depth) 966 { 967 struct s32cc_clk_obj *parent = get_module_parent(module); 968 static const enable_clk_t enable_clbs[12] = { 969 [s32cc_clk_t] = no_enable, 970 [s32cc_osc_t] = enable_osc, 971 [s32cc_pll_t] = enable_pll, 972 [s32cc_pll_out_div_t] = enable_pll_div, 973 [s32cc_clkmux_t] = enable_mux, 974 [s32cc_shared_clkmux_t] = enable_mux, 975 [s32cc_dfs_t] = enable_dfs, 976 [s32cc_dfs_div_t] = enable_dfs_div, 977 [s32cc_part_t] = enable_part, 978 [s32cc_part_block_t] = enable_part_block, 979 [s32cc_part_block_link_t] = enable_part_block_link, 980 }; 981 uint32_t index; 982 int ret = 0; 983 984 ret = update_stack_depth(&depth); 985 if (ret != 0) { 986 return ret; 987 } 988 989 if (drv == NULL) { 990 return -EINVAL; 991 } 992 993 index = (uint32_t)module->type; 994 995 if (index >= ARRAY_SIZE(enable_clbs)) { 996 ERROR("Undefined module type: %d\n", module->type); 997 return -EINVAL; 998 } 999 1000 if (enable_clbs[index] == NULL) { 1001 ERROR("Undefined callback for the clock type: %d\n", 1002 module->type); 1003 return -EINVAL; 1004 } 1005 1006 parent = get_module_parent(module); 1007 1008 ret = exec_cb_with_refcount(enable_module, parent, drv, 1009 false, depth); 1010 if (ret != 0) { 1011 return ret; 1012 } 1013 1014 ret = exec_cb_with_refcount(enable_clbs[index], module, drv, 1015 true, depth); 1016 if (ret != 0) { 1017 return ret; 1018 } 1019 1020 return ret; 1021 } 1022 1023 static int enable_module_with_refcount(struct s32cc_clk_obj *module, 1024 const struct s32cc_clk_drv *drv, 1025 unsigned int depth) 1026 { 1027 return exec_cb_with_refcount(enable_module, module, drv, false, depth); 1028 } 1029 1030 static int s32cc_clk_enable(unsigned long id) 1031 { 1032 const struct s32cc_clk_drv *drv = get_drv(); 1033 unsigned int depth = MAX_STACK_DEPTH; 1034 struct s32cc_clk *clk; 1035 1036 clk = s32cc_get_arch_clk(id); 1037 if (clk == NULL) { 1038 return -EINVAL; 1039 } 1040 1041 return enable_module_with_refcount(&clk->desc, drv, depth); 1042 } 1043 1044 static void s32cc_clk_disable(unsigned long id) 1045 { 1046 } 1047 1048 static bool s32cc_clk_is_enabled(unsigned long id) 1049 { 1050 return false; 1051 } 1052 1053 static unsigned long s32cc_clk_get_rate(unsigned long id) 1054 { 1055 return 0; 1056 } 1057 1058 static int set_module_rate(const struct s32cc_clk_obj *module, 1059 unsigned long rate, unsigned long *orate, 1060 unsigned int *depth); 1061 1062 static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1063 unsigned long *orate, unsigned int *depth) 1064 { 1065 struct s32cc_osc *osc = s32cc_obj2osc(module); 1066 int ret; 1067 1068 ret = update_stack_depth(depth); 1069 if (ret != 0) { 1070 return ret; 1071 } 1072 1073 if ((osc->freq != 0UL) && (rate != osc->freq)) { 1074 ERROR("Already initialized oscillator. freq = %lu\n", 1075 osc->freq); 1076 return -EINVAL; 1077 } 1078 1079 osc->freq = rate; 1080 *orate = osc->freq; 1081 1082 return 0; 1083 } 1084 1085 static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1086 unsigned long *orate, unsigned int *depth) 1087 { 1088 const struct s32cc_clk *clk = s32cc_obj2clk(module); 1089 int ret; 1090 1091 ret = update_stack_depth(depth); 1092 if (ret != 0) { 1093 return ret; 1094 } 1095 1096 if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) && 1097 ((rate < clk->min_freq) || (rate > clk->max_freq))) { 1098 ERROR("%lu frequency is out of the allowed range: [%lu:%lu]\n", 1099 rate, clk->min_freq, clk->max_freq); 1100 return -EINVAL; 1101 } 1102 1103 if (clk->module != NULL) { 1104 return set_module_rate(clk->module, rate, orate, depth); 1105 } 1106 1107 if (clk->pclock != NULL) { 1108 return set_clk_freq(&clk->pclock->desc, rate, orate, depth); 1109 } 1110 1111 return -EINVAL; 1112 } 1113 1114 static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1115 unsigned long *orate, unsigned int *depth) 1116 { 1117 struct s32cc_pll *pll = s32cc_obj2pll(module); 1118 int ret; 1119 1120 ret = update_stack_depth(depth); 1121 if (ret != 0) { 1122 return ret; 1123 } 1124 1125 if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) { 1126 ERROR("PLL frequency was already set\n"); 1127 return -EINVAL; 1128 } 1129 1130 pll->vco_freq = rate; 1131 *orate = pll->vco_freq; 1132 1133 return 0; 1134 } 1135 1136 static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1137 unsigned long *orate, unsigned int *depth) 1138 { 1139 struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module); 1140 const struct s32cc_pll *pll; 1141 unsigned long prate, dc; 1142 int ret; 1143 1144 ret = update_stack_depth(depth); 1145 if (ret != 0) { 1146 return ret; 1147 } 1148 1149 if (pdiv->parent == NULL) { 1150 ERROR("Failed to identify PLL divider's parent\n"); 1151 return -EINVAL; 1152 } 1153 1154 pll = s32cc_obj2pll(pdiv->parent); 1155 if (pll == NULL) { 1156 ERROR("The parent of the PLL DIV is invalid\n"); 1157 return -EINVAL; 1158 } 1159 1160 prate = pll->vco_freq; 1161 1162 /** 1163 * The PLL is not initialized yet, so let's take a risk 1164 * and accept the proposed rate. 1165 */ 1166 if (prate == 0UL) { 1167 pdiv->freq = rate; 1168 *orate = rate; 1169 return 0; 1170 } 1171 1172 /* Decline in case the rate cannot fit PLL's requirements. */ 1173 dc = prate / rate; 1174 if ((prate / dc) != rate) { 1175 return -EINVAL; 1176 } 1177 1178 pdiv->freq = rate; 1179 *orate = pdiv->freq; 1180 1181 return 0; 1182 } 1183 1184 static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1185 unsigned long *orate, unsigned int *depth) 1186 { 1187 const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module); 1188 int ret; 1189 1190 ret = update_stack_depth(depth); 1191 if (ret != 0) { 1192 return ret; 1193 } 1194 1195 if (fdiv->parent == NULL) { 1196 ERROR("The divider doesn't have a valid parent\b"); 1197 return -EINVAL; 1198 } 1199 1200 ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth); 1201 1202 /* Update the output rate based on the parent's rate */ 1203 *orate /= fdiv->rate_div; 1204 1205 return ret; 1206 } 1207 1208 static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1209 unsigned long *orate, unsigned int *depth) 1210 { 1211 const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module); 1212 const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id); 1213 int ret; 1214 1215 ret = update_stack_depth(depth); 1216 if (ret != 0) { 1217 return ret; 1218 } 1219 1220 if (clk == NULL) { 1221 ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n", 1222 mux->index, mux->source_id); 1223 return -EINVAL; 1224 } 1225 1226 return set_module_rate(&clk->desc, rate, orate, depth); 1227 } 1228 1229 static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate, 1230 unsigned long *orate, unsigned int *depth) 1231 { 1232 struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module); 1233 const struct s32cc_dfs *dfs; 1234 int ret; 1235 1236 ret = update_stack_depth(depth); 1237 if (ret != 0) { 1238 return ret; 1239 } 1240 1241 if (dfs_div->parent == NULL) { 1242 ERROR("Failed to identify DFS divider's parent\n"); 1243 return -EINVAL; 1244 } 1245 1246 /* Sanity check */ 1247 dfs = s32cc_obj2dfs(dfs_div->parent); 1248 if (dfs->parent == NULL) { 1249 ERROR("Failed to identify DFS's parent\n"); 1250 return -EINVAL; 1251 } 1252 1253 if ((dfs_div->freq != 0U) && (dfs_div->freq != rate)) { 1254 ERROR("DFS DIV frequency was already set to %lu\n", 1255 dfs_div->freq); 1256 return -EINVAL; 1257 } 1258 1259 dfs_div->freq = rate; 1260 *orate = rate; 1261 1262 return ret; 1263 } 1264 1265 static int set_module_rate(const struct s32cc_clk_obj *module, 1266 unsigned long rate, unsigned long *orate, 1267 unsigned int *depth) 1268 { 1269 int ret = 0; 1270 1271 ret = update_stack_depth(depth); 1272 if (ret != 0) { 1273 return ret; 1274 } 1275 1276 ret = -EINVAL; 1277 1278 switch (module->type) { 1279 case s32cc_clk_t: 1280 ret = set_clk_freq(module, rate, orate, depth); 1281 break; 1282 case s32cc_osc_t: 1283 ret = set_osc_freq(module, rate, orate, depth); 1284 break; 1285 case s32cc_pll_t: 1286 ret = set_pll_freq(module, rate, orate, depth); 1287 break; 1288 case s32cc_pll_out_div_t: 1289 ret = set_pll_div_freq(module, rate, orate, depth); 1290 break; 1291 case s32cc_fixed_div_t: 1292 ret = set_fixed_div_freq(module, rate, orate, depth); 1293 break; 1294 case s32cc_clkmux_t: 1295 ret = set_mux_freq(module, rate, orate, depth); 1296 break; 1297 case s32cc_shared_clkmux_t: 1298 ret = set_mux_freq(module, rate, orate, depth); 1299 break; 1300 case s32cc_dfs_t: 1301 ERROR("Setting the frequency of a DFS is not allowed!"); 1302 break; 1303 case s32cc_dfs_div_t: 1304 ret = set_dfs_div_freq(module, rate, orate, depth); 1305 break; 1306 default: 1307 break; 1308 } 1309 1310 return ret; 1311 } 1312 1313 static int s32cc_clk_set_rate(unsigned long id, unsigned long rate, 1314 unsigned long *orate) 1315 { 1316 unsigned int depth = MAX_STACK_DEPTH; 1317 const struct s32cc_clk *clk; 1318 int ret; 1319 1320 clk = s32cc_get_arch_clk(id); 1321 if (clk == NULL) { 1322 return -EINVAL; 1323 } 1324 1325 ret = set_module_rate(&clk->desc, rate, orate, &depth); 1326 if (ret != 0) { 1327 ERROR("Failed to set frequency (%lu MHz) for clock %lu\n", 1328 rate, id); 1329 } 1330 1331 return ret; 1332 } 1333 1334 static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module) 1335 { 1336 return NULL; 1337 } 1338 1339 typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj); 1340 1341 static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module) 1342 { 1343 static const get_parent_clb_t parents_clbs[12] = { 1344 [s32cc_clk_t] = get_clk_parent, 1345 [s32cc_osc_t] = get_no_parent, 1346 [s32cc_pll_t] = get_pll_parent, 1347 [s32cc_pll_out_div_t] = get_pll_div_parent, 1348 [s32cc_clkmux_t] = get_mux_parent, 1349 [s32cc_shared_clkmux_t] = get_mux_parent, 1350 [s32cc_dfs_t] = get_dfs_parent, 1351 [s32cc_dfs_div_t] = get_dfs_div_parent, 1352 [s32cc_part_t] = get_no_parent, 1353 [s32cc_part_block_t] = get_part_block_parent, 1354 [s32cc_part_block_link_t] = get_part_block_link_parent, 1355 }; 1356 uint32_t index; 1357 1358 if (module == NULL) { 1359 return NULL; 1360 } 1361 1362 index = (uint32_t)module->type; 1363 1364 if (index >= ARRAY_SIZE(parents_clbs)) { 1365 ERROR("Undefined module type: %d\n", module->type); 1366 return NULL; 1367 } 1368 1369 if (parents_clbs[index] == NULL) { 1370 ERROR("Undefined parent getter for type: %d\n", module->type); 1371 return NULL; 1372 } 1373 1374 return parents_clbs[index](module); 1375 } 1376 1377 static int s32cc_clk_get_parent(unsigned long id) 1378 { 1379 struct s32cc_clk *parent_clk; 1380 const struct s32cc_clk_obj *parent; 1381 const struct s32cc_clk *clk; 1382 unsigned long parent_id; 1383 int ret; 1384 1385 clk = s32cc_get_arch_clk(id); 1386 if (clk == NULL) { 1387 return -EINVAL; 1388 } 1389 1390 parent = get_module_parent(clk->module); 1391 if (parent == NULL) { 1392 return -EINVAL; 1393 } 1394 1395 parent_clk = s32cc_obj2clk(parent); 1396 if (parent_clk == NULL) { 1397 return -EINVAL; 1398 } 1399 1400 ret = s32cc_get_clk_id(parent_clk, &parent_id); 1401 if (ret != 0) { 1402 return ret; 1403 } 1404 1405 if (parent_id > (unsigned long)INT_MAX) { 1406 return -E2BIG; 1407 } 1408 1409 return (int)parent_id; 1410 } 1411 1412 static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id) 1413 { 1414 const struct s32cc_clk *parent; 1415 const struct s32cc_clk *clk; 1416 bool valid_source = false; 1417 struct s32cc_clkmux *mux; 1418 uint8_t i; 1419 1420 clk = s32cc_get_arch_clk(id); 1421 if (clk == NULL) { 1422 return -EINVAL; 1423 } 1424 1425 parent = s32cc_get_arch_clk(parent_id); 1426 if (parent == NULL) { 1427 return -EINVAL; 1428 } 1429 1430 if (!is_s32cc_clk_mux(clk)) { 1431 ERROR("Clock %lu is not a mux\n", id); 1432 return -EINVAL; 1433 } 1434 1435 mux = s32cc_clk2mux(clk); 1436 if (mux == NULL) { 1437 ERROR("Failed to cast clock %lu to clock mux\n", id); 1438 return -EINVAL; 1439 } 1440 1441 for (i = 0; i < mux->nclks; i++) { 1442 if (mux->clkids[i] == parent_id) { 1443 valid_source = true; 1444 break; 1445 } 1446 } 1447 1448 if (!valid_source) { 1449 ERROR("Clock %lu is not a valid clock for mux %lu\n", 1450 parent_id, id); 1451 return -EINVAL; 1452 } 1453 1454 mux->source_id = parent_id; 1455 1456 return 0; 1457 } 1458 1459 void s32cc_clk_register_drv(void) 1460 { 1461 static const struct clk_ops s32cc_clk_ops = { 1462 .enable = s32cc_clk_enable, 1463 .disable = s32cc_clk_disable, 1464 .is_enabled = s32cc_clk_is_enabled, 1465 .get_rate = s32cc_clk_get_rate, 1466 .set_rate = s32cc_clk_set_rate, 1467 .get_parent = s32cc_clk_get_parent, 1468 .set_parent = s32cc_clk_set_parent, 1469 }; 1470 1471 clk_register(&s32cc_clk_ops); 1472 } 1473 1474