// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
 */

#include <config.h>
#include <drivers/clk.h>
#include <drivers/clk_dt.h>
#include <drivers/stm32_shared_io.h>
#include <io.h>
#include <kernel/boot.h>
#include <kernel/delay.h>
#include <kernel/dt.h>
#include <libfdt.h>
#include <stdio.h>
#include <stm32_util.h>

#include "clk-stm32-core.h"

#define RCC_MP_ENCLRR_OFFSET	0x4

#define TIMEOUT_US_200MS	U(200000)
#define TIMEOUT_US_1S		U(1000000)

static struct clk_stm32_priv *stm32_clock_data;

struct clk_stm32_priv *clk_stm32_get_priv(void)
{
	return stm32_clock_data;
}

uintptr_t clk_stm32_get_rcc_base(void)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();

	return priv->base;
}

/* STM32 MUX API */
size_t stm32_mux_get_parent(uint32_t mux_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct mux_cfg *mux = &priv->muxes[mux_id];
	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);

	return (io_read32(priv->base + mux->offset) & mask) >> mux->shift;
}

TEE_Result stm32_mux_set_parent(uint16_t mux_id, uint8_t sel)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct mux_cfg *mux = &priv->muxes[mux_id];
	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
	uintptr_t address = priv->base + mux->offset;

	io_clrsetbits32(address, mask, (sel << mux->shift) & mask);

	if (mux->ready != MUX_NO_RDY)
		return stm32_gate_wait_ready((uint16_t)mux->ready, true);

	return TEE_SUCCESS;
}

/* STM32 GATE API */
static void stm32_gate_endisable(uint16_t gate_id, bool enable)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (enable) {
		if (gate->set_clr)
			io_write32(addr, BIT(gate->bit_idx));
		else
			io_setbits32_stm32shregs(addr, BIT(gate->bit_idx));
		/* Make sure the clock is enabled before returning to caller */
		dsb();
	} else {
		/* Wait for pending operations before disabling the clock */
		dsb();

		if (gate->set_clr)
			io_write32(addr + RCC_MP_ENCLRR_OFFSET,
				   BIT(gate->bit_idx));
		else
			io_clrbits32_stm32shregs(addr, BIT(gate->bit_idx));
	}
}

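/*
 * Gate enable/disable requests are reference counted in priv->gate_cpt:
 * the RCC enable bit is only written on the 0 -> 1 and 1 -> 0 transitions,
 * so a gate shared by several users stays enabled until its last user
 * releases it.
 */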
void stm32_gate_disable(uint16_t gate_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	uint8_t *gate_cpt = priv->gate_cpt;

	assert(gate_cpt[gate_id] > 0);
	if (gate_cpt[gate_id] == 1)
		stm32_gate_endisable(gate_id, false);
	gate_cpt[gate_id]--;
}

void stm32_gate_enable(uint16_t gate_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	uint8_t *gate_cpt = priv->gate_cpt;

	assert(gate_cpt[gate_id] < 0xFF);
	if (gate_cpt[gate_id] == 0)
		stm32_gate_endisable(gate_id, true);
	gate_cpt[gate_id]++;
}

bool stm32_gate_is_enabled(uint16_t gate_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	return (io_read32(addr) & BIT(gate->bit_idx)) != 0U;
}

TEE_Result stm32_gate_wait_ready(uint16_t gate_id, bool ready_on)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t address = priv->base + gate->offset;
	uint32_t mask_rdy = BIT(gate->bit_idx);
	uint64_t timeout = timeout_init_us(TIMEOUT_US_1S);
	uint32_t mask = 0U;

	if (ready_on)
		mask = BIT(gate->bit_idx);

	while ((io_read32(address) & mask_rdy) != mask)
		if (timeout_elapsed(timeout))
			break;

	if ((io_read32(address) & mask_rdy) != mask)
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

/* STM32 GATE READY clock operators */
static TEE_Result stm32_gate_ready_endisable(uint16_t gate_id, bool enable,
					     bool wait_rdy)
{
	stm32_gate_endisable(gate_id, enable);

	if (wait_rdy)
		return stm32_gate_wait_ready(gate_id + 1, enable);

	return TEE_SUCCESS;
}

TEE_Result stm32_gate_rdy_enable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, true, true);
}

TEE_Result stm32_gate_rdy_disable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, false, true);
}

/* STM32 DIV API */
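/*
 * The helpers below convert between the raw register field value and the
 * effective division factor, according to the divider flags
 * (CLK_DIVIDER_ONE_BASED, CLK_DIVIDER_POWER_OF_TWO, CLK_DIVIDER_MAX_AT_ZERO)
 * or an optional value/divisor translation table.
 */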
static unsigned int _get_table_div(const struct div_table_cfg *table,
				   unsigned int val)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;

	return 0;
}

static unsigned int _get_table_val(const struct div_table_cfg *table,
				   unsigned int div)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;

	return 0;
}

static unsigned int _get_div(const struct div_table_cfg *table,
			     unsigned int val, unsigned long flags,
			     uint8_t width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return BIT(val);

	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (val != 0U) ? val : BIT(width);

	if (table)
		return _get_table_div(table, val);

	return val + 1U;
}

static unsigned int _get_val(const struct div_table_cfg *table,
			     unsigned int div, unsigned long flags,
			     uint8_t width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __builtin_ffs(div) - 1;

	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div != 0U) ? div : BIT(width);

	if (table)
		return _get_table_val(table, div);

	return div - 1U;
}

static bool _is_valid_table_div(const struct div_table_cfg *table,
				unsigned int div)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;

	return false;
}

static bool _is_valid_div(const struct div_table_cfg *table,
			  unsigned int div, unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return IS_POWER_OF_TWO(div);

	if (table)
		return _is_valid_table_div(table, div);

	return true;
}

static int divider_get_val(unsigned long rate, unsigned long parent_rate,
			   const struct div_table_cfg *table, uint8_t width,
			   unsigned long flags)
{
	unsigned int div = 0U;
	unsigned int value = 0U;

	div = UDIV_ROUND_NEAREST((uint64_t)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -1;

	value = _get_val(table, div, flags, width);

	return MIN(value, MASK_WIDTH_SHIFT(width, 0));
}

uint32_t stm32_div_get_value(int div_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = 0;

	val = io_read32(priv->base + divider->offset) >> divider->shift;
	val &= MASK_WIDTH_SHIFT(divider->width, 0);

	return val;
}

TEE_Result stm32_div_set_value(uint32_t div_id, uint32_t value)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = NULL;
	uintptr_t address = 0;
	uint32_t mask = 0;

	if (div_id >= priv->nb_div)
		panic();

	divider = &priv->div[div_id];
	address = priv->base + divider->offset;

	mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
	io_clrsetbits32(address, mask, (value << divider->shift) & mask);

	if (divider->ready == DIV_NO_RDY)
		return TEE_SUCCESS;

	return stm32_gate_wait_ready((uint16_t)divider->ready, true);
}

static unsigned long stm32_div_get_rate(int div_id, unsigned long prate)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = stm32_div_get_value(div_id);
	unsigned int div = 0U;

	div = _get_div(divider->table, val, divider->flags, divider->width);
	if (!div)
		return prate;

	return ROUNDUP_DIV((uint64_t)prate, div);
}

TEE_Result stm32_div_set_rate(int div_id, unsigned long rate,
			      unsigned long prate)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	int value = 0;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);

	if (value < 0)
		return TEE_ERROR_GENERIC;

	return stm32_div_set_value(div_id, value);
}

/* STM32 MUX clock operators */
static size_t clk_stm32_mux_get_parent(struct clk *clk)
{
	struct clk_stm32_mux_cfg *cfg = clk->priv;

	return stm32_mux_get_parent(cfg->mux_id);
}

static TEE_Result clk_stm32_mux_set_parent(struct clk *clk, size_t pidx)
{
	struct clk_stm32_mux_cfg *cfg = clk->priv;

	return stm32_mux_set_parent(cfg->mux_id, pidx);
}

const struct clk_ops clk_stm32_mux_ops = {
	.get_parent = clk_stm32_mux_get_parent,
	.set_parent = clk_stm32_mux_set_parent,
};

/* STM32 GATE clock operators */
static TEE_Result clk_stm32_gate_enable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	stm32_gate_enable(cfg->gate_id);

	return TEE_SUCCESS;
}

static void clk_stm32_gate_disable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	stm32_gate_disable(cfg->gate_id);
}

const struct clk_ops clk_stm32_gate_ops = {
	.enable = clk_stm32_gate_enable,
	.disable = clk_stm32_gate_disable,
};

static TEE_Result clk_stm32_gate_ready_enable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	return stm32_gate_rdy_enable(cfg->gate_id);
}

static void clk_stm32_gate_ready_disable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	if (stm32_gate_rdy_disable(cfg->gate_id))
		panic();
}

const struct clk_ops clk_stm32_gate_ready_ops = {
	.enable = clk_stm32_gate_ready_enable,
	.disable = clk_stm32_gate_ready_disable,
};

/* STM32 DIV clock operators */
unsigned long clk_stm32_divider_get_rate(struct clk *clk,
					 unsigned long parent_rate)
{
	struct clk_stm32_div_cfg *cfg = clk->priv;

	return stm32_div_get_rate(cfg->div_id, parent_rate);
}

TEE_Result clk_stm32_divider_set_rate(struct clk *clk,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct clk_stm32_div_cfg *cfg = clk->priv;

	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
}

const struct clk_ops clk_stm32_divider_ops = {
	.get_rate = clk_stm32_divider_get_rate,
	.set_rate = clk_stm32_divider_set_rate,
};

/* STM32 COMPOSITE clock operators */
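/*
 * Composite clocks group an optional mux, divider and gate. NO_MUX and
 * NO_DIV identify the missing elements: get_parent() then reports index 0,
 * get_rate() forwards the parent rate and set_rate() is a no-op, while
 * set_parent() on a mux-less clock panics.
 */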
size_t clk_stm32_composite_get_parent(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->mux_id == NO_MUX) {
		/* A composite clock without a mux is a valid case */
		return 0;
	}

	return stm32_mux_get_parent(cfg->mux_id);
}

TEE_Result clk_stm32_composite_set_parent(struct clk *clk, size_t pidx)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->mux_id == NO_MUX)
		panic();

	return stm32_mux_set_parent(cfg->mux_id, pidx);
}

unsigned long clk_stm32_composite_get_rate(struct clk *clk,
					   unsigned long parent_rate)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->div_id == NO_DIV)
		return parent_rate;

	return stm32_div_get_rate(cfg->div_id, parent_rate);
}

TEE_Result clk_stm32_composite_set_rate(struct clk *clk, unsigned long rate,
					unsigned long parent_rate)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->div_id == NO_DIV)
		return TEE_SUCCESS;

	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
}

TEE_Result clk_stm32_composite_gate_enable(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	stm32_gate_enable(cfg->gate_id);

	return TEE_SUCCESS;
}

void clk_stm32_composite_gate_disable(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	stm32_gate_disable(cfg->gate_id);
}

const struct clk_ops clk_stm32_composite_ops = {
	.get_parent = clk_stm32_composite_get_parent,
	.set_parent = clk_stm32_composite_set_parent,
	.get_rate = clk_stm32_composite_get_rate,
	.set_rate = clk_stm32_composite_set_rate,
	.enable = clk_stm32_composite_gate_enable,
	.disable = clk_stm32_composite_gate_disable,
};

TEE_Result clk_stm32_set_parent_by_index(struct clk *clk, size_t pidx)
{
	struct clk *parent = clk_get_parent_by_index(clk, pidx);
	TEE_Result res = TEE_ERROR_GENERIC;

	if (parent)
		res = clk_set_parent(clk, parent);

	return res;
}

int clk_stm32_parse_fdt_by_name(const void *fdt, int node, const char *name,
				uint32_t *tab, uint32_t *nb)
{
	const fdt32_t *cell = NULL;
	int len = 0;
	uint32_t i = 0;

	cell = fdt_getprop(fdt, node, name, &len);
	if (cell && len > 0) {
		for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++)
			tab[i] = fdt32_to_cpu(cell[i]);

		*nb = (uint32_t)len / sizeof(uint32_t);
	} else {
		*nb = 0;
	}

	return 0;
}

TEE_Result clk_stm32_init(struct clk_stm32_priv *priv, uintptr_t base)
{
	stm32_clock_data = priv;

	priv->base = base;

	priv->gate_cpt = calloc(priv->nb_gates, sizeof(*priv->gate_cpt));
	if (!priv->gate_cpt)
		return TEE_ERROR_OUT_OF_MEMORY;

	return TEE_SUCCESS;
}

static unsigned long fixed_factor_get_rate(struct clk *clk,
					   unsigned long parent_rate)
{
	struct fixed_factor_cfg *d = clk->priv;
	unsigned long long rate = (unsigned long long)parent_rate * d->mult;

	if (d->div == 0U)
		panic("error division by zero");

	return (unsigned long)(rate / d->div);
}

const struct clk_ops clk_fixed_factor_ops = {
	.get_rate = fixed_factor_get_rate,
};

static unsigned long clk_fixed_get_rate(struct clk *clk,
					unsigned long parent_rate __unused)
{
	struct clk_fixed_rate_cfg *cfg = clk->priv;

	return cfg->rate;
}

const struct clk_ops clk_fixed_clk_ops = {
	.get_rate = clk_fixed_get_rate,
};

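/*
 * DT clock provider: a clock identifier taken from a "clocks" phandle
 * argument is mapped to its struct clk reference through priv->clk_refs[]
 * and handed back to the clock framework by stm32mp_clk_dt_get_clk().
 */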
struct clk *stm32mp_rcc_clock_id_to_clk(unsigned long clock_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();

	if (clock_id >= priv->nb_clk_refs)
		return NULL;

	return priv->clk_refs[clock_id];
}

static TEE_Result stm32mp_clk_dt_get_clk(struct dt_pargs *pargs,
					 void *data __unused,
					 struct clk **out_clk)
{
	unsigned long clock_id = pargs->args[0];
	struct clk *clk = NULL;

	if (pargs->args_count != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	clk = stm32mp_rcc_clock_id_to_clk(clock_id);
	if (!clk)
		return TEE_ERROR_BAD_PARAMETERS;

	*out_clk = clk;

	return TEE_SUCCESS;
}

static void clk_stm32_register_clocks(struct clk_stm32_priv *priv)
{
	unsigned int i = 0;

	for (i = 0; i < priv->nb_clk_refs; i++) {
		struct clk *clk = priv->clk_refs[i];

		if (!clk)
			continue;

		refcount_set(&clk->enabled_count, 0);

		if (clk_register(clk))
			panic();
	}

	/* Critical clocks management */
	for (i = 0; i < priv->nb_clk_refs; i++) {
		struct clk *clk = priv->clk_refs[i];

		if (!clk)
			continue;

		if (priv->is_critical && priv->is_critical(clk))
			clk_enable(clk);
	}
}

void stm32mp_clk_provider_probe_final(const void *fdt, int node,
				      struct clk_stm32_priv *priv)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	clk_stm32_register_clocks(priv);

	res = clk_dt_register_clk_provider(fdt, node, stm32mp_clk_dt_get_clk,
					   priv);
	if (res)
		panic("Couldn't register clock provider");
}