// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
 */

#include <config.h>
#include <drivers/clk.h>
#include <drivers/clk_dt.h>
#include <drivers/stm32_shared_io.h>
#include <io.h>
#include <kernel/boot.h>
#include <kernel/delay.h>
#include <kernel/dt.h>
#include <libfdt.h>
#include <stdio.h>
#include <stm32_util.h>

#include "clk-stm32-core.h"

#define RCC_MP_ENCLRR_OFFSET	0x4

#define TIMEOUT_US_200MS	U(200000)
#define TIMEOUT_US_1S		U(1000000)

static struct clk_stm32_priv *stm32_clock_data;

struct clk_stm32_priv *clk_stm32_get_priv(void)
{
	return stm32_clock_data;
}

uintptr_t clk_stm32_get_rcc_base(void)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();

	return priv->base;
}

/* STM32 MUX API */
size_t stm32_mux_get_parent(uint32_t mux_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct mux_cfg *mux = &priv->muxes[mux_id];
	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);

	return (io_read32(priv->base + mux->offset) & mask) >> mux->shift;
}

TEE_Result stm32_mux_set_parent(uint16_t mux_id, uint8_t sel)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct mux_cfg *mux = &priv->muxes[mux_id];
	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
	uintptr_t address = priv->base + mux->offset;

	io_clrsetbits32(address, mask, (sel << mux->shift) & mask);

	if (mux->ready != MUX_NO_RDY)
		return stm32_gate_wait_ready((uint16_t)mux->ready, true);

	return TEE_SUCCESS;
}

/* STM32 GATE API */
static void stm32_gate_endisable(uint16_t gate_id, bool enable)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (enable) {
		if (gate->set_clr)
			io_write32(addr, BIT(gate->bit_idx));
		else
			io_setbits32_stm32shregs(addr, BIT(gate->bit_idx));
		/* Make sure the clock is enabled before returning to caller */
		dsb();
	} else {
		/* Waiting pending operation before disabling clock */
		dsb();

		if (gate->set_clr)
			io_write32(addr + RCC_MP_ENCLRR_OFFSET,
				   BIT(gate->bit_idx));
		else
			io_clrbits32_stm32shregs(addr, BIT(gate->bit_idx));
	}
}

void stm32_gate_disable(uint16_t gate_id)
{
	stm32_gate_endisable(gate_id, false);
}

void stm32_gate_enable(uint16_t gate_id)
{
	stm32_gate_endisable(gate_id, true);
}

bool stm32_gate_is_enabled(uint16_t gate_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	return (io_read32(addr) & BIT(gate->bit_idx)) != 0U;
}

TEE_Result stm32_gate_wait_ready(uint16_t gate_id, bool ready_on)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t address = priv->base + gate->offset;
	uint32_t mask_rdy = BIT(gate->bit_idx);
	uint64_t timeout = timeout_init_us(TIMEOUT_US_1S);
	uint32_t mask = 0U;

	if (ready_on)
		mask = BIT(gate->bit_idx);

	while ((io_read32(address) & mask_rdy) != mask)
		if (timeout_elapsed(timeout))
			break;

	if ((io_read32(address) & mask_rdy) != mask)
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

/* STM32 GATE READY clock operators */
static TEE_Result stm32_gate_ready_endisable(uint16_t gate_id, bool enable,
					     bool wait_rdy)
{
	stm32_gate_endisable(gate_id, enable);

	if (wait_rdy)
		return stm32_gate_wait_ready(gate_id + 1, enable);

	return TEE_SUCCESS;
}

TEE_Result stm32_gate_rdy_enable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, true, true);
}

TEE_Result stm32_gate_rdy_disable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, false, true);
}

/* STM32 DIV API */
static unsigned int _get_table_div(const struct div_table_cfg *table,
				   unsigned int val)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;

	return 0;
}

static unsigned int _get_table_val(const struct div_table_cfg *table,
				   unsigned int div)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;

	return 0;
}

static unsigned int _get_div(const struct div_table_cfg *table,
			     unsigned int val, unsigned long flags,
			     uint8_t width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return BIT(val);

	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (val != 0U) ? val : BIT(width);

	if (table)
		return _get_table_div(table, val);

	return val + 1U;
}

static unsigned int _get_val(const struct div_table_cfg *table,
			     unsigned int div, unsigned long flags,
			     uint8_t width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __builtin_ffs(div) - 1;

	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div != 0U) ? div : BIT(width);

	if (table)
		return _get_table_val(table, div);

	return div - 1U;
}

static bool _is_valid_table_div(const struct div_table_cfg *table,
				unsigned int div)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;

	return false;
}

static bool _is_valid_div(const struct div_table_cfg *table,
			  unsigned int div, unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return IS_POWER_OF_TWO(div);

	if (table)
		return _is_valid_table_div(table, div);

	return true;
}

static int divider_get_val(unsigned long rate, unsigned long parent_rate,
			   const struct div_table_cfg *table, uint8_t width,
			   unsigned long flags)
{
	unsigned int div = 0U;
	unsigned int value = 0U;

	div = UDIV_ROUND_NEAREST((uint64_t)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -1;

	value = _get_val(table, div, flags, width);

	return MIN(value, MASK_WIDTH_SHIFT(width, 0));
}

uint32_t stm32_div_get_value(int div_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = 0;

	val = io_read32(priv->base + divider->offset) >> divider->shift;
	val &= MASK_WIDTH_SHIFT(divider->width, 0);

	return val;
}

TEE_Result stm32_div_set_value(uint32_t div_id, uint32_t value)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = NULL;
	uintptr_t address = 0;
	uint32_t mask = 0;

	if (div_id >= priv->nb_div)
		panic();

	divider = &priv->div[div_id];
	address = priv->base + divider->offset;

	mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
	io_clrsetbits32(address, mask, (value << divider->shift) & mask);

	if (divider->ready == DIV_NO_RDY)
		return TEE_SUCCESS;

	return stm32_gate_wait_ready((uint16_t)divider->ready, true);
}

static unsigned long stm32_div_get_rate(int div_id, unsigned long prate)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = stm32_div_get_value(div_id);
	unsigned int div = 0U;

	div = _get_div(divider->table, val, divider->flags, divider->width);
	if (!div)
		return prate;

	return ROUNDUP_DIV((uint64_t)prate, div);
}

TEE_Result stm32_div_set_rate(int div_id, unsigned long rate,
			      unsigned long prate)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	int value = 0;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);

	if (value < 0)
		return TEE_ERROR_GENERIC;

	return stm32_div_set_value(div_id, value);
}

/* STM32 MUX clock operators */
static size_t clk_stm32_mux_get_parent(struct clk *clk)
{
	struct clk_stm32_mux_cfg *cfg = clk->priv;

	return stm32_mux_get_parent(cfg->mux_id);
}

static TEE_Result clk_stm32_mux_set_parent(struct clk *clk, size_t pidx)
{
	struct clk_stm32_mux_cfg *cfg = clk->priv;

	return stm32_mux_set_parent(cfg->mux_id, pidx);
}

const struct clk_ops clk_stm32_mux_ops = {
	.get_parent = clk_stm32_mux_get_parent,
	.set_parent = clk_stm32_mux_set_parent,
};

/* STM32 GATE clock operators */
static TEE_Result clk_stm32_gate_enable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	stm32_gate_enable(cfg->gate_id);

	return TEE_SUCCESS;
}

static void clk_stm32_gate_disable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	stm32_gate_disable(cfg->gate_id);
}

const struct clk_ops clk_stm32_gate_ops = {
	.enable = clk_stm32_gate_enable,
	.disable = clk_stm32_gate_disable,
};

static TEE_Result clk_stm32_gate_ready_enable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	return stm32_gate_rdy_enable(cfg->gate_id);
}

static void clk_stm32_gate_ready_disable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	if (stm32_gate_rdy_disable(cfg->gate_id))
		panic();
}

const struct clk_ops clk_stm32_gate_ready_ops = {
	.enable = clk_stm32_gate_ready_enable,
	.disable = clk_stm32_gate_ready_disable,
};

/* STM32 DIV clock operators */
unsigned long clk_stm32_divider_get_rate(struct clk *clk,
					 unsigned long parent_rate)
{
	struct clk_stm32_div_cfg *cfg = clk->priv;

	return stm32_div_get_rate(cfg->div_id, parent_rate);
}

TEE_Result clk_stm32_divider_set_rate(struct clk *clk,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct clk_stm32_div_cfg *cfg = clk->priv;

	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
}

const struct clk_ops clk_stm32_divider_ops = {
	.get_rate = clk_stm32_divider_get_rate,
	.set_rate = clk_stm32_divider_set_rate,
};

/* STM32 COMPOSITE clock operators */
size_t clk_stm32_composite_get_parent(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->mux_id == NO_MUX) {
		/* It could be a normal case */
		return 0;
	}

	return stm32_mux_get_parent(cfg->mux_id);
}

TEE_Result clk_stm32_composite_set_parent(struct clk *clk, size_t pidx)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->mux_id == NO_MUX)
		panic();

	return stm32_mux_set_parent(cfg->mux_id, pidx);
}

unsigned long clk_stm32_composite_get_rate(struct clk *clk,
					   unsigned long parent_rate)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->div_id == NO_DIV)
		return parent_rate;

	return stm32_div_get_rate(cfg->div_id, parent_rate);
}

TEE_Result clk_stm32_composite_set_rate(struct clk *clk, unsigned long rate,
					unsigned long parent_rate)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->div_id == NO_DIV)
		return TEE_SUCCESS;

	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
}

TEE_Result clk_stm32_composite_gate_enable(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	stm32_gate_enable(cfg->gate_id);

	return TEE_SUCCESS;
}

void clk_stm32_composite_gate_disable(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	stm32_gate_disable(cfg->gate_id);
}

const struct clk_ops clk_stm32_composite_ops = {
	.get_parent = clk_stm32_composite_get_parent,
	.set_parent = clk_stm32_composite_set_parent,
	.get_rate = clk_stm32_composite_get_rate,
	.set_rate = clk_stm32_composite_set_rate,
	.enable = clk_stm32_composite_gate_enable,
	.disable = clk_stm32_composite_gate_disable,
};
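
/* Helper functions and clock provider registration for platform drivers */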
TEE_Result clk_stm32_set_parent_by_index(struct clk *clk, size_t pidx)
{
	struct clk *parent = clk_get_parent_by_index(clk, pidx);
	TEE_Result res = TEE_ERROR_GENERIC;

	if (parent)
		res = clk_set_parent(clk, parent);

	return res;
}

int clk_stm32_parse_fdt_by_name(const void *fdt, int node, const char *name,
				uint32_t *tab, uint32_t *nb)
{
	const fdt32_t *cell = NULL;
	int len = 0;
	uint32_t i = 0;

	cell = fdt_getprop(fdt, node, name, &len);
	if (cell)
		for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++)
			tab[i] = fdt32_to_cpu(cell[i]);

	*nb = (uint32_t)len / sizeof(uint32_t);

	return 0;
}

TEE_Result clk_stm32_init(struct clk_stm32_priv *priv, uintptr_t base)
{
	stm32_clock_data = priv;

	priv->base = base;

	return TEE_SUCCESS;
}

static unsigned long fixed_factor_get_rate(struct clk *clk,
					   unsigned long parent_rate)
{
	struct fixed_factor_cfg *d = clk->priv;

	unsigned long long rate = (unsigned long long)parent_rate * d->mult;

	if (d->div == 0U)
		panic("error division by zero");

	return (unsigned long)(rate / d->div);
}

const struct clk_ops clk_fixed_factor_ops = {
	.get_rate = fixed_factor_get_rate,
};

static unsigned long clk_fixed_get_rate(struct clk *clk,
					unsigned long parent_rate __unused)
{
	struct clk_fixed_rate_cfg *cfg = clk->priv;

	return cfg->rate;
}

const struct clk_ops clk_fixed_clk_ops = {
	.get_rate = clk_fixed_get_rate,
};

struct clk *stm32mp_rcc_clock_id_to_clk(unsigned long clock_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();

	/* clk_refs[] holds nb_clk_refs entries, indexed from 0 */
	if (clock_id >= priv->nb_clk_refs)
		return NULL;

	return priv->clk_refs[clock_id];
}

static TEE_Result stm32mp_clk_dt_get_clk(struct dt_pargs *pargs,
					 void *data __unused,
					 struct clk **out_clk)
{
	unsigned long clock_id = pargs->args[0];
	struct clk *clk = NULL;

	if (pargs->args_count != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	clk = stm32mp_rcc_clock_id_to_clk(clock_id);
	if (!clk)
		return TEE_ERROR_BAD_PARAMETERS;

	*out_clk = clk;

	return TEE_SUCCESS;
}

static void clk_stm32_register_clocks(struct clk_stm32_priv *priv)
{
	unsigned int i = 0;

	for (i = 0; i < priv->nb_clk_refs; i++) {
		struct clk *clk = priv->clk_refs[i];

		if (!clk)
			continue;

		refcount_set(&clk->enabled_count, 0);

		if (clk_register(clk))
			panic();
	}

	/* Critical clocks management */
	for (i = 0; i < priv->nb_clk_refs; i++) {
		struct clk *clk = priv->clk_refs[i];

		if (!clk)
			continue;

		if (priv->is_critical && priv->is_critical(clk))
			clk_enable(clk);
	}
}

void stm32mp_clk_provider_probe_final(const void *fdt, int node,
				      struct clk_stm32_priv *priv)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	clk_stm32_register_clocks(priv);

	res = clk_dt_register_clk_provider(fdt, node, stm32mp_clk_dt_get_clk,
					   priv);
	if (res)
		panic("Couldn't register clock provider");
}
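
/*
 * Usage sketch (illustrative only, never compiled): how a platform clock
 * driver (e.g. the STM32MP13 one) is expected to use the helpers above.
 * EXAMPLE_NB_CLOCKS, example_clk_refs, example_clk_priv and
 * example_get_rcc_base() are hypothetical placeholders standing in for the
 * platform clock tables and the platform-specific mapping of the RCC
 * register bank.
 */
#if 0
#define EXAMPLE_NB_CLOCKS	U(4)	/* hypothetical number of clocks */

static struct clk *example_clk_refs[EXAMPLE_NB_CLOCKS];

static struct clk_stm32_priv example_clk_priv = {
	.clk_refs = example_clk_refs,
	.nb_clk_refs = EXAMPLE_NB_CLOCKS,
};

static TEE_Result example_rcc_probe(const void *fdt, int node,
				    const void *compat_data __unused)
{
	/* example_get_rcc_base(): hypothetical helper mapping the RCC I/O */
	uintptr_t rcc_base = example_get_rcc_base();

	/* Record the driver private data and the RCC base used by all APIs */
	clk_stm32_init(&example_clk_priv, rcc_base);

	/*
	 * Register every clock of clk_refs[], enable the critical ones and
	 * expose the RCC node as a device-tree clock provider.
	 */
	stm32mp_clk_provider_probe_final(fdt, node, &example_clk_priv);

	return TEE_SUCCESS;
}
#endif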