1 /*
2 * Copyright (C) 2022-2026, STMicroelectronics - All Rights Reserved
3 *
4 * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9
10 #include "clk-stm32-core.h"
11 #include <common/debug.h>
12 #include <common/fdt_wrappers.h>
13 #include <drivers/clk.h>
14 #include <drivers/delay_timer.h>
15 #include <drivers/st/stm32mp_clkfunc.h>
16 #include <lib/mmio.h>
17 #include <lib/spinlock.h>
18
/* Polling timeouts for hardware status bits */
#define TIMEOUT_US_200MS U(200000)
#define TIMEOUT_US_2S U(2000000)
/* Timeout used when waiting for a clock source (mux/divider) ready bit */
#define CLKSRC_TIMEOUT TIMEOUT_US_200MS

/* Serializes RCC register read-modify-write sequences */
static struct spinlock reg_lock;
/* Serializes updates of the gate reference counters */
static struct spinlock refcount_lock;

/* Driver-wide private data, set once by clk_stm32_init() */
static struct stm32_clk_priv *stm32_clock_data;

/* NOTE(review): tentative definition with no initializer here — presumably
 * the mux ops table is populated elsewhere; verify against the full file. */
const struct stm32_clk_ops clk_mux_ops;
29
clk_stm32_get_priv(void)30 struct stm32_clk_priv *clk_stm32_get_priv(void)
31 {
32 return stm32_clock_data;
33 }
34
/*
 * Take @lock if the platform spinlock service is available.
 * Interrupts are assumed to be masked by the caller.
 */
static void _clk_lock(struct spinlock *lock)
{
	if (!stm32mp_lock_available()) {
		return;
	}

	spin_lock(lock);
}
42
/* Release @lock if the platform spinlock service is available. */
static void _clk_unlock(struct spinlock *lock)
{
	if (!stm32mp_lock_available()) {
		return;
	}

	spin_unlock(lock);
}
49
clk_stm32_rcc_regs_lock(void)50 void clk_stm32_rcc_regs_lock(void)
51 {
52 _clk_lock(®_lock);
53 }
54
clk_stm32_rcc_regs_unlock(void)55 void clk_stm32_rcc_regs_unlock(void)
56 {
57 _clk_unlock(®_lock);
58 }
59
clk_oscillator_get_data(struct stm32_clk_priv * priv,int id)60 struct clk_oscillator_data *clk_oscillator_get_data(struct stm32_clk_priv *priv, int id)
61 {
62 const struct clk_stm32 *clk = _clk_get(priv, id);
63 struct stm32_osc_cfg *osc_cfg = clk->clock_cfg;
64 int osc_id = osc_cfg->osc_id;
65
66 return &priv->osci_data[osc_id];
67 }
68
clk_gate_enable(struct stm32_clk_priv * priv,int id)69 static int clk_gate_enable(struct stm32_clk_priv *priv, int id)
70 {
71 const struct clk_stm32 *clk = _clk_get(priv, id);
72 struct clk_gate_cfg *cfg = clk->clock_cfg;
73
74 mmio_setbits_32(priv->base + cfg->offset, BIT(cfg->bit_idx));
75
76 /* Make sure the clock register has been written */
77 (void)mmio_read_32(priv->base + cfg->offset);
78
79 return 0;
80 }
81
clk_gate_disable(struct stm32_clk_priv * priv,int id)82 static void clk_gate_disable(struct stm32_clk_priv *priv, int id)
83 {
84 const struct clk_stm32 *clk = _clk_get(priv, id);
85 struct clk_gate_cfg *cfg = clk->clock_cfg;
86
87 dmbsy(); /* Ensure previous transactions are performed. */
88
89 mmio_clrbits_32(priv->base + cfg->offset, BIT(cfg->bit_idx));
90
91 /* Make sure the clock register has been written */
92 (void)mmio_read_32(priv->base + cfg->offset);
93 }
94
clk_gate_is_enabled(struct stm32_clk_priv * priv,int id)95 static bool clk_gate_is_enabled(struct stm32_clk_priv *priv, int id)
96 {
97 const struct clk_stm32 *clk = _clk_get(priv, id);
98 struct clk_gate_cfg *cfg = clk->clock_cfg;
99
100 return ((mmio_read_32(priv->base + cfg->offset) & BIT(cfg->bit_idx)) != 0U);
101 }
102
/* Ops for a basic gate clock described by clk_gate_cfg (offset/bit_idx) */
const struct stm32_clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
108
/* Clear gate @gate_id, using the CLR register when the gate has a set/clear pair. */
void _clk_stm32_gate_disable(struct stm32_clk_priv *priv, uint16_t gate_id)
{
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (gate->set_clr == 0U) {
		mmio_clrbits_32(addr, BIT(gate->bit_idx));
	} else {
		/* Set/clear register pair: a write to the CLR register gates the clock */
		mmio_write_32(addr + RCC_MP_ENCLRR_OFFSET, BIT(gate->bit_idx));
	}
}
120
/* Set gate @gate_id, using the SET register when the gate has a set/clear pair. */
int _clk_stm32_gate_enable(struct stm32_clk_priv *priv, uint16_t gate_id)
{
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (gate->set_clr == 0U) {
		mmio_setbits_32(addr, BIT(gate->bit_idx));
	} else {
		/* Set/clear register pair: a plain write to the SET register suffices */
		mmio_write_32(addr, BIT(gate->bit_idx));
	}

	return 0;
}
135
/* Return the descriptor of clock @id; panics on an out-of-range id. */
const struct clk_stm32 *_clk_get(struct stm32_clk_priv *priv, int id)
{
	if ((unsigned int)id >= priv->num) {
		panic();
	}

	return &priv->clks[id];
}
144
/* Return the ops table of clock @id; every registered clock must carry one. */
static const struct stm32_clk_ops *_clk_get_ops(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);

	assert(clk->ops != (uint8_t)NO_OPS);

	return priv->ops_array[clk->ops];
}
153
154 #define clk_div_mask(_width) GENMASK(((_width) - 1U), 0U)
155
_get_table_div(const struct clk_div_table * table,unsigned int val)156 static unsigned int _get_table_div(const struct clk_div_table *table,
157 unsigned int val)
158 {
159 const struct clk_div_table *clkt;
160
161 for (clkt = table; clkt->div; clkt++) {
162 if (clkt->val == val) {
163 return clkt->div;
164 }
165 }
166
167 return 0;
168 }
169
/*
 * Decode a raw divider field value according to @flags:
 * one-based, power-of-two, max-at-zero, table lookup, or default (val + 1).
 * Flag precedence follows the order of the tests below.
 */
static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags,
			     uint8_t width)
{
	if ((flags & CLK_DIVIDER_ONE_BASED) != 0UL) {
		return val;
	} else if ((flags & CLK_DIVIDER_POWER_OF_TWO) != 0UL) {
		return BIT(val);
	} else if ((flags & CLK_DIVIDER_MAX_AT_ZERO) != 0UL) {
		/* Zero encodes the maximum divider (2^width) */
		return (val == 0U) ? BIT(width) : val;
	} else if (table != NULL) {
		return _get_table_div(table, val);
	} else {
		return val + 1U;
	}
}
192
/*
 * Program selector @sel into mux @pid, then wait (up to CLKSRC_TIMEOUT)
 * for the mux ready bit when the mux has one.
 * Returns 0 on success, -ETIMEDOUT if the ready bit never rises.
 */
int clk_mux_set_parent(struct stm32_clk_priv *priv, uint16_t pid, uint8_t sel)
{
	const struct parent_cfg *parents = &priv->parents[pid & MUX_PARENT_MASK];
	const struct mux_cfg *mux = parents->mux;
	uintptr_t address = priv->base + mux->offset;
	uint32_t sel_mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
	uint32_t rdy_mask;
	uint64_t timeout_ref;

	mmio_clrsetbits_32(address, sel_mask, (sel << mux->shift) & sel_mask);

	/* Some muxes have no ready bit: nothing to wait for */
	if (mux->bitrdy == MUX_NO_BIT_RDY) {
		return 0;
	}

	rdy_mask = BIT(mux->bitrdy);
	timeout_ref = timeout_init_us(CLKSRC_TIMEOUT);

	while ((mmio_read_32(address) & rdy_mask) == 0U) {
		if (timeout_elapsed(timeout_ref)) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}
221
/*
 * Re-parent clock @clk to parent clock @clkp.
 * Returns 0 on success, a negative errno if @clk has no mux or @clkp is
 * not one of its possible parents, or the clk_mux_set_parent() status.
 */
int _clk_stm32_set_parent(struct stm32_clk_priv *priv, int clk, int clkp)
{
	const struct parent_cfg *parents;
	uint16_t pid;
	uint8_t sel;
	int old_parent;

	pid = priv->clks[clk].parent;

	/* Only clocks fed through a mux can be re-parented */
	if ((pid == CLK_IS_ROOT) || (pid < MUX_MAX_PARENTS)) {
		return -EINVAL;
	}

	old_parent = _clk_stm32_get_parent(priv, clk);
	if (old_parent < 0) {
		return old_parent;
	}
	/* Nothing to do when the requested parent is already selected */
	if (old_parent == clkp) {
		return 0;
	}

	parents = &priv->parents[pid & MUX_PARENT_MASK];

	/* Find which selector value corresponds to @clkp */
	for (sel = 0; sel < parents->num_parents; sel++) {
		if (parents->id_parents[sel] == (uint16_t)clkp) {
			bool clk_was_enabled = _clk_stm32_is_enabled(priv, clk);
			int err = 0;

			/* Enable the parents (for glitch free mux) */
			_clk_stm32_enable(priv, clkp);
			_clk_stm32_enable(priv, old_parent);

			err = clk_mux_set_parent(priv, pid, sel);

			/* Drop the temporary reference taken on the old parent */
			_clk_stm32_disable(priv, old_parent);

			/*
			 * If @clk was running it implicitly held a reference on
			 * its old parent: release that one too, and leave the
			 * temporary reference on @clkp in place to represent
			 * @clk's hold on its new parent. Otherwise just drop
			 * the temporary reference on @clkp.
			 */
			if (clk_was_enabled) {
				_clk_stm32_disable(priv, old_parent);
			} else {
				_clk_stm32_disable(priv, clkp);
			}

			return err;
		}
	}

	return -EINVAL;
}
270
/* Read the current selector value of mux @mux_id; panics on a bad mux id. */
int clk_mux_get_parent(struct stm32_clk_priv *priv, uint32_t mux_id)
{
	const struct parent_cfg *parent;
	const struct mux_cfg *mux;
	uint32_t field;

	if (mux_id >= priv->nb_parents) {
		panic();
	}

	parent = &priv->parents[mux_id];
	mux = parent->mux;

	field = mmio_read_32(priv->base + mux->offset) &
		MASK_WIDTH_SHIFT(mux->width, mux->shift);

	return field >> mux->shift;
}
288
_clk_stm32_set_parent_by_index(struct stm32_clk_priv * priv,int clk,int sel)289 int _clk_stm32_set_parent_by_index(struct stm32_clk_priv *priv, int clk, int sel)
290 {
291 uint16_t pid;
292
293 pid = priv->clks[clk].parent;
294
295 if ((pid == CLK_IS_ROOT) || (pid < MUX_MAX_PARENTS)) {
296 return -EINVAL;
297 }
298
299 return clk_mux_set_parent(priv, pid, sel);
300 }
301
_clk_stm32_get_parent(struct stm32_clk_priv * priv,int clk_id)302 int _clk_stm32_get_parent(struct stm32_clk_priv *priv, int clk_id)
303 {
304 const struct stm32_clk_ops *ops = _clk_get_ops(priv, clk_id);
305 const struct parent_cfg *parent;
306 uint16_t mux_id;
307 int sel;
308
309 mux_id = priv->clks[clk_id].parent;
310 if (mux_id == CLK_IS_ROOT) {
311 return CLK_IS_ROOT;
312 }
313
314 if (mux_id < MUX_MAX_PARENTS) {
315 return mux_id & MUX_PARENT_MASK;
316 }
317
318 mux_id &= MUX_PARENT_MASK;
319 parent = &priv->parents[mux_id];
320
321 if (ops->get_parent != NULL) {
322 sel = ops->get_parent(priv, clk_id);
323 } else {
324 sel = clk_mux_get_parent(priv, mux_id);
325 }
326
327 if ((sel >= 0) && (sel < parent->num_parents)) {
328 return parent->id_parents[sel];
329 }
330
331 return -EINVAL;
332 }
333
_clk_stm32_get_parent_index(struct stm32_clk_priv * priv,int clk_id)334 int _clk_stm32_get_parent_index(struct stm32_clk_priv *priv, int clk_id)
335 {
336 uint16_t mux_id;
337
338 mux_id = priv->clks[clk_id].parent;
339 if (mux_id == CLK_IS_ROOT) {
340 return CLK_IS_ROOT;
341 }
342
343 if (mux_id < MUX_MAX_PARENTS) {
344 return mux_id & MUX_PARENT_MASK;
345 }
346
347 mux_id &= MUX_PARENT_MASK;
348
349 return clk_mux_get_parent(priv, mux_id);
350 }
351
_clk_stm32_get_parent_by_index(struct stm32_clk_priv * priv,int clk_id,int idx)352 int _clk_stm32_get_parent_by_index(struct stm32_clk_priv *priv, int clk_id, int idx)
353 {
354 const struct parent_cfg *parent;
355 uint16_t mux_id;
356
357 mux_id = priv->clks[clk_id].parent;
358 if (mux_id == CLK_IS_ROOT) {
359 return CLK_IS_ROOT;
360 }
361
362 if (mux_id < MUX_MAX_PARENTS) {
363 return mux_id & MUX_PARENT_MASK;
364 }
365
366 mux_id &= MUX_PARENT_MASK;
367 parent = &priv->parents[mux_id];
368
369 if (idx < parent->num_parents) {
370 return parent->id_parents[idx];
371 }
372
373 return -EINVAL;
374 }
375
clk_get_index(struct stm32_clk_priv * priv,unsigned long binding_id)376 int clk_get_index(struct stm32_clk_priv *priv, unsigned long binding_id)
377 {
378 unsigned int i;
379
380 for (i = 0U; i < priv->num; i++) {
381 if (binding_id == priv->clks[i].binding) {
382 return (int)i;
383 }
384 }
385
386 return -EINVAL;
387 }
388
_clk_stm32_get_rate(struct stm32_clk_priv * priv,int id)389 unsigned long _clk_stm32_get_rate(struct stm32_clk_priv *priv, int id)
390 {
391 const struct stm32_clk_ops *ops = _clk_get_ops(priv, id);
392 int parent;
393
394 if ((unsigned int)id >= priv->num) {
395 return 0UL;
396 }
397
398 parent = _clk_stm32_get_parent(priv, id);
399 if (parent < 0) {
400 return 0UL;
401 }
402
403 if (ops->recalc_rate != NULL) {
404 unsigned long prate = 0UL;
405
406 if (parent != CLK_IS_ROOT) {
407 prate = _clk_stm32_get_rate(priv, parent);
408 }
409
410 return ops->recalc_rate(priv, id, prate);
411 }
412
413 if (parent == CLK_IS_ROOT) {
414 panic();
415 }
416
417 return _clk_stm32_get_rate(priv, parent);
418 }
419
/* Return the rate of @id's parent clock, or 0UL if it cannot be resolved. */
unsigned long _clk_stm32_get_parent_rate(struct stm32_clk_priv *priv, int id)
{
	int parent_id = _clk_stm32_get_parent(priv, id);

	return (parent_id < 0) ? 0UL : _clk_stm32_get_rate(priv, parent_id);
}
430
_stm32_clk_get_flags(struct stm32_clk_priv * priv,int id)431 static uint8_t _stm32_clk_get_flags(struct stm32_clk_priv *priv, int id)
432 {
433 return priv->clks[id].flags;
434 }
435
_stm32_clk_is_flags(struct stm32_clk_priv * priv,int id,uint8_t flag)436 bool _stm32_clk_is_flags(struct stm32_clk_priv *priv, int id, uint8_t flag)
437 {
438 if ((_stm32_clk_get_flags(priv, id) & flag) != 0U) {
439 return true;
440 }
441
442 return false;
443 }
444
/*
 * Invoke the enable op of clock @id, if any.
 * Returns the op's status instead of unconditionally returning 0, so
 * enable failures are no longer silently discarded; 0 when there is no
 * enable op.
 */
int clk_stm32_enable_call_ops(struct stm32_clk_priv *priv, uint16_t id)
{
	const struct stm32_clk_ops *ops = _clk_get_ops(priv, id);

	if (ops->enable != NULL) {
		return ops->enable(priv, id);
	}

	return 0;
}
455
/*
 * Refcounted enable, recursing up the parent chain.
 * The first reference (count 0 -> 1) enables the ancestry and then the
 * clock's own gate; later references only bump the counter.
 * Caller must hold refcount_lock (see _clk_stm32_enable()).
 */
static int _clk_stm32_enable_core(struct stm32_clk_priv *priv, int id)
{
	int parent;
	int ret = 0;

	if (priv->gate_refcounts[id] == 0U) {
		parent = _clk_stm32_get_parent(priv, id);
		if (parent < 0) {
			return parent;
		}
		if (parent != CLK_IS_ROOT) {
			/* Make sure the whole ancestry is running first */
			ret = _clk_stm32_enable_core(priv, parent);
			if (ret != 0) {
				return ret;
			}
		}
		clk_stm32_enable_call_ops(priv, id);
	}

	priv->gate_refcounts[id]++;

	/* Catch reference leaks before the counter can wrap */
	if (priv->gate_refcounts[id] == UINT8_MAX) {
		ERROR("%s: %d max enable count !", __func__, id);
		panic();
	}

	return 0;
}
484
_clk_stm32_enable(struct stm32_clk_priv * priv,int id)485 int _clk_stm32_enable(struct stm32_clk_priv *priv, int id)
486 {
487 int ret;
488
489 _clk_lock(&refcount_lock);
490 ret = _clk_stm32_enable_core(priv, id);
491 _clk_unlock(&refcount_lock);
492
493 return ret;
494 }
495
/* Invoke the disable op of clock @id, if the clock has one. */
void clk_stm32_disable_call_ops(struct stm32_clk_priv *priv, uint16_t id)
{
	const struct stm32_clk_ops *ops = _clk_get_ops(priv, id);

	if (ops->disable == NULL) {
		return;
	}

	ops->disable(priv, id);
}
504
/*
 * Refcounted disable, recursing up the parent chain.
 * Caller must hold refcount_lock (see _clk_stm32_disable()).
 */
static void _clk_stm32_disable_core(struct stm32_clk_priv *priv, int id)
{
	int parent;

	/* Keep critical clocks running: never drop their last reference */
	if ((priv->gate_refcounts[id] == 1U) && _stm32_clk_is_flags(priv, id, CLK_IS_CRITICAL)) {
		return;
	}

	if (priv->gate_refcounts[id] == 0U) {
		/* case of clock ignore unused */
		if (_clk_stm32_is_enabled(priv, id)) {
			/* Running without a reference (e.g. left on by boot): gate it */
			clk_stm32_disable_call_ops(priv, id);
			return;
		}
		VERBOSE("%s: %d already disabled !\n\n", __func__, id);
		return;
	}

	/* Only the last reference actually gates the clock */
	if (--priv->gate_refcounts[id] > 0U) {
		return;
	}

	clk_stm32_disable_call_ops(priv, id);

	/* Release this clock's reference on its parent, too */
	parent = _clk_stm32_get_parent(priv, id);
	if ((parent >= 0) && (parent != CLK_IS_ROOT)) {
		_clk_stm32_disable_core(priv, parent);
	}
}
534
_clk_stm32_disable(struct stm32_clk_priv * priv,int id)535 void _clk_stm32_disable(struct stm32_clk_priv *priv, int id)
536 {
537 _clk_lock(&refcount_lock);
538
539 _clk_stm32_disable_core(priv, id);
540
541 _clk_unlock(&refcount_lock);
542 }
543
_clk_stm32_is_enabled(struct stm32_clk_priv * priv,int id)544 bool _clk_stm32_is_enabled(struct stm32_clk_priv *priv, int id)
545 {
546 const struct stm32_clk_ops *ops = _clk_get_ops(priv, id);
547
548 if (ops->is_enabled != NULL) {
549 return ops->is_enabled(priv, id);
550 }
551
552 return priv->gate_refcounts[id];
553 }
554
/* clk_ops .enable hook: translate the DT binding id, then enable. */
static int clk_stm32_enable(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id = clk_get_index(priv, binding_id);

	if (id < 0) {
		ERROR("%s: unsupported clock id %lu\n", __func__, binding_id);
		return id;
	}

	return _clk_stm32_enable(priv, id);
}
568
/* clk_ops .disable hook: translate the DT binding id, then disable. */
static void clk_stm32_disable(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id = clk_get_index(priv, binding_id);

	if (id < 0) {
		ERROR("%s: unsupported clock id %lu\n", __func__, binding_id);
		return;
	}

	_clk_stm32_disable(priv, id);
}
581
clk_stm32_is_enabled(unsigned long binding_id)582 static bool clk_stm32_is_enabled(unsigned long binding_id)
583 {
584 struct stm32_clk_priv *priv = clk_stm32_get_priv();
585 int id;
586
587 id = clk_get_index(priv, binding_id);
588 if (id == -EINVAL) {
589 ERROR("%s: unsupported clock id %lu\n", __func__, binding_id);
590 return false;
591 }
592
593 return _clk_stm32_is_enabled(priv, id);
594 }
595
/* clk_ops .get_rate hook: unknown binding ids report a zero rate. */
static unsigned long clk_stm32_get_rate(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id = clk_get_index(priv, binding_id);

	if (id < 0) {
		ERROR("%s: unsupported clock id %lu\n", __func__, binding_id);
		return 0UL;
	}

	return _clk_stm32_get_rate(priv, id);
}
609
/* clk_ops .get_parent hook: returns the parent clock id or a negative errno. */
static int clk_stm32_get_parent(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id = clk_get_index(priv, binding_id);

	if (id < 0) {
		ERROR("%s: unsupported clock id %lu\n", __func__, binding_id);
		return id;
	}

	return _clk_stm32_get_parent(priv, id);
}
623
/* Generic clock framework entry points for this driver */
static const struct clk_ops stm32mp_clk_ops = {
	.enable = clk_stm32_enable,
	.disable = clk_stm32_disable,
	.is_enabled = clk_stm32_is_enabled,
	.get_rate = clk_stm32_get_rate,
	.get_parent = clk_stm32_get_parent,
};
631
clk_stm32_enable_critical_clocks(void)632 void clk_stm32_enable_critical_clocks(void)
633 {
634 struct stm32_clk_priv *priv = clk_stm32_get_priv();
635 unsigned int i;
636
637 for (i = 0U; i < priv->num; i++) {
638 if (_stm32_clk_is_flags(priv, i, CLK_IS_CRITICAL)) {
639 _clk_stm32_enable(priv, i);
640 }
641 }
642 }
643
stm32_clk_register(void)644 static void stm32_clk_register(void)
645 {
646 clk_register(&stm32mp_clk_ops);
647 }
648
clk_stm32_div_get_value(struct stm32_clk_priv * priv,int div_id)649 uint32_t clk_stm32_div_get_value(struct stm32_clk_priv *priv, int div_id)
650 {
651 const struct div_cfg *divider = &priv->div[div_id];
652 uint32_t val = 0;
653
654 val = mmio_read_32(priv->base + divider->offset) >> divider->shift;
655 val &= clk_div_mask(divider->width);
656
657 return val;
658 }
659
_clk_stm32_divider_recalc(struct stm32_clk_priv * priv,int div_id,unsigned long prate)660 unsigned long _clk_stm32_divider_recalc(struct stm32_clk_priv *priv,
661 int div_id,
662 unsigned long prate)
663 {
664 const struct div_cfg *divider = &priv->div[div_id];
665 uint32_t val = clk_stm32_div_get_value(priv, div_id);
666 unsigned int div = 0U;
667
668 div = _get_div(divider->table, val, divider->flags, divider->width);
669 if (div == 0U) {
670 return prate;
671 }
672
673 return div_round_up((uint64_t)prate, div);
674 }
675
clk_stm32_divider_recalc(struct stm32_clk_priv * priv,int id,unsigned long prate)676 unsigned long clk_stm32_divider_recalc(struct stm32_clk_priv *priv, int id,
677 unsigned long prate)
678 {
679 const struct clk_stm32 *clk = _clk_get(priv, id);
680 struct clk_stm32_div_cfg *div_cfg = clk->clock_cfg;
681
682 return _clk_stm32_divider_recalc(priv, div_cfg->id, prate);
683 }
684
/* Ops for a pure divider clock: rate only, no gating */
const struct stm32_clk_ops clk_stm32_divider_ops = {
	.recalc_rate = clk_stm32_divider_recalc,
};
688
/*
 * Program raw value @value into divider @div_id, then wait (up to
 * CLKSRC_TIMEOUT) for its ready bit when it has one.
 * Panics on a bad divider id; returns 0 or -ETIMEDOUT.
 */
int clk_stm32_set_div(struct stm32_clk_priv *priv, uint32_t div_id, uint32_t value)
{
	const struct div_cfg *divider;
	uintptr_t address;
	uint32_t field_mask;
	uint32_t rdy_mask;
	uint64_t timeout_ref;

	if (div_id >= priv->nb_div) {
		panic();
	}

	divider = &priv->div[div_id];
	address = priv->base + divider->offset;

	field_mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
	mmio_clrsetbits_32(address, field_mask, (value << divider->shift) & field_mask);

	/* Dividers without a ready bit take effect immediately */
	if (divider->bitrdy == DIV_NO_BIT_RDY) {
		return 0;
	}

	rdy_mask = BIT(divider->bitrdy);
	timeout_ref = timeout_init_us(CLKSRC_TIMEOUT);

	while ((mmio_read_32(address) & rdy_mask) == 0U) {
		if (timeout_elapsed(timeout_ref)) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}
721
/*
 * Poll the status bit of gate @gate_id until it matches @ready_on
 * (true = wait for the bit to rise, false = wait for it to fall).
 * Returns 0 on success, -ETIMEDOUT after TIMEOUT_US_2S.
 */
int _clk_stm32_gate_wait_ready(struct stm32_clk_priv *priv, uint16_t gate_id,
			       bool ready_on)
{
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t address = priv->base + gate->offset;
	uint32_t mask_rdy = BIT(gate->bit_idx);
	uint32_t expected = ready_on ? BIT(gate->bit_idx) : 0U;
	uint64_t timeout = timeout_init_us(TIMEOUT_US_2S);

	do {
		if ((mmio_read_32(address) & mask_rdy) == expected) {
			return 0;
		}
	} while (!timeout_elapsed(timeout));

	/* Final read after the deadline avoids a spurious timeout */
	if ((mmio_read_32(address) & mask_rdy) == expected) {
		return 0;
	}

	return -ETIMEDOUT;
}
751
clk_stm32_gate_enable(struct stm32_clk_priv * priv,int id)752 int clk_stm32_gate_enable(struct stm32_clk_priv *priv, int id)
753 {
754 const struct clk_stm32 *clk = _clk_get(priv, id);
755 struct clk_stm32_gate_cfg *cfg = clk->clock_cfg;
756 const struct gate_cfg *gate = &priv->gates[cfg->id];
757 uintptr_t addr = priv->base + gate->offset;
758
759 if (gate->set_clr != 0U) {
760 mmio_write_32(addr, BIT(gate->bit_idx));
761
762 } else {
763 mmio_setbits_32(addr, BIT(gate->bit_idx));
764 }
765
766 return 0;
767 }
768
clk_stm32_gate_disable(struct stm32_clk_priv * priv,int id)769 void clk_stm32_gate_disable(struct stm32_clk_priv *priv, int id)
770 {
771 const struct clk_stm32 *clk = _clk_get(priv, id);
772 struct clk_stm32_gate_cfg *cfg = clk->clock_cfg;
773 const struct gate_cfg *gate = &priv->gates[cfg->id];
774 uintptr_t addr = priv->base + gate->offset;
775
776 if (gate->set_clr != 0U) {
777 mmio_write_32(addr + RCC_MP_ENCLRR_OFFSET, BIT(gate->bit_idx));
778 } else {
779 mmio_clrbits_32(addr, BIT(gate->bit_idx));
780 }
781 }
782
_clk_stm32_gate_is_enabled(struct stm32_clk_priv * priv,int gate_id)783 bool _clk_stm32_gate_is_enabled(struct stm32_clk_priv *priv, int gate_id)
784 {
785 const struct gate_cfg *gate;
786 uint32_t addr;
787
788 gate = &priv->gates[gate_id];
789 addr = priv->base + gate->offset;
790
791 return ((mmio_read_32(addr) & BIT(gate->bit_idx)) != 0U);
792 }
793
clk_stm32_gate_is_enabled(struct stm32_clk_priv * priv,int id)794 bool clk_stm32_gate_is_enabled(struct stm32_clk_priv *priv, int id)
795 {
796 const struct clk_stm32 *clk = _clk_get(priv, id);
797 struct clk_stm32_gate_cfg *cfg = clk->clock_cfg;
798
799 return _clk_stm32_gate_is_enabled(priv, cfg->id);
800 }
801
/* Ops for a gate clock described by clk_stm32_gate_cfg (gate table index) */
const struct stm32_clk_ops clk_stm32_gate_ops = {
	.enable = clk_stm32_gate_enable,
	.disable = clk_stm32_gate_disable,
	.is_enabled = clk_stm32_gate_is_enabled,
};
807
/* Ops for a fixed mult/div factor clock: rate only, no gating */
const struct stm32_clk_ops clk_fixed_factor_ops = {
	.recalc_rate = fixed_factor_recalc_rate,
};
811
fixed_factor_recalc_rate(struct stm32_clk_priv * priv,int id,unsigned long prate)812 unsigned long fixed_factor_recalc_rate(struct stm32_clk_priv *priv,
813 int id, unsigned long prate)
814 {
815 const struct clk_stm32 *clk = _clk_get(priv, id);
816 const struct fixed_factor_cfg *cfg = clk->clock_cfg;
817 unsigned long long rate;
818
819 rate = (unsigned long long)prate * cfg->mult;
820
821 if (cfg->div == 0U) {
822 ERROR("division by zero\n");
823 panic();
824 }
825
826 return (unsigned long)(rate / cfg->div);
827 };
828
829 #define APB_DIV_MASK GENMASK(2, 0)
830 #define TIM_PRE_MASK BIT(0)
831
timer_recalc_rate(struct stm32_clk_priv * priv,int id,unsigned long prate)832 static unsigned long timer_recalc_rate(struct stm32_clk_priv *priv,
833 int id, unsigned long prate)
834 {
835 const struct clk_stm32 *clk = _clk_get(priv, id);
836 const struct clk_timer_cfg *cfg = clk->clock_cfg;
837 uint32_t prescaler, timpre;
838 uintptr_t rcc_base = priv->base;
839
840 prescaler = mmio_read_32(rcc_base + cfg->apbdiv) &
841 APB_DIV_MASK;
842
843 timpre = mmio_read_32(rcc_base + cfg->timpre) &
844 TIM_PRE_MASK;
845
846 if (prescaler == 0U) {
847 return prate;
848 }
849
850 return prate * (timpre + 1U) * 2U;
851 };
852
/* Ops for a timer kernel clock: rate only, no gating */
const struct stm32_clk_ops clk_timer_ops = {
	.recalc_rate = timer_recalc_rate,
};
856
clk_fixed_rate_recalc(struct stm32_clk_priv * priv,int id,unsigned long prate)857 static unsigned long clk_fixed_rate_recalc(struct stm32_clk_priv *priv, int id,
858 unsigned long prate)
859 {
860 const struct clk_stm32 *clk = _clk_get(priv, id);
861 struct clk_stm32_fixed_rate_cfg *cfg = clk->clock_cfg;
862
863 return cfg->rate;
864 }
865
/* Ops for a fixed-rate clock (e.g. an external oscillator) */
const struct stm32_clk_ops clk_stm32_fixed_rate_ops = {
	.recalc_rate = clk_fixed_rate_recalc,
};
869
/*
 * Read an array of u32 cells from property @name of device-tree @node.
 * Fills @tab with host-endian values and stores the cell count in *@nb.
 * A missing property is not an error: *@nb is set to 0.
 * NOTE(review): @tab is assumed large enough for the whole property —
 * callers must size it accordingly.
 */
int stm32_clk_parse_fdt_by_name(void *fdt, int node, const char *name, uint32_t *tab, uint32_t *nb)
{
	const fdt32_t *cell;
	uint32_t count;
	uint32_t i;
	int len = 0;

	cell = fdt_getprop(fdt, node, name, &len);
	if (cell == NULL) {
		*nb = 0U;
		return 0;
	}

	count = (uint32_t)len / sizeof(uint32_t);

	for (i = 0U; i < count; i++) {
		tab[i] = fdt32_to_cpu(cell[i]);
	}

	*nb = count;

	return 0;
}
892
/*
 * Initialize the STM32 clock driver: store @priv as the global driver
 * data, record the RCC @base address, run every clock's optional init
 * op, then register the generic clock ops. Always returns 0.
 */
int clk_stm32_init(struct stm32_clk_priv *priv, uintptr_t base)
{
	unsigned int idx;

	stm32_clock_data = priv;
	priv->base = base;

	for (idx = 0U; idx < priv->num; idx++) {
		const struct stm32_clk_ops *ops = _clk_get_ops(priv, idx);

		if (ops->init != NULL) {
			ops->init(priv, idx);
		}
	}

	stm32_clk_register();

	return 0;
}
913