xref: /optee_os/core/drivers/clk/clk-stm32-core.c (revision 9f34db38245c9b3a4e6e7e63eb78a75e23ab2da3)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
4  */
5 
6 #include <config.h>
7 #include <drivers/clk.h>
8 #include <drivers/clk_dt.h>
9 #include <drivers/stm32_shared_io.h>
10 #include <io.h>
11 #include <kernel/boot.h>
12 #include <kernel/delay.h>
13 #include <kernel/dt.h>
14 #include <libfdt.h>
15 #include <stdio.h>
16 #include <stm32_util.h>
17 
18 #include "clk-stm32-core.h"
19 
/* Offset of a gate's ENCLRR (clear) register relative to its ENSETR register */
#define RCC_MP_ENCLRR_OFFSET	0x4

/* Timeouts used when polling hardware ready flags */
#define TIMEOUT_US_200MS	U(200000)
#define TIMEOUT_US_1S		U(1000000)
24 
25 static struct clk_stm32_priv *stm32_clock_data;
26 
/* Return the driver private data registered by clk_stm32_init() */
struct clk_stm32_priv *clk_stm32_get_priv(void)
{
	return stm32_clock_data;
}
31 
32 uintptr_t clk_stm32_get_rcc_base(void)
33 {
34 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
35 
36 	return priv->base;
37 }
38 
39 /* STM32 MUX API */
40 size_t stm32_mux_get_parent(uint32_t mux_id)
41 {
42 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
43 	const struct mux_cfg *mux = &priv->muxes[mux_id];
44 	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
45 
46 	return (io_read32(priv->base + mux->offset) & mask) >> mux->shift;
47 }
48 
49 TEE_Result stm32_mux_set_parent(uint16_t mux_id, uint8_t sel)
50 {
51 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
52 	const struct mux_cfg *mux = &priv->muxes[mux_id];
53 	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
54 	uintptr_t address = priv->base + mux->offset;
55 
56 	io_clrsetbits32(address, mask, (sel << mux->shift) & mask);
57 
58 	if (mux->ready != MUX_NO_RDY)
59 		return stm32_gate_wait_ready((uint16_t)mux->ready, true);
60 
61 	return TEE_SUCCESS;
62 }
63 
64 /* STM32 GATE API */
/* STM32 GATE API */
/*
 * Drive the hardware state of gate @gate_id.
 * @enable: true to enable the clock, false to disable it
 *
 * Gates flagged set_clr use write-1 set/clear registers (ENSETR at the
 * gate offset, ENCLRR at offset +4); other gates are updated with a
 * read-modify-write through the shared-register IO helpers.
 */
static void stm32_gate_endisable(uint16_t gate_id, bool enable)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (enable) {
		if (gate->set_clr)
			io_write32(addr, BIT(gate->bit_idx));
		else
			io_setbits32_stm32shregs(addr, BIT(gate->bit_idx));
		/* Make sure the clock is enabled before returning to caller */
		dsb();
	} else {
		/* Waiting pending operation before disabling clock */
		dsb();

		if (gate->set_clr)
			io_write32(addr + RCC_MP_ENCLRR_OFFSET,
				   BIT(gate->bit_idx));
		else
			io_clrbits32_stm32shregs(addr, BIT(gate->bit_idx));
	}
}
89 
/*
 * Force the initial hardware state of gate @gate_id, bypassing the
 * reference counter. Only legal while no consumer holds the gate:
 * the assertion checks the counter is still zero.
 */
void stm32_gate_set_init_state(uint16_t gate_id, bool enable)
{
	struct clk_stm32_priv __maybe_unused *priv = clk_stm32_get_priv();

	assert(!priv->gate_cpt[gate_id]);
	stm32_gate_endisable(gate_id, enable);
}
97 
98 void stm32_gate_disable(uint16_t gate_id)
99 {
100 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
101 	uint8_t *gate_cpt = priv->gate_cpt;
102 
103 	assert(gate_cpt[gate_id] > 0);
104 	if (gate_cpt[gate_id] == 1)
105 		stm32_gate_endisable(gate_id, false);
106 	gate_cpt[gate_id]--;
107 }
108 
109 void stm32_gate_enable(uint16_t gate_id)
110 {
111 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
112 	uint8_t *gate_cpt = priv->gate_cpt;
113 
114 	assert(gate_cpt[gate_id] < 0xFF);
115 	if (gate_cpt[gate_id] == 0)
116 		stm32_gate_endisable(gate_id, true);
117 	gate_cpt[gate_id]++;
118 }
119 
120 bool stm32_gate_is_enabled(uint16_t gate_id)
121 {
122 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
123 	const struct gate_cfg *gate = &priv->gates[gate_id];
124 	uintptr_t addr = priv->base + gate->offset;
125 
126 	return (io_read32(addr) & BIT(gate->bit_idx)) != 0U;
127 }
128 
129 TEE_Result stm32_gate_wait_ready(uint16_t gate_id, bool ready_on)
130 {
131 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
132 	const struct gate_cfg *gate = &priv->gates[gate_id];
133 	uintptr_t address = priv->base + gate->offset;
134 	uint32_t mask_rdy = BIT(gate->bit_idx);
135 	uint64_t timeout = timeout_init_us(TIMEOUT_US_1S);
136 	uint32_t mask = 0U;
137 
138 	if (ready_on)
139 		mask = BIT(gate->bit_idx);
140 
141 	while ((io_read32(address) & mask_rdy) != mask)
142 		if (timeout_elapsed(timeout))
143 			break;
144 
145 	if ((io_read32(address) & mask_rdy) != mask)
146 		return TEE_ERROR_GENERIC;
147 
148 	return TEE_SUCCESS;
149 }
150 
151 /* STM32 GATE READY clock operators */
/*
 * Set gate @gate_id and, when @wait_rdy, wait for the associated ready
 * flag to report the new state. On timeout the gate is reverted to its
 * previous state; if even the revert fails to sync, the clock tree is
 * in an unknown state and we panic.
 *
 * NOTE(review): assumes the entry at gate_id + 1 in the gate table is
 * the matching "ready" flag — confirm against the platform gate layout.
 */
static TEE_Result stm32_gate_ready_endisable(uint16_t gate_id, bool enable,
					     bool wait_rdy)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	stm32_gate_endisable(gate_id, enable);

	if (wait_rdy) {
		res = stm32_gate_wait_ready(gate_id + 1, enable);
		if (res) {
			/* Revert and verify the revert itself synced */
			stm32_gate_endisable(gate_id, !enable);
			if (stm32_gate_wait_ready(gate_id + 1, !enable))
				panic("Gate failed to sync");

			return res;
		}
	}

	return TEE_SUCCESS;
}
172 
/* Enable gate @gate_id and wait for its ready flag to assert */
TEE_Result stm32_gate_rdy_enable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, true, true);
}
177 
/* Disable gate @gate_id and wait for its ready flag to deassert */
TEE_Result stm32_gate_rdy_disable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, false, true);
}
182 
183 /* STM32 DIV API */
184 static unsigned int _get_table_div(const struct div_table_cfg *table,
185 				   unsigned int val)
186 {
187 	const struct div_table_cfg *clkt = NULL;
188 
189 	for (clkt = table; clkt->div; clkt++)
190 		if (clkt->val == val)
191 			return clkt->div;
192 
193 	return 0;
194 }
195 
196 static unsigned int _get_table_val(const struct div_table_cfg *table,
197 				   unsigned int div)
198 {
199 	const struct div_table_cfg *clkt = NULL;
200 
201 	for (clkt = table; clkt->div; clkt++)
202 		if (clkt->div == div)
203 			return clkt->val;
204 
205 	return 0;
206 }
207 
208 static unsigned int _get_div(const struct div_table_cfg *table,
209 			     unsigned int val, unsigned long flags,
210 			     uint8_t width)
211 {
212 	if (flags & CLK_DIVIDER_ONE_BASED)
213 		return val;
214 
215 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
216 		return BIT(val);
217 
218 	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
219 		return (val != 0U) ? val : BIT(width);
220 
221 	if (table)
222 		return _get_table_div(table, val);
223 
224 	return val + 1U;
225 }
226 
227 static unsigned int _get_val(const struct div_table_cfg *table,
228 			     unsigned int div, unsigned long flags,
229 			     uint8_t width)
230 {
231 	if (flags & CLK_DIVIDER_ONE_BASED)
232 		return div;
233 
234 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
235 		return __builtin_ffs(div) - 1;
236 
237 	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
238 		return (div != 0U) ? div : BIT(width);
239 
240 	if (table)
241 		return _get_table_val(table, div);
242 
243 	return div - 1U;
244 }
245 
246 static bool _is_valid_table_div(const struct div_table_cfg *table,
247 				unsigned int div)
248 {
249 	const struct div_table_cfg *clkt = NULL;
250 
251 	for (clkt = table; clkt->div; clkt++)
252 		if (clkt->div == div)
253 			return true;
254 
255 	return false;
256 }
257 
258 static bool _is_valid_div(const struct div_table_cfg *table,
259 			  unsigned int div, unsigned long flags)
260 {
261 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
262 		return IS_POWER_OF_TWO(div);
263 
264 	if (table)
265 		return _is_valid_table_div(table, div);
266 
267 	return true;
268 }
269 
270 static int divider_get_val(unsigned long rate, unsigned long parent_rate,
271 			   const struct div_table_cfg *table, uint8_t width,
272 			   unsigned long flags)
273 {
274 	unsigned int div = 0U;
275 	unsigned int value = 0U;
276 
277 	div = UDIV_ROUND_NEAREST((uint64_t)parent_rate, rate);
278 
279 	if (!_is_valid_div(table, div, flags))
280 		return -1;
281 
282 	value = _get_val(table, div, flags, width);
283 
284 	return MIN(value, MASK_WIDTH_SHIFT(width, 0));
285 }
286 
287 uint32_t stm32_div_get_value(int div_id)
288 {
289 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
290 	const struct div_cfg *divider = &priv->div[div_id];
291 	uint32_t val = 0;
292 
293 	val = io_read32(priv->base + divider->offset) >> divider->shift;
294 	val &= MASK_WIDTH_SHIFT(divider->width, 0);
295 
296 	return val;
297 }
298 
299 TEE_Result stm32_div_set_value(uint32_t div_id, uint32_t value)
300 {
301 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
302 	const struct div_cfg *divider = NULL;
303 	uintptr_t address = 0;
304 	uint32_t mask = 0;
305 
306 	if (div_id >= priv->nb_div)
307 		panic();
308 
309 	divider = &priv->div[div_id];
310 	address = priv->base + divider->offset;
311 
312 	mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
313 	io_clrsetbits32(address, mask, (value << divider->shift) & mask);
314 
315 	if (divider->ready == DIV_NO_RDY)
316 		return TEE_SUCCESS;
317 
318 	return stm32_gate_wait_ready((uint16_t)divider->ready, true);
319 }
320 
321 static unsigned long stm32_div_get_rate(int div_id, unsigned long prate)
322 {
323 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
324 	const struct div_cfg *divider = &priv->div[div_id];
325 	uint32_t val = stm32_div_get_value(div_id);
326 	unsigned int div = 0U;
327 
328 	div = _get_div(divider->table, val, divider->flags, divider->width);
329 	if (!div)
330 		return prate;
331 
332 	return ROUNDUP_DIV((uint64_t)prate, div);
333 }
334 
335 TEE_Result stm32_div_set_rate(int div_id, unsigned long rate,
336 			      unsigned long prate)
337 {
338 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
339 	const struct div_cfg *divider = &priv->div[div_id];
340 	int value = 0;
341 
342 	value = divider_get_val(rate, prate, divider->table,
343 				divider->width, divider->flags);
344 
345 	if (value < 0)
346 		return TEE_ERROR_GENERIC;
347 
348 	return stm32_div_set_value(div_id, value);
349 }
350 
351 /* STM32 MUX clock operators */
352 static size_t clk_stm32_mux_get_parent(struct clk *clk)
353 {
354 	struct clk_stm32_mux_cfg *cfg = clk->priv;
355 
356 	return stm32_mux_get_parent(cfg->mux_id);
357 }
358 
359 static TEE_Result clk_stm32_mux_set_parent(struct clk *clk, size_t pidx)
360 {
361 	struct clk_stm32_mux_cfg *cfg = clk->priv;
362 
363 	return stm32_mux_set_parent(cfg->mux_id, pidx);
364 }
365 
/* Operators for a pure mux clock: parent selection only, no gate/rate */
const struct clk_ops clk_stm32_mux_ops = {
	.get_parent	= clk_stm32_mux_get_parent,
	.set_parent	= clk_stm32_mux_set_parent,
};
370 
371 /* STM32 GATE clock operators */
372 static TEE_Result clk_stm32_gate_enable(struct clk *clk)
373 {
374 	struct clk_stm32_gate_cfg *cfg = clk->priv;
375 
376 	stm32_gate_enable(cfg->gate_id);
377 
378 	return TEE_SUCCESS;
379 }
380 
381 static void clk_stm32_gate_disable(struct clk *clk)
382 {
383 	struct clk_stm32_gate_cfg *cfg = clk->priv;
384 
385 	stm32_gate_disable(cfg->gate_id);
386 }
387 
/* Operators for a simple gated clock without a ready flag */
const struct clk_ops clk_stm32_gate_ops = {
	.enable		= clk_stm32_gate_enable,
	.disable	= clk_stm32_gate_disable,
};
392 
393 static TEE_Result clk_stm32_gate_ready_enable(struct clk *clk)
394 {
395 	struct clk_stm32_gate_cfg *cfg = clk->priv;
396 
397 	return stm32_gate_rdy_enable(cfg->gate_id);
398 }
399 
400 static void clk_stm32_gate_ready_disable(struct clk *clk)
401 {
402 	struct clk_stm32_gate_cfg *cfg = clk->priv;
403 
404 	if (stm32_gate_rdy_disable(cfg->gate_id))
405 		panic();
406 }
407 
/* Operators for a gated clock whose gate exposes a ready flag */
const struct clk_ops clk_stm32_gate_ready_ops = {
	.enable		= clk_stm32_gate_ready_enable,
	.disable	= clk_stm32_gate_ready_disable,
};
412 
413 /* STM32 DIV clock operators */
414 unsigned long clk_stm32_divider_get_rate(struct clk *clk,
415 					 unsigned long parent_rate)
416 {
417 	struct clk_stm32_div_cfg *cfg = clk->priv;
418 
419 	return stm32_div_get_rate(cfg->div_id, parent_rate);
420 }
421 
422 TEE_Result clk_stm32_divider_set_rate(struct clk *clk,
423 				      unsigned long rate,
424 				      unsigned long parent_rate)
425 {
426 	struct clk_stm32_div_cfg *cfg = clk->priv;
427 
428 	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
429 }
430 
/* Operators for a pure divider clock: rate handling only */
const struct clk_ops clk_stm32_divider_ops = {
	.get_rate	= clk_stm32_divider_get_rate,
	.set_rate	= clk_stm32_divider_set_rate,
};
435 
436 /* STM32 COMPOSITE clock operators */
437 size_t clk_stm32_composite_get_parent(struct clk *clk)
438 {
439 	struct clk_stm32_composite_cfg *cfg = clk->priv;
440 
441 	if (cfg->mux_id == NO_MUX) {
442 		/* It could be a normal case */
443 		return 0;
444 	}
445 
446 	return stm32_mux_get_parent(cfg->mux_id);
447 }
448 
449 TEE_Result clk_stm32_composite_set_parent(struct clk *clk, size_t pidx)
450 {
451 	struct clk_stm32_composite_cfg *cfg = clk->priv;
452 
453 	if (cfg->mux_id == NO_MUX)
454 		panic();
455 
456 	return stm32_mux_set_parent(cfg->mux_id, pidx);
457 }
458 
459 unsigned long clk_stm32_composite_get_rate(struct clk *clk,
460 					   unsigned long parent_rate)
461 {
462 	struct clk_stm32_composite_cfg *cfg = clk->priv;
463 
464 	if (cfg->div_id == NO_DIV)
465 		return parent_rate;
466 
467 	return stm32_div_get_rate(cfg->div_id, parent_rate);
468 }
469 
470 TEE_Result clk_stm32_composite_set_rate(struct clk *clk, unsigned long rate,
471 					unsigned long parent_rate)
472 {
473 	struct clk_stm32_composite_cfg *cfg = clk->priv;
474 
475 	if (cfg->div_id == NO_DIV)
476 		return TEE_SUCCESS;
477 
478 	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
479 }
480 
481 TEE_Result clk_stm32_composite_gate_enable(struct clk *clk)
482 {
483 	struct clk_stm32_composite_cfg *cfg = clk->priv;
484 
485 	stm32_gate_enable(cfg->gate_id);
486 
487 	return TEE_SUCCESS;
488 }
489 
490 void clk_stm32_composite_gate_disable(struct clk *clk)
491 {
492 	struct clk_stm32_composite_cfg *cfg = clk->priv;
493 
494 	stm32_gate_disable(cfg->gate_id);
495 }
496 
/* Operators for a composite clock combining mux, divider and gate */
const struct clk_ops clk_stm32_composite_ops = {
	.get_parent	= clk_stm32_composite_get_parent,
	.set_parent	= clk_stm32_composite_set_parent,
	.get_rate	= clk_stm32_composite_get_rate,
	.set_rate	= clk_stm32_composite_set_rate,
	.enable		= clk_stm32_composite_gate_enable,
	.disable	= clk_stm32_composite_gate_disable,
};
505 
506 TEE_Result clk_stm32_set_parent_by_index(struct clk *clk, size_t pidx)
507 {
508 	struct clk *parent = clk_get_parent_by_index(clk, pidx);
509 	TEE_Result res = TEE_ERROR_GENERIC;
510 
511 	if (parent)
512 		res = clk_set_parent(clk, parent);
513 
514 	return res;
515 }
516 
517 int clk_stm32_parse_fdt_by_name(const void *fdt, int node, const char *name,
518 				uint32_t *tab, uint32_t *nb)
519 {
520 	const fdt32_t *cell = NULL;
521 	int len = 0;
522 	uint32_t i = 0;
523 
524 	cell = fdt_getprop(fdt, node, name, &len);
525 	if (cell && len > 0) {
526 		for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++)
527 			tab[i] = fdt32_to_cpu(cell[i]);
528 
529 		*nb = (uint32_t)len / sizeof(uint32_t);
530 	} else {
531 		*nb = 0;
532 	}
533 
534 	return 0;
535 }
536 
537 TEE_Result clk_stm32_init(struct clk_stm32_priv *priv, uintptr_t base)
538 {
539 	stm32_clock_data = priv;
540 
541 	priv->base = base;
542 
543 	priv->gate_cpt = calloc(priv->nb_gates, sizeof(*priv->gate_cpt));
544 	if (!priv->gate_cpt)
545 		return TEE_ERROR_OUT_OF_MEMORY;
546 
547 	return TEE_SUCCESS;
548 }
549 
550 static unsigned long fixed_factor_get_rate(struct clk *clk,
551 					   unsigned long parent_rate)
552 {
553 	struct fixed_factor_cfg *d = clk->priv;
554 
555 	unsigned long long rate = (unsigned long long)parent_rate * d->mult;
556 
557 	if (d->div == 0U)
558 		panic("error division by zero");
559 
560 	return (unsigned long)(rate / d->div);
561 };
562 
/* Operators for a fixed-factor clock (constant mult/div on parent rate) */
const struct clk_ops clk_fixed_factor_ops = {
	.get_rate	= fixed_factor_get_rate,
};
566 
567 static unsigned long clk_fixed_get_rate(struct clk *clk,
568 					unsigned long parent_rate __unused)
569 {
570 	struct clk_fixed_rate_cfg *cfg = clk->priv;
571 
572 	return cfg->rate;
573 }
574 
/* Operators for a fixed-rate clock (e.g. an oscillator) */
const struct clk_ops clk_fixed_clk_ops = {
	.get_rate	= clk_fixed_get_rate,
};
578 
579 struct clk *stm32mp_rcc_clock_id_to_clk(unsigned long clock_id)
580 {
581 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
582 
583 	if (clock_id > priv->nb_clk_refs)
584 		return NULL;
585 
586 	return priv->clk_refs[clock_id];
587 }
588 
589 static TEE_Result stm32mp_clk_dt_get_clk(struct dt_pargs *pargs,
590 					 void *data __unused,
591 					 struct clk **out_clk)
592 {
593 	unsigned long clock_id = pargs->args[0];
594 	struct clk *clk = NULL;
595 
596 	if (pargs->args_count != 1)
597 		return TEE_ERROR_BAD_PARAMETERS;
598 
599 	clk = stm32mp_rcc_clock_id_to_clk(clock_id);
600 	if (!clk)
601 		return TEE_ERROR_BAD_PARAMETERS;
602 
603 	*out_clk = clk;
604 
605 	return TEE_SUCCESS;
606 }
607 
608 static void clk_stm32_register_clocks(struct clk_stm32_priv *priv)
609 {
610 	unsigned int i = 0;
611 
612 	for (i = 0; i < priv->nb_clk_refs; i++) {
613 		struct clk *clk = priv->clk_refs[i];
614 
615 		if (!clk)
616 			continue;
617 
618 		refcount_set(&clk->enabled_count, 0);
619 
620 		if (clk_register(clk))
621 			panic();
622 	}
623 
624 	/* Critical clocks management */
625 	for (i = 0; i < priv->nb_clk_refs; i++) {
626 		struct clk *clk = priv->clk_refs[i];
627 
628 		if (!clk)
629 			continue;
630 
631 		if (priv->is_critical && priv->is_critical(clk))
632 			clk_enable(clk);
633 	}
634 }
635 
636 void stm32mp_clk_provider_probe_final(const void *fdt, int node,
637 				      struct clk_stm32_priv *priv)
638 {
639 	TEE_Result res = TEE_ERROR_GENERIC;
640 
641 	clk_stm32_register_clocks(priv);
642 
643 	res = clk_dt_register_clk_provider(fdt, node, stm32mp_clk_dt_get_clk,
644 					   priv);
645 	if (res)
646 		panic("Couldn't register clock provider");
647 }
648