xref: /optee_os/core/drivers/clk/clk-stm32-core.c (revision fc9ea0db8ddf8150754aac716691616c7e3f404a)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
4  */
5 
6 #include <config.h>
7 #include <drivers/clk.h>
8 #include <drivers/clk_dt.h>
9 #include <drivers/stm32_shared_io.h>
10 #include <io.h>
11 #include <kernel/boot.h>
12 #include <kernel/delay.h>
13 #include <kernel/dt.h>
14 #include <libfdt.h>
15 #include <stdio.h>
16 #include <stm32_util.h>
17 
18 #include "clk-stm32-core.h"
19 
20 #define RCC_MP_ENCLRR_OFFSET	0x4
21 
22 #define TIMEOUT_US_200MS	U(200000)
23 #define TIMEOUT_US_1S		U(1000000)
24 
25 static struct clk_stm32_priv *stm32_clock_data;
26 
27 struct clk_stm32_priv *clk_stm32_get_priv(void)
28 {
29 	return stm32_clock_data;
30 }
31 
32 uintptr_t clk_stm32_get_rcc_base(void)
33 {
34 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
35 
36 	return priv->base;
37 }
38 
39 /* STM32 MUX API */
40 size_t stm32_mux_get_parent(uint32_t mux_id)
41 {
42 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
43 	const struct mux_cfg *mux = &priv->muxes[mux_id];
44 	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
45 
46 	return (io_read32(priv->base + mux->offset) & mask) >> mux->shift;
47 }
48 
49 TEE_Result stm32_mux_set_parent(uint16_t mux_id, uint8_t sel)
50 {
51 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
52 	const struct mux_cfg *mux = &priv->muxes[mux_id];
53 	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
54 	uintptr_t address = priv->base + mux->offset;
55 
56 	io_clrsetbits32(address, mask, (sel << mux->shift) & mask);
57 
58 	if (mux->ready != MUX_NO_RDY)
59 		return stm32_gate_wait_ready((uint16_t)mux->ready, true);
60 
61 	return TEE_SUCCESS;
62 }
63 
64 /* STM32 GATE API */
/*
 * Drive the hardware state of gate @gate_id.
 *
 * Gates with a set/clear register pair (@set_clr) are driven with plain
 * writes: set via the base register, clear via the register at
 * RCC_MP_ENCLRR_OFFSET. Other gates need a read-modify-write, done through
 * the shared-register IO helpers to cope with concurrent RCC accesses.
 * The dsb() placement orders bus accesses against the clock state change
 * and must not be moved.
 */
static void stm32_gate_endisable(uint16_t gate_id, bool enable)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (enable) {
		if (gate->set_clr)
			io_write32(addr, BIT(gate->bit_idx));
		else
			io_setbits32_stm32shregs(addr, BIT(gate->bit_idx));
		/* Make sure the clock is enabled before returning to caller */
		dsb();
	} else {
		/* Waiting pending operation before disabling clock */
		dsb();

		if (gate->set_clr)
			io_write32(addr + RCC_MP_ENCLRR_OFFSET,
				   BIT(gate->bit_idx));
		else
			io_clrbits32_stm32shregs(addr, BIT(gate->bit_idx));
	}
}
89 
90 void stm32_gate_set_init_state(uint16_t gate_id, bool enable)
91 {
92 	struct clk_stm32_priv __maybe_unused *priv = clk_stm32_get_priv();
93 
94 	assert(!priv->gate_cpt[gate_id]);
95 	stm32_gate_endisable(gate_id, enable);
96 }
97 
98 void stm32_gate_disable(uint16_t gate_id)
99 {
100 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
101 	uint8_t *gate_cpt = priv->gate_cpt;
102 
103 	assert(gate_cpt[gate_id] > 0);
104 	if (gate_cpt[gate_id] == 1)
105 		stm32_gate_endisable(gate_id, false);
106 	gate_cpt[gate_id]--;
107 }
108 
109 void stm32_gate_enable(uint16_t gate_id)
110 {
111 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
112 	uint8_t *gate_cpt = priv->gate_cpt;
113 
114 	assert(gate_cpt[gate_id] < 0xFF);
115 	if (gate_cpt[gate_id] == 0)
116 		stm32_gate_endisable(gate_id, true);
117 	gate_cpt[gate_id]++;
118 }
119 
120 bool stm32_gate_is_enabled(uint16_t gate_id)
121 {
122 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
123 	const struct gate_cfg *gate = &priv->gates[gate_id];
124 	uintptr_t addr = priv->base + gate->offset;
125 
126 	return (io_read32(addr) & BIT(gate->bit_idx)) != 0U;
127 }
128 
129 TEE_Result stm32_gate_wait_ready(uint16_t gate_id, bool ready_on)
130 {
131 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
132 	const struct gate_cfg *gate = &priv->gates[gate_id];
133 	uintptr_t address = priv->base + gate->offset;
134 	uint32_t mask_rdy = BIT(gate->bit_idx);
135 	uint64_t timeout = timeout_init_us(TIMEOUT_US_1S);
136 	uint32_t mask = 0U;
137 
138 	if (ready_on)
139 		mask = BIT(gate->bit_idx);
140 
141 	while ((io_read32(address) & mask_rdy) != mask)
142 		if (timeout_elapsed(timeout))
143 			break;
144 
145 	if ((io_read32(address) & mask_rdy) != mask)
146 		return TEE_ERROR_GENERIC;
147 
148 	return TEE_SUCCESS;
149 }
150 
151 /* STM32 GATE READY clock operators */
152 static TEE_Result stm32_gate_ready_endisable(uint16_t gate_id, bool enable,
153 					     bool wait_rdy)
154 {
155 	stm32_gate_endisable(gate_id, enable);
156 
157 	if (wait_rdy)
158 		return stm32_gate_wait_ready(gate_id + 1, enable);
159 
160 	return TEE_SUCCESS;
161 }
162 
163 TEE_Result stm32_gate_rdy_enable(uint16_t gate_id)
164 {
165 	return stm32_gate_ready_endisable(gate_id, true, true);
166 }
167 
168 TEE_Result stm32_gate_rdy_disable(uint16_t gate_id)
169 {
170 	return stm32_gate_ready_endisable(gate_id, false, true);
171 }
172 
173 /* STM32 DIV API */
174 static unsigned int _get_table_div(const struct div_table_cfg *table,
175 				   unsigned int val)
176 {
177 	const struct div_table_cfg *clkt = NULL;
178 
179 	for (clkt = table; clkt->div; clkt++)
180 		if (clkt->val == val)
181 			return clkt->div;
182 
183 	return 0;
184 }
185 
186 static unsigned int _get_table_val(const struct div_table_cfg *table,
187 				   unsigned int div)
188 {
189 	const struct div_table_cfg *clkt = NULL;
190 
191 	for (clkt = table; clkt->div; clkt++)
192 		if (clkt->div == div)
193 			return clkt->val;
194 
195 	return 0;
196 }
197 
198 static unsigned int _get_div(const struct div_table_cfg *table,
199 			     unsigned int val, unsigned long flags,
200 			     uint8_t width)
201 {
202 	if (flags & CLK_DIVIDER_ONE_BASED)
203 		return val;
204 
205 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
206 		return BIT(val);
207 
208 	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
209 		return (val != 0U) ? val : BIT(width);
210 
211 	if (table)
212 		return _get_table_div(table, val);
213 
214 	return val + 1U;
215 }
216 
217 static unsigned int _get_val(const struct div_table_cfg *table,
218 			     unsigned int div, unsigned long flags,
219 			     uint8_t width)
220 {
221 	if (flags & CLK_DIVIDER_ONE_BASED)
222 		return div;
223 
224 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
225 		return __builtin_ffs(div) - 1;
226 
227 	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
228 		return (div != 0U) ? div : BIT(width);
229 
230 	if (table)
231 		return _get_table_val(table, div);
232 
233 	return div - 1U;
234 }
235 
236 static bool _is_valid_table_div(const struct div_table_cfg *table,
237 				unsigned int div)
238 {
239 	const struct div_table_cfg *clkt = NULL;
240 
241 	for (clkt = table; clkt->div; clkt++)
242 		if (clkt->div == div)
243 			return true;
244 
245 	return false;
246 }
247 
248 static bool _is_valid_div(const struct div_table_cfg *table,
249 			  unsigned int div, unsigned long flags)
250 {
251 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
252 		return IS_POWER_OF_TWO(div);
253 
254 	if (table)
255 		return _is_valid_table_div(table, div);
256 
257 	return true;
258 }
259 
260 static int divider_get_val(unsigned long rate, unsigned long parent_rate,
261 			   const struct div_table_cfg *table, uint8_t width,
262 			   unsigned long flags)
263 {
264 	unsigned int div = 0U;
265 	unsigned int value = 0U;
266 
267 	div = UDIV_ROUND_NEAREST((uint64_t)parent_rate, rate);
268 
269 	if (!_is_valid_div(table, div, flags))
270 		return -1;
271 
272 	value = _get_val(table, div, flags, width);
273 
274 	return MIN(value, MASK_WIDTH_SHIFT(width, 0));
275 }
276 
277 uint32_t stm32_div_get_value(int div_id)
278 {
279 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
280 	const struct div_cfg *divider = &priv->div[div_id];
281 	uint32_t val = 0;
282 
283 	val = io_read32(priv->base + divider->offset) >> divider->shift;
284 	val &= MASK_WIDTH_SHIFT(divider->width, 0);
285 
286 	return val;
287 }
288 
289 TEE_Result stm32_div_set_value(uint32_t div_id, uint32_t value)
290 {
291 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
292 	const struct div_cfg *divider = NULL;
293 	uintptr_t address = 0;
294 	uint32_t mask = 0;
295 
296 	if (div_id >= priv->nb_div)
297 		panic();
298 
299 	divider = &priv->div[div_id];
300 	address = priv->base + divider->offset;
301 
302 	mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
303 	io_clrsetbits32(address, mask, (value << divider->shift) & mask);
304 
305 	if (divider->ready == DIV_NO_RDY)
306 		return TEE_SUCCESS;
307 
308 	return stm32_gate_wait_ready((uint16_t)divider->ready, true);
309 }
310 
311 static unsigned long stm32_div_get_rate(int div_id, unsigned long prate)
312 {
313 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
314 	const struct div_cfg *divider = &priv->div[div_id];
315 	uint32_t val = stm32_div_get_value(div_id);
316 	unsigned int div = 0U;
317 
318 	div = _get_div(divider->table, val, divider->flags, divider->width);
319 	if (!div)
320 		return prate;
321 
322 	return ROUNDUP_DIV((uint64_t)prate, div);
323 }
324 
325 TEE_Result stm32_div_set_rate(int div_id, unsigned long rate,
326 			      unsigned long prate)
327 {
328 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
329 	const struct div_cfg *divider = &priv->div[div_id];
330 	int value = 0;
331 
332 	value = divider_get_val(rate, prate, divider->table,
333 				divider->width, divider->flags);
334 
335 	if (value < 0)
336 		return TEE_ERROR_GENERIC;
337 
338 	return stm32_div_set_value(div_id, value);
339 }
340 
341 /* STM32 MUX clock operators */
342 static size_t clk_stm32_mux_get_parent(struct clk *clk)
343 {
344 	struct clk_stm32_mux_cfg *cfg = clk->priv;
345 
346 	return stm32_mux_get_parent(cfg->mux_id);
347 }
348 
349 static TEE_Result clk_stm32_mux_set_parent(struct clk *clk, size_t pidx)
350 {
351 	struct clk_stm32_mux_cfg *cfg = clk->priv;
352 
353 	return stm32_mux_set_parent(cfg->mux_id, pidx);
354 }
355 
/* Mux-only clock: parent selection only, rate follows the parent */
const struct clk_ops clk_stm32_mux_ops = {
	.get_parent	= clk_stm32_mux_get_parent,
	.set_parent	= clk_stm32_mux_set_parent,
};
360 
361 /* STM32 GATE clock operators */
362 static TEE_Result clk_stm32_gate_enable(struct clk *clk)
363 {
364 	struct clk_stm32_gate_cfg *cfg = clk->priv;
365 
366 	stm32_gate_enable(cfg->gate_id);
367 
368 	return TEE_SUCCESS;
369 }
370 
371 static void clk_stm32_gate_disable(struct clk *clk)
372 {
373 	struct clk_stm32_gate_cfg *cfg = clk->priv;
374 
375 	stm32_gate_disable(cfg->gate_id);
376 }
377 
/* Gate-only clock without a ready flag */
const struct clk_ops clk_stm32_gate_ops = {
	.enable		= clk_stm32_gate_enable,
	.disable	= clk_stm32_gate_disable,
};
382 
383 static TEE_Result clk_stm32_gate_ready_enable(struct clk *clk)
384 {
385 	struct clk_stm32_gate_cfg *cfg = clk->priv;
386 
387 	return stm32_gate_rdy_enable(cfg->gate_id);
388 }
389 
390 static void clk_stm32_gate_ready_disable(struct clk *clk)
391 {
392 	struct clk_stm32_gate_cfg *cfg = clk->priv;
393 
394 	if (stm32_gate_rdy_disable(cfg->gate_id))
395 		panic();
396 }
397 
/* Gate clock whose state changes are confirmed by a ready flag */
const struct clk_ops clk_stm32_gate_ready_ops = {
	.enable		= clk_stm32_gate_ready_enable,
	.disable	= clk_stm32_gate_ready_disable,
};
402 
403 /* STM32 DIV clock operators */
404 unsigned long clk_stm32_divider_get_rate(struct clk *clk,
405 					 unsigned long parent_rate)
406 {
407 	struct clk_stm32_div_cfg *cfg = clk->priv;
408 
409 	return stm32_div_get_rate(cfg->div_id, parent_rate);
410 }
411 
412 TEE_Result clk_stm32_divider_set_rate(struct clk *clk,
413 				      unsigned long rate,
414 				      unsigned long parent_rate)
415 {
416 	struct clk_stm32_div_cfg *cfg = clk->priv;
417 
418 	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
419 }
420 
/* Divider-only clock: rate scaling, no gating or parent selection */
const struct clk_ops clk_stm32_divider_ops = {
	.get_rate	= clk_stm32_divider_get_rate,
	.set_rate	= clk_stm32_divider_set_rate,
};
425 
426 /* STM32 COMPOSITE clock operators */
427 size_t clk_stm32_composite_get_parent(struct clk *clk)
428 {
429 	struct clk_stm32_composite_cfg *cfg = clk->priv;
430 
431 	if (cfg->mux_id == NO_MUX) {
432 		/* It could be a normal case */
433 		return 0;
434 	}
435 
436 	return stm32_mux_get_parent(cfg->mux_id);
437 }
438 
439 TEE_Result clk_stm32_composite_set_parent(struct clk *clk, size_t pidx)
440 {
441 	struct clk_stm32_composite_cfg *cfg = clk->priv;
442 
443 	if (cfg->mux_id == NO_MUX)
444 		panic();
445 
446 	return stm32_mux_set_parent(cfg->mux_id, pidx);
447 }
448 
449 unsigned long clk_stm32_composite_get_rate(struct clk *clk,
450 					   unsigned long parent_rate)
451 {
452 	struct clk_stm32_composite_cfg *cfg = clk->priv;
453 
454 	if (cfg->div_id == NO_DIV)
455 		return parent_rate;
456 
457 	return stm32_div_get_rate(cfg->div_id, parent_rate);
458 }
459 
460 TEE_Result clk_stm32_composite_set_rate(struct clk *clk, unsigned long rate,
461 					unsigned long parent_rate)
462 {
463 	struct clk_stm32_composite_cfg *cfg = clk->priv;
464 
465 	if (cfg->div_id == NO_DIV)
466 		return TEE_SUCCESS;
467 
468 	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
469 }
470 
471 TEE_Result clk_stm32_composite_gate_enable(struct clk *clk)
472 {
473 	struct clk_stm32_composite_cfg *cfg = clk->priv;
474 
475 	stm32_gate_enable(cfg->gate_id);
476 
477 	return TEE_SUCCESS;
478 }
479 
480 void clk_stm32_composite_gate_disable(struct clk *clk)
481 {
482 	struct clk_stm32_composite_cfg *cfg = clk->priv;
483 
484 	stm32_gate_disable(cfg->gate_id);
485 }
486 
/* Composite clock: optional mux, divider and gate in one node */
const struct clk_ops clk_stm32_composite_ops = {
	.get_parent	= clk_stm32_composite_get_parent,
	.set_parent	= clk_stm32_composite_set_parent,
	.get_rate	= clk_stm32_composite_get_rate,
	.set_rate	= clk_stm32_composite_set_rate,
	.enable		= clk_stm32_composite_gate_enable,
	.disable	= clk_stm32_composite_gate_disable,
};
495 
496 TEE_Result clk_stm32_set_parent_by_index(struct clk *clk, size_t pidx)
497 {
498 	struct clk *parent = clk_get_parent_by_index(clk, pidx);
499 	TEE_Result res = TEE_ERROR_GENERIC;
500 
501 	if (parent)
502 		res = clk_set_parent(clk, parent);
503 
504 	return res;
505 }
506 
507 int clk_stm32_parse_fdt_by_name(const void *fdt, int node, const char *name,
508 				uint32_t *tab, uint32_t *nb)
509 {
510 	const fdt32_t *cell = NULL;
511 	int len = 0;
512 	uint32_t i = 0;
513 
514 	cell = fdt_getprop(fdt, node, name, &len);
515 	if (cell && len > 0) {
516 		for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++)
517 			tab[i] = fdt32_to_cpu(cell[i]);
518 
519 		*nb = (uint32_t)len / sizeof(uint32_t);
520 	} else {
521 		*nb = 0;
522 	}
523 
524 	return 0;
525 }
526 
527 TEE_Result clk_stm32_init(struct clk_stm32_priv *priv, uintptr_t base)
528 {
529 	stm32_clock_data = priv;
530 
531 	priv->base = base;
532 
533 	priv->gate_cpt = calloc(priv->nb_gates, sizeof(*priv->gate_cpt));
534 	if (!priv->gate_cpt)
535 		return TEE_ERROR_OUT_OF_MEMORY;
536 
537 	return TEE_SUCCESS;
538 }
539 
540 static unsigned long fixed_factor_get_rate(struct clk *clk,
541 					   unsigned long parent_rate)
542 {
543 	struct fixed_factor_cfg *d = clk->priv;
544 
545 	unsigned long long rate = (unsigned long long)parent_rate * d->mult;
546 
547 	if (d->div == 0U)
548 		panic("error division by zero");
549 
550 	return (unsigned long)(rate / d->div);
551 };
552 
/* Fixed mult/div factor applied to the parent rate */
const struct clk_ops clk_fixed_factor_ops = {
	.get_rate	= fixed_factor_get_rate,
};
556 
557 static unsigned long clk_fixed_get_rate(struct clk *clk,
558 					unsigned long parent_rate __unused)
559 {
560 	struct clk_fixed_rate_cfg *cfg = clk->priv;
561 
562 	return cfg->rate;
563 }
564 
/* Fixed-rate clock (e.g. oscillators) */
const struct clk_ops clk_fixed_clk_ops = {
	.get_rate	= clk_fixed_get_rate,
};
568 
569 struct clk *stm32mp_rcc_clock_id_to_clk(unsigned long clock_id)
570 {
571 	struct clk_stm32_priv *priv = clk_stm32_get_priv();
572 
573 	if (clock_id > priv->nb_clk_refs)
574 		return NULL;
575 
576 	return priv->clk_refs[clock_id];
577 }
578 
579 static TEE_Result stm32mp_clk_dt_get_clk(struct dt_pargs *pargs,
580 					 void *data __unused,
581 					 struct clk **out_clk)
582 {
583 	unsigned long clock_id = pargs->args[0];
584 	struct clk *clk = NULL;
585 
586 	if (pargs->args_count != 1)
587 		return TEE_ERROR_BAD_PARAMETERS;
588 
589 	clk = stm32mp_rcc_clock_id_to_clk(clock_id);
590 	if (!clk)
591 		return TEE_ERROR_BAD_PARAMETERS;
592 
593 	*out_clk = clk;
594 
595 	return TEE_SUCCESS;
596 }
597 
598 static void clk_stm32_register_clocks(struct clk_stm32_priv *priv)
599 {
600 	unsigned int i = 0;
601 
602 	for (i = 0; i < priv->nb_clk_refs; i++) {
603 		struct clk *clk = priv->clk_refs[i];
604 
605 		if (!clk)
606 			continue;
607 
608 		refcount_set(&clk->enabled_count, 0);
609 
610 		if (clk_register(clk))
611 			panic();
612 	}
613 
614 	/* Critical clocks management */
615 	for (i = 0; i < priv->nb_clk_refs; i++) {
616 		struct clk *clk = priv->clk_refs[i];
617 
618 		if (!clk)
619 			continue;
620 
621 		if (priv->is_critical && priv->is_critical(clk))
622 			clk_enable(clk);
623 	}
624 }
625 
626 void stm32mp_clk_provider_probe_final(const void *fdt, int node,
627 				      struct clk_stm32_priv *priv)
628 {
629 	TEE_Result res = TEE_ERROR_GENERIC;
630 
631 	clk_stm32_register_clocks(priv);
632 
633 	res = clk_dt_register_clk_provider(fdt, node, stm32mp_clk_dt_get_clk,
634 					   priv);
635 	if (res)
636 		panic("Couldn't register clock provider");
637 }
638