// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
 */

#include <config.h>
#include <drivers/clk.h>
#include <drivers/clk_dt.h>
#include <io.h>
#include <kernel/boot.h>
#include <kernel/delay.h>
#include <kernel/dt.h>
#include <libfdt.h>
#include <stdio.h>
#include <stm32_util.h>

#include "clk-stm32-core.h"

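/*
 * Core helpers shared by the STM32MP clock drivers: raw accessors for the
 * RCC mux, gate and divider fields described by struct clk_stm32_priv, the
 * clock operators built on top of them (mux, gate, divider, composite,
 * fixed-rate and fixed-factor clocks) and the registration of the device
 * tree clock provider.
 */
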
#define RCC_MP_ENCLRR_OFFSET	0x4

#define TIMEOUT_US_200MS	U(200000)
#define TIMEOUT_US_1S		U(1000000)

static struct clk_stm32_priv *stm32_clock_data;

struct clk_stm32_priv *clk_stm32_get_priv(void)
{
	return stm32_clock_data;
}

uintptr_t clk_stm32_get_rcc_base(void)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();

	return priv->base;
}

/* STM32 MUX API */
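/*
 * Each mux is described by a mux_cfg entry: a selector field of 'width'
 * bits at 'shift' in the RCC register at 'offset'. When 'ready' is not
 * MUX_NO_RDY, reparenting also waits for the associated ready gate.
 */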
size_t stm32_mux_get_parent(uint32_t mux_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct mux_cfg *mux = &priv->muxes[mux_id];
	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);

	return (io_read32(priv->base + mux->offset) & mask) >> mux->shift;
}

TEE_Result stm32_mux_set_parent(uint16_t mux_id, uint8_t sel)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct mux_cfg *mux = &priv->muxes[mux_id];
	uint32_t mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);
	uintptr_t address = priv->base + mux->offset;

	io_clrsetbits32(address, mask, (sel << mux->shift) & mask);

	if (mux->ready != MUX_NO_RDY)
		return stm32_gate_wait_ready((uint16_t)mux->ready, true);

	return TEE_SUCCESS;
}

/* STM32 GATE API */
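/*
 * Gates either use a dedicated set/clear register pair (set_clr flag set:
 * write the bit to enable, write it at RCC_MP_ENCLRR_OFFSET to disable) or
 * a plain enable bit updated through the io_setbits32_stm32shregs() and
 * io_clrbits32_stm32shregs() helpers. Gates with a ready indication expose
 * it through the gate entry that immediately follows them (see
 * stm32_gate_ready_endisable()).
 */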
static void stm32_gate_endisable(uint16_t gate_id, bool enable)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (enable) {
		if (gate->set_clr)
			io_write32(addr, BIT(gate->bit_idx));
		else
			io_setbits32_stm32shregs(addr, BIT(gate->bit_idx));
	} else {
		if (gate->set_clr)
			io_write32(addr + RCC_MP_ENCLRR_OFFSET,
				   BIT(gate->bit_idx));
		else
			io_clrbits32_stm32shregs(addr, BIT(gate->bit_idx));
	}
}

void stm32_gate_disable(uint16_t gate_id)
{
	stm32_gate_endisable(gate_id, false);
}

void stm32_gate_enable(uint16_t gate_id)
{
	stm32_gate_endisable(gate_id, true);
}

bool stm32_gate_is_enabled(uint16_t gate_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	return (io_read32(addr) & BIT(gate->bit_idx)) != 0U;
}

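/*
 * Poll the gate bit until it reaches the expected state (set when ready_on
 * is true, cleared otherwise) or until the 1 second timeout expires, in
 * which case TEE_ERROR_GENERIC is returned.
 */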
TEE_Result stm32_gate_wait_ready(uint16_t gate_id, bool ready_on)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t address = priv->base + gate->offset;
	uint32_t mask_rdy = BIT(gate->bit_idx);
	uint64_t timeout = timeout_init_us(TIMEOUT_US_1S);
	uint32_t mask = 0U;

	if (ready_on)
		mask = BIT(gate->bit_idx);

	while ((io_read32(address) & mask_rdy) != mask)
		if (timeout_elapsed(timeout))
			break;

	if ((io_read32(address) & mask_rdy) != mask)
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

/* STM32 GATE READY clock operators */
static TEE_Result stm32_gate_ready_endisable(uint16_t gate_id, bool enable,
					     bool wait_rdy)
{
	stm32_gate_endisable(gate_id, enable);

	if (wait_rdy)
		return stm32_gate_wait_ready(gate_id + 1, enable);

	return TEE_SUCCESS;
}

TEE_Result stm32_gate_rdy_enable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, true, true);
}

TEE_Result stm32_gate_rdy_disable(uint16_t gate_id)
{
	return stm32_gate_ready_endisable(gate_id, false, true);
}

/* STM32 DIV API */
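/*
 * Divider helpers converting between a register field value and the
 * effective division factor. The flag handling (CLK_DIVIDER_ONE_BASED,
 * CLK_DIVIDER_POWER_OF_TWO, CLK_DIVIDER_MAX_AT_ZERO) and the optional
 * value/divider tables follow the conventions of the generic clock
 * divider used in other clock frameworks.
 */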
static unsigned int _get_table_div(const struct div_table_cfg *table,
				   unsigned int val)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;

	return 0;
}

static unsigned int _get_table_val(const struct div_table_cfg *table,
				   unsigned int div)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;

	return 0;
}

static unsigned int _get_div(const struct div_table_cfg *table,
			     unsigned int val, unsigned long flags,
			     uint8_t width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return BIT(val);

	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (val != 0U) ? val : BIT(width);

	if (table)
		return _get_table_div(table, val);

	return val + 1U;
}

static unsigned int _get_val(const struct div_table_cfg *table,
			     unsigned int div, unsigned long flags,
			     uint8_t width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __builtin_ffs(div) - 1;

	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div == BIT(width)) ? 0U : div;

	if (table)
		return _get_table_val(table, div);

	return div - 1U;
}

static bool _is_valid_table_div(const struct div_table_cfg *table,
				unsigned int div)
{
	const struct div_table_cfg *clkt = NULL;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;

	return false;
}

static bool _is_valid_div(const struct div_table_cfg *table,
			  unsigned int div, unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return IS_POWER_OF_TWO(div);

	if (table)
		return _is_valid_table_div(table, div);

	return true;
}

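/*
 * Compute the register field value yielding the divider closest to
 * parent_rate / rate, or -1 when that divider is not allowed by the flags
 * or table.
 *
 * Worked example (no table, no flags, width = 3): parent_rate = 64 MHz and
 * rate = 16 MHz give div = 4, hence a field value of div - 1 = 3, which
 * fits within the 3-bit field.
 */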
static int divider_get_val(unsigned long rate, unsigned long parent_rate,
			   const struct div_table_cfg *table, uint8_t width,
			   unsigned long flags)
{
	unsigned int div = 0U;
	unsigned int value = 0U;

	div = UDIV_ROUND_NEAREST((uint64_t)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -1;

	value = _get_val(table, div, flags, width);

	return MIN(value, MASK_WIDTH_SHIFT(width, 0));
}

uint32_t stm32_div_get_value(int div_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = 0;

	val = io_read32(priv->base + divider->offset) >> divider->shift;
	val &= MASK_WIDTH_SHIFT(divider->width, 0);

	return val;
}

TEE_Result stm32_div_set_value(uint32_t div_id, uint32_t value)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = NULL;
	uintptr_t address = 0;
	uint32_t mask = 0;

	if (div_id >= priv->nb_div)
		panic();

	divider = &priv->div[div_id];
	address = priv->base + divider->offset;

	mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
	io_clrsetbits32(address, mask, (value << divider->shift) & mask);

	if (divider->ready == DIV_NO_RDY)
		return TEE_SUCCESS;

	return stm32_gate_wait_ready((uint16_t)divider->ready, true);
}

static unsigned long stm32_div_get_rate(int div_id, unsigned long prate)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = stm32_div_get_value(div_id);
	unsigned int div = 0U;

	div = _get_div(divider->table, val, divider->flags, divider->width);
	if (!div)
		return prate;

	return ROUNDUP_DIV((uint64_t)prate, div);
}

TEE_Result stm32_div_set_rate(int div_id, unsigned long rate,
			      unsigned long prate)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();
	const struct div_cfg *divider = &priv->div[div_id];
	int value = 0;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);

	if (value < 0)
		return TEE_ERROR_GENERIC;

	return stm32_div_set_value(div_id, value);
}

/* STM32 MUX clock operators */
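/*
 * Operators for a clock whose private data is a struct clk_stm32_mux_cfg:
 * the parent index directly maps to the mux selector handled by the raw
 * mux helpers above.
 */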
static size_t clk_stm32_mux_get_parent(struct clk *clk)
{
	struct clk_stm32_mux_cfg *cfg = clk->priv;

	return stm32_mux_get_parent(cfg->mux_id);
}

static TEE_Result clk_stm32_mux_set_parent(struct clk *clk, size_t pidx)
{
	struct clk_stm32_mux_cfg *cfg = clk->priv;

	return stm32_mux_set_parent(cfg->mux_id, pidx);
}

const struct clk_ops clk_stm32_mux_ops = {
	.get_parent	= clk_stm32_mux_get_parent,
	.set_parent	= clk_stm32_mux_set_parent,
};

/* STM32 GATE clock operators */
static TEE_Result clk_stm32_gate_enable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	stm32_gate_enable(cfg->gate_id);

	return TEE_SUCCESS;
}

static void clk_stm32_gate_disable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	stm32_gate_disable(cfg->gate_id);
}

const struct clk_ops clk_stm32_gate_ops = {
	.enable		= clk_stm32_gate_enable,
	.disable	= clk_stm32_gate_disable,
};

static TEE_Result clk_stm32_gate_ready_enable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	return stm32_gate_rdy_enable(cfg->gate_id);
}

static void clk_stm32_gate_ready_disable(struct clk *clk)
{
	struct clk_stm32_gate_cfg *cfg = clk->priv;

	if (stm32_gate_rdy_disable(cfg->gate_id))
		panic();
}

const struct clk_ops clk_stm32_gate_ready_ops = {
	.enable		= clk_stm32_gate_ready_enable,
	.disable	= clk_stm32_gate_ready_disable,
};

/* STM32 DIV clock operators */
unsigned long clk_stm32_divider_get_rate(struct clk *clk,
					 unsigned long parent_rate)
{
	struct clk_stm32_div_cfg *cfg = clk->priv;

	return stm32_div_get_rate(cfg->div_id, parent_rate);
}

TEE_Result clk_stm32_divider_set_rate(struct clk *clk,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct clk_stm32_div_cfg *cfg = clk->priv;

	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
}

const struct clk_ops clk_stm32_divider_ops = {
	.get_rate	= clk_stm32_divider_get_rate,
	.set_rate	= clk_stm32_divider_set_rate,
};

/* STM32 COMPOSITE clock operators */
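/*
 * A composite clock combines an optional mux (NO_MUX when absent), an
 * optional divider (NO_DIV when absent) and a gate. Without a mux the
 * parent index is always 0 and set_parent panics; without a divider the
 * rate follows the parent and set_rate silently succeeds.
 */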
size_t clk_stm32_composite_get_parent(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->mux_id == NO_MUX) {
		/* Not having a mux is a valid configuration */
		return 0;
	}

	return stm32_mux_get_parent(cfg->mux_id);
}

TEE_Result clk_stm32_composite_set_parent(struct clk *clk, size_t pidx)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->mux_id == NO_MUX)
		panic();

	return stm32_mux_set_parent(cfg->mux_id, pidx);
}

unsigned long clk_stm32_composite_get_rate(struct clk *clk,
					   unsigned long parent_rate)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->div_id == NO_DIV)
		return parent_rate;

	return stm32_div_get_rate(cfg->div_id, parent_rate);
}

TEE_Result clk_stm32_composite_set_rate(struct clk *clk, unsigned long rate,
					unsigned long parent_rate)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	if (cfg->div_id == NO_DIV)
		return TEE_SUCCESS;

	return stm32_div_set_rate(cfg->div_id, rate, parent_rate);
}

TEE_Result clk_stm32_composite_gate_enable(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	stm32_gate_enable(cfg->gate_id);

	return TEE_SUCCESS;
}

void clk_stm32_composite_gate_disable(struct clk *clk)
{
	struct clk_stm32_composite_cfg *cfg = clk->priv;

	stm32_gate_disable(cfg->gate_id);
}

const struct clk_ops clk_stm32_composite_ops = {
	.get_parent	= clk_stm32_composite_get_parent,
	.set_parent	= clk_stm32_composite_set_parent,
	.get_rate	= clk_stm32_composite_get_rate,
	.set_rate	= clk_stm32_composite_set_rate,
	.enable		= clk_stm32_composite_gate_enable,
	.disable	= clk_stm32_composite_gate_disable,
};

TEE_Result clk_stm32_set_parent_by_index(struct clk *clk, size_t pidx)
{
	struct clk *parent = clk_get_parent_by_index(clk, pidx);
	TEE_Result res = TEE_ERROR_GENERIC;

	if (parent)
		res = clk_set_parent(clk, parent);

	return res;
}

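/*
 * Copy the 32-bit cells of the named DT property into tab, which must be
 * large enough, and report the cell count through nb.
 */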
int clk_stm32_parse_fdt_by_name(const void *fdt, int node, const char *name,
				uint32_t *tab, uint32_t *nb)
{
	const fdt32_t *cell = NULL;
	int len = 0;
	uint32_t i = 0;

	cell = fdt_getprop(fdt, node, name, &len);
	if (cell)
		for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++)
			tab[i] = fdt32_to_cpu(cell[i]);

	*nb = (uint32_t)len / sizeof(uint32_t);

	return 0;
}

TEE_Result clk_stm32_init(struct clk_stm32_priv *priv, uintptr_t base)
{
	stm32_clock_data = priv;

	priv->base = base;

	return TEE_SUCCESS;
}
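
/*
 * Typical use from a platform RCC clock driver, as an illustrative sketch
 * only (the probe hook, clock data and RCC base retrieval below are
 * hypothetical and platform specific):
 *
 *	static TEE_Result stm32mp_rcc_clk_probe(const void *fdt, int node,
 *						const void *compat __unused)
 *	{
 *		TEE_Result res = clk_stm32_init(&stm32mp_clock_priv, rcc_base);
 *
 *		if (res)
 *			return res;
 *
 *		stm32mp_clk_provider_probe_final(fdt, node, &stm32mp_clock_priv);
 *
 *		return TEE_SUCCESS;
 *	}
 *
 * where stm32mp_clock_priv is the platform struct clk_stm32_priv (clock
 * references, mux/gate/divider tables) and rcc_base the RCC base address.
 */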

static unsigned long fixed_factor_get_rate(struct clk *clk,
					   unsigned long parent_rate)
{
	struct fixed_factor_cfg *d = clk->priv;

	unsigned long long rate = (unsigned long long)parent_rate * d->mult;

	if (d->div == 0U)
		panic("error division by zero");

	return (unsigned long)(rate / d->div);
}

const struct clk_ops clk_fixed_factor_ops = {
	.get_rate	= fixed_factor_get_rate,
};

static unsigned long clk_fixed_get_rate(struct clk *clk,
					unsigned long parent_rate __unused)
{
	struct clk_fixed_rate_cfg *cfg = clk->priv;

	return cfg->rate;
}

const struct clk_ops clk_fixed_clk_ops = {
	.get_rate	= clk_fixed_get_rate,
};

struct clk *stm32mp_rcc_clock_id_to_clk(unsigned long clock_id)
{
	struct clk_stm32_priv *priv = clk_stm32_get_priv();

	if (clock_id >= priv->nb_clk_refs)
		return NULL;

	return priv->clk_refs[clock_id];
}

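/*
 * Device tree clock provider callback: translate a single-cell phandle
 * argument (the RCC clock ID from the DT bindings) into the matching
 * struct clk, or return NULL with *res set to TEE_ERROR_BAD_PARAMETERS.
 */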
static struct clk *stm32mp_clk_dt_get_clk(struct dt_driver_phandle_args *pargs,
					  void *data __unused, TEE_Result *res)
{
	unsigned long clock_id = pargs->args[0];
	struct clk *clk = NULL;

	*res = TEE_ERROR_BAD_PARAMETERS;

	if (pargs->args_count != 1)
		return NULL;

	clk = stm32mp_rcc_clock_id_to_clk(clock_id);
	if (!clk)
		return NULL;

	*res = TEE_SUCCESS;
	return clk;
}

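/*
 * Register every non-NULL clock reference with the clock framework, then
 * enable the clocks flagged as critical by the platform is_critical() hook.
 */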
static void clk_stm32_register_clocks(struct clk_stm32_priv *priv)
{
	unsigned int i = 0;

	for (i = 0; i < priv->nb_clk_refs; i++) {
		struct clk *clk = priv->clk_refs[i];

		if (!clk)
			continue;

		refcount_set(&clk->enabled_count, 0);

		if (clk_register(clk))
			panic();
	}

	/* Critical clocks management */
	for (i = 0; i < priv->nb_clk_refs; i++) {
		struct clk *clk = priv->clk_refs[i];

		if (!clk)
			continue;

		if (priv->is_critical && priv->is_critical(clk))
			clk_enable(clk);
	}
}

void stm32mp_clk_provider_probe_final(const void *fdt, int node,
				      struct clk_stm32_priv *priv)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	clk_stm32_register_clocks(priv);

	res = clk_dt_register_clk_provider(fdt, node, stm32mp_clk_dt_get_clk,
					   priv);
	if (res)
		panic("Couldn't register clock provider");
}
605