xref: /optee_os/core/drivers/clk/clk.c (revision fab37ad7dc719c507661870039f7bcf91f5a6029)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021, Bootlin
4  * Copyright (c) 2023, STMicroelectronics
5  */
6 
#include <config.h>
#include <drivers/clk.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <malloc.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
15 
16 /* Global clock tree lock */
17 static unsigned int clk_lock = SPINLOCK_UNLOCK;
18 
19 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
20 static SLIST_HEAD(, clk) clock_list = SLIST_HEAD_INITIALIZER(clock_list);
21 #endif
22 
23 struct clk *clk_alloc(const char *name, const struct clk_ops *ops,
24 		      struct clk **parent_clks, size_t parent_count)
25 {
26 	struct clk *clk = NULL;
27 	size_t parent = 0;
28 
29 	clk = calloc(1, sizeof(*clk) + parent_count * sizeof(clk));
30 	if (!clk)
31 		return NULL;
32 
33 	clk->num_parents = parent_count;
34 	for (parent = 0; parent < parent_count; parent++)
35 		clk->parents[parent] = parent_clks[parent];
36 
37 	clk->name = name;
38 	clk->ops = ops;
39 	refcount_set(&clk->enabled_count, 0);
40 
41 	return clk;
42 }
43 
/*
 * Release a clock allocated with clk_alloc()
 * @clk: Clock to free, may be NULL (free() is then a no-op)
 */
void clk_free(struct clk *clk)
{
	free(clk);
}
48 
49 static bool __maybe_unused clk_check(struct clk *clk)
50 {
51 	if (!clk || !clk->ops)
52 		return false;
53 
54 	if (clk->ops->set_parent && !clk->ops->get_parent)
55 		return false;
56 
57 	if (clk->num_parents > 1 && !clk->ops->get_parent)
58 		return false;
59 
60 	return true;
61 }
62 
63 static void clk_compute_rate_no_lock(struct clk *clk)
64 {
65 	unsigned long parent_rate = 0;
66 
67 	if (clk->parent)
68 		parent_rate = clk->parent->rate;
69 
70 	if (clk->ops->get_rate)
71 		clk->rate = clk->ops->get_rate(clk, parent_rate);
72 	else
73 		clk->rate = parent_rate;
74 }
75 
76 struct clk *clk_get_parent_by_index(struct clk *clk, size_t pidx)
77 {
78 	if (pidx >= clk->num_parents)
79 		return NULL;
80 
81 	return clk->parents[pidx];
82 }
83 
84 static void clk_init_parent(struct clk *clk)
85 {
86 	size_t pidx = 0;
87 
88 	switch (clk->num_parents) {
89 	case 0:
90 		break;
91 	case 1:
92 		clk->parent = clk->parents[0];
93 		break;
94 	default:
95 		pidx = clk->ops->get_parent(clk);
96 		assert(pidx < clk->num_parents);
97 
98 		clk->parent = clk->parents[pidx];
99 		break;
100 	}
101 }
102 
/*
 * Register a clock in the clock framework
 * @clk: Clock to register, must satisfy clk_check() constraints
 *
 * Resolves the effective parent, computes the initial cached rate and,
 * when CFG_DRIVERS_CLK_PRINT_TREE is enabled, links the clock into the
 * global list used by clk_print_tree().
 */
TEE_Result clk_register(struct clk *clk)
{
	assert(clk_check(clk));

	clk_init_parent(clk);
	clk_compute_rate_no_lock(clk);

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	SLIST_INSERT_HEAD(&clock_list, clk, link);
#endif

	DMSG("Registered clock %s, freq %lu", clk->name, clk_get_rate(clk));

	return TEE_SUCCESS;
}
118 
119 static bool clk_is_enabled_no_lock(struct clk *clk)
120 {
121 	return refcount_val(&clk->enabled_count) != 0;
122 }
123 
/*
 * Report whether @clk is currently enabled (refcount is non-zero).
 * NOTE(review): reads the refcount without taking clk_lock, so the
 * value may be stale by the time the caller acts on it.
 */
bool clk_is_enabled(struct clk *clk)
{
	return clk_is_enabled_no_lock(clk);
}
128 
129 static void clk_disable_no_lock(struct clk *clk)
130 {
131 	struct clk *parent = NULL;
132 
133 	if (!refcount_dec(&clk->enabled_count))
134 		return;
135 
136 	if (clk->ops->disable)
137 		clk->ops->disable(clk);
138 
139 	parent = clk_get_parent(clk);
140 	if (parent)
141 		clk_disable_no_lock(parent);
142 }
143 
/*
 * Take a reference on @clk, enabling it and its parent chain when it
 * was not yet enabled. Caller holds the clock tree lock.
 *
 * The parent is enabled before @clk; if the driver enable handler
 * then fails, the reference just taken on the parent is rolled back.
 */
static TEE_Result clk_enable_no_lock(struct clk *clk)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct clk *parent = NULL;

	/* Fast path: clock already enabled, only bump the refcount */
	if (refcount_inc(&clk->enabled_count))
		return TEE_SUCCESS;

	parent = clk_get_parent(clk);
	if (parent) {
		/* Make sure the whole parent chain is running first */
		res = clk_enable_no_lock(parent);
		if (res)
			return res;
	}

	if (clk->ops->enable) {
		res = clk->ops->enable(clk);
		if (res) {
			/* Roll back the parent reference taken above */
			if (parent)
				clk_disable_no_lock(parent);

			return res;
		}
	}

	/* First reference: record it now that hardware is enabled */
	refcount_set(&clk->enabled_count, 1);

	return TEE_SUCCESS;
}
173 
174 TEE_Result clk_enable(struct clk *clk)
175 {
176 	uint32_t exceptions = 0;
177 	TEE_Result res = TEE_ERROR_GENERIC;
178 
179 	exceptions = cpu_spin_lock_xsave(&clk_lock);
180 	res = clk_enable_no_lock(clk);
181 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
182 
183 	return res;
184 }
185 
186 void clk_disable(struct clk *clk)
187 {
188 	uint32_t exceptions = 0;
189 
190 	exceptions = cpu_spin_lock_xsave(&clk_lock);
191 	clk_disable_no_lock(clk);
192 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
193 }
194 
/*
 * Get the rate of a clock in Hz.
 * NOTE(review): returns the cached value computed at registration or
 * at the last rate/parent change; the hardware is not queried here.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
199 
/*
 * Set the rate of a clock with the clock tree lock held
 * @clk: Clock to update
 * @rate: Requested rate in Hz
 *
 * With CLK_SET_RATE_PARENT the request is first propagated to the
 * parent and the rate achieved by the parent becomes the request for
 * @clk. With CLK_SET_RATE_UNGATE the clock is kept enabled around the
 * driver set_rate handler. The cached rate is refreshed on success.
 */
static TEE_Result clk_set_rate_no_lock(struct clk *clk, unsigned long rate)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	unsigned long parent_rate = 0;

	/*
	 * NOTE(review): @parent_rate is captured before a possible
	 * CLK_SET_RATE_PARENT propagation below, so set_rate receives
	 * the pre-change parent rate — confirm drivers combining both
	 * behaviors expect this.
	 */
	if (clk->parent)
		parent_rate = clk_get_rate(clk->parent);

	/* CLK_SET_RATE_PARENT is meaningless on a root clock */
	assert(!(clk->flags & CLK_SET_RATE_PARENT) || clk->parent);
	if (clk->flags & CLK_SET_RATE_PARENT) {
		res = clk_set_rate_no_lock(clk->parent, rate);
		if (res)
			return res;
		rate = clk_get_rate(clk->parent);
	}

	if (clk->ops->set_rate) {
		if (clk->flags & CLK_SET_RATE_UNGATE) {
			res = clk_enable_no_lock(clk);
			if (res)
				return res;
		}

		res = clk->ops->set_rate(clk, rate, parent_rate);

		/* Balance the reference taken above, even on failure */
		if (clk->flags & CLK_SET_RATE_UNGATE)
			clk_disable_no_lock(clk);

		if (res)
			return res;
	}

	/* Refresh the cached rate from the new hardware settings */
	clk_compute_rate_no_lock(clk);

	return TEE_SUCCESS;
}
236 
237 TEE_Result clk_set_rate(struct clk *clk, unsigned long rate)
238 {
239 	uint32_t exceptions = 0;
240 	TEE_Result res = TEE_ERROR_GENERIC;
241 
242 	exceptions =  cpu_spin_lock_xsave(&clk_lock);
243 
244 	if (clk->flags & CLK_SET_RATE_GATE && clk_is_enabled_no_lock(clk))
245 		res = TEE_ERROR_BAD_STATE;
246 	else
247 		res = clk_set_rate_no_lock(clk, rate);
248 
249 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
250 
251 	return res;
252 }
253 
/* Return the current parent of @clk, or NULL for a root clock */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
258 
259 static TEE_Result clk_get_parent_idx(struct clk *clk, struct clk *parent,
260 				     size_t *pidx)
261 {
262 	size_t i = 0;
263 
264 	for (i = 0; i < clk_get_num_parents(clk); i++) {
265 		if (clk_get_parent_by_index(clk, i) == parent) {
266 			*pidx = i;
267 			return TEE_SUCCESS;
268 		}
269 	}
270 	EMSG("Clock %s is not a parent of clock %s", parent->name, clk->name);
271 
272 	return TEE_ERROR_BAD_PARAMETERS;
273 }
274 
/*
 * Switch the parent of a clock with the clock tree lock held
 * @clk: Clock to update
 * @parent: New parent, must be one of clk->parents[]
 * @pidx: Index of @parent in clk->parents[]
 *
 * If @clk is enabled, its reference on the current parent tree is
 * dropped before the switch and re-taken on the new tree afterwards.
 * With CLK_SET_PARENT_PRE_ENABLE the new parent is additionally kept
 * running across the switch.
 */
static TEE_Result clk_set_parent_no_lock(struct clk *clk, struct clk *parent,
					 size_t pidx)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	bool was_enabled = false;

	/* Requested parent is already the one set */
	if (clk->parent == parent)
		return TEE_SUCCESS;

	was_enabled = clk_is_enabled_no_lock(clk);
	/* Call is needed to decrement refcount on current parent tree */
	if (was_enabled) {
		if (clk->flags & CLK_SET_PARENT_PRE_ENABLE) {
			res = clk_enable_no_lock(parent);
			if (res)
				return res;
		}

		clk_disable_no_lock(clk);
	}

	res = clk->ops->set_parent(clk, pidx);
	if (res)
		goto out;

	clk->parent = parent;

	/* The parent changed and the rate might also have changed */
	clk_compute_rate_no_lock(clk);

out:
	/* Call is needed to increment refcount on the new parent tree */
	if (was_enabled) {
		/* Refcounts were balanced above so this cannot fail softly */
		res = clk_enable_no_lock(clk);
		if (res)
			panic("Failed to re-enable clock after setting parent");

		if (clk->flags & CLK_SET_PARENT_PRE_ENABLE) {
			/* Balance refcount when new parent was pre-enabled */
			clk_disable_no_lock(parent);
		}
	}

	return res;
}
321 
322 TEE_Result clk_set_parent(struct clk *clk, struct clk *parent)
323 {
324 	size_t pidx = 0;
325 	uint32_t exceptions = 0;
326 	TEE_Result res = TEE_ERROR_GENERIC;
327 
328 	if (clk_get_parent_idx(clk, parent, &pidx) || !clk->ops->set_parent)
329 		return TEE_ERROR_BAD_PARAMETERS;
330 
331 	exceptions = cpu_spin_lock_xsave(&clk_lock);
332 	if (clk->flags & CLK_SET_PARENT_GATE && clk_is_enabled_no_lock(clk)) {
333 		res = TEE_ERROR_BAD_STATE;
334 		goto out;
335 	}
336 
337 	res = clk_set_parent_no_lock(clk, parent, pidx);
338 out:
339 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
340 
341 	return res;
342 }
343 
344 TEE_Result clk_get_rates_array(struct clk *clk, size_t start_index,
345 			       unsigned long *rates, size_t *nb_elts)
346 {
347 	if (!clk->ops->get_rates_array)
348 		return TEE_ERROR_NOT_SUPPORTED;
349 
350 	return clk->ops->get_rates_array(clk, start_index, rates, nb_elts);
351 }
352 
353 TEE_Result clk_get_rates_steps(struct clk *clk, unsigned long *min,
354 			       unsigned long *max, unsigned long *step)
355 {
356 	if (!clk->ops->get_rates_steps)
357 		return TEE_ERROR_NOT_SUPPORTED;
358 
359 	return clk->ops->get_rates_steps(clk, min, max, step);
360 }
361 
362 TEE_Result clk_get_duty_cycle(struct clk *clk,
363 			      struct clk_duty_cycle *duty_cycle)
364 {
365 	if (clk->ops->get_duty_cycle)
366 		return clk->ops->get_duty_cycle(clk, duty_cycle);
367 
368 	if (clk->parent && (clk->flags & CLK_DUTY_CYCLE_PARENT))
369 		return clk_get_duty_cycle(clk->parent, duty_cycle);
370 
371 	/* Default set 50% duty cycle */
372 	duty_cycle->num = 1;
373 	duty_cycle->den = 2;
374 
375 	return TEE_SUCCESS;
376 }
377 
/* Return updated message buffer position or NULL on failure */
379 static __printf(3, 4) char *add_msg(char *cur, char *end, const char *fmt, ...)
380 {
381 	va_list ap = { };
382 	int max_len = end - cur;
383 	int ret = 0;
384 
385 	va_start(ap, fmt);
386 	ret = vsnprintf(cur, max_len, fmt, ap);
387 	va_end(ap);
388 
389 	if (ret < 0 || ret >= max_len)
390 		return NULL;
391 
392 	return cur + ret;
393 }
394 
395 static struct clk *find_next_clk(struct clk *parent __maybe_unused,
396 				 struct clk *sibling __maybe_unused)
397 {
398 	struct clk *clk = NULL;
399 
400 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
401 	if (sibling)
402 		clk = SLIST_NEXT(sibling, link);
403 	else
404 		clk = SLIST_FIRST(&clock_list);
405 
406 	while (clk && clk->parent != parent)
407 		clk = SLIST_NEXT(clk, link);
408 #endif
409 
410 	return clk;
411 }
412 
/* Tell whether @clk is the last listed child of its parent */
static bool clk_is_parent_last_child(struct clk *clk)
{
	return !find_next_clk(clk->parent, clk);
}
417 
418 static bool indent_last_node_already_found(struct clk *node_clk,
419 					   int node_indent, int cur_indent)
420 {
421 	struct clk *clk = node_clk;
422 	int n = 0;
423 
424 	/* Find parent clock at level @node_indent - @cur_indent - 1 */
425 	for (n = 0; n < node_indent - cur_indent - 1; n++)
426 		clk = clk->parent;
427 
428 	return clk_is_parent_last_child(clk);
429 }
430 
/*
 * Print one line of the clock tree for @clk at depth @indent: ASCII
 * tree-drawing prefix, clock name, on/off state and refcount, and the
 * rate scaled to the largest unit that divides it exactly by 1000.
 * If the line overflows msg_buf it is terminated with "...".
 */
static void __maybe_unused print_clk(struct clk *clk, int indent)
{
	static const char * const rate_unit[] = { "Hz", "kHz", "MHz", "GHz" };
	int max_unit = ARRAY_SIZE(rate_unit);
	unsigned long rate = 0;
	char msg_buf[128] = { };
	char *msg_end = msg_buf + sizeof(msg_buf);
	char *msg = msg_buf;
	int n = 0;

	/*
	 * Currently prints the clock state based on the clock refcount.
	 * A future change could print the hardware clock state when
	 * related clock driver provides a struct clk_ops::is_enabled handler
	 */

	if (indent) {
		/* Indent for root clock level */
		msg = add_msg(msg, msg_end, "   ");
		if (!msg)
			goto out;

		/* Indent for root parent to clock parent levels */
		for (n = 0; n < indent - 1; n++) {
			/* Draw a bar only if a sibling subtree remains */
			if (indent_last_node_already_found(clk, indent, n))
				msg = add_msg(msg, msg_end, "    ");
			else
				msg = add_msg(msg, msg_end, "|   ");

			if (!msg)
				goto out;
		}

		/* Clock indentation */
		if (clk_is_parent_last_child(clk))
			msg = add_msg(msg, msg_end, "`-- ");
		else
			msg = add_msg(msg, msg_end, "|-- ");
	} else {
		/* Root clock indentation */
		msg = add_msg(msg, msg_end, "o- ");
	}
	if (!msg)
		goto out;

	/* Scale rate while it divides exactly by 1000 (Hz -> kHz -> ...) */
	rate = clk_get_rate(clk);
	for (n = 1; rate && !(rate % 1000) && n < max_unit; n++)
		rate /= 1000;

	msg = add_msg(msg, msg_end, "%s \t(%3s / refcnt %u / %ld %s)",
		      clk_get_name(clk),
		      refcount_val(&clk->enabled_count) ? "on " : "off",
		      refcount_val(&clk->enabled_count),
		      rate, rate_unit[n - 1]);
	if (!msg)
		goto out;

out:
	/* On overflow, overwrite the tail of the buffer with "..." */
	if (!msg)
		snprintf(msg_end - 4, 4, "...");

	DMSG("%s", msg_buf);
}
494 
/*
 * Print the whole registered clock tree, depth first.
 *
 * @parent tracks the subtree currently visited and @clk the last
 * visited sibling at that level. find_next_clk() returns NULL when
 * CFG_DRIVERS_CLK_PRINT_TREE is disabled, making the loop exit on
 * its first iteration.
 */
static void print_tree(void)
{
	struct clk *clk = NULL;
	struct clk *parent = NULL;
	struct clk *next = NULL;
	int indent = -1;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (SLIST_EMPTY(&clock_list)) {
		DMSG("-- No registered clock");
		return;
	}
#endif

	while (true) {
		next = find_next_clk(parent, clk);
		if (next) {
			print_clk(next, indent + 1);
			/* Enter the subtree of the next clock */
			parent = next;
			indent++;
			clk = NULL;
		} else {
			/*
			 * We've processed all children at this level.
			 * If parent is NULL we're at the top and are done.
			 */
			if (!parent)
				break;
			/*
			 * Move up one level to resume with the next
			 * child clock of the parent.
			 */
			clk = parent;
			parent = clk->parent;
			indent--;
		}
	}
}
534 
535 void clk_print_tree(void)
536 {
537 	if (IS_ENABLED(CFG_DRIVERS_CLK_PRINT_TREE) &&
538 	    TRACE_LEVEL >= TRACE_DEBUG) {
539 		DMSG("Clock tree summary (informative):");
540 		print_tree();
541 	}
542 }
543