xref: /optee_os/core/drivers/clk/clk.c (revision c95d740ab3604844575dc99dad8bd512781c5d07)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021, Bootlin
4  * Copyright (c) 2023, STMicroelectronics
5  */
6 
7 #include <config.h>
8 #include <drivers/clk.h>
9 #include <kernel/boot.h>
10 #include <kernel/mutex_pm_aware.h>
11 #include <kernel/panic.h>
12 #include <kernel/thread.h>
13 #include <malloc.h>
14 #include <stddef.h>
15 #include <stdio.h>
16 
17 /* Global clock tree protection compliant with power state transitions */
18 static struct mutex_pm_aware mu = MUTEX_PM_AWARE_INITIALIZER;
19 
20 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
21 static SLIST_HEAD(, clk) clock_list = SLIST_HEAD_INITIALIZER(clock_list);
22 #endif
23 
24 static void lock_clk(void)
25 {
26 	mutex_pm_aware_lock(&mu);
27 }
28 
29 static void unlock_clk(void)
30 {
31 	mutex_pm_aware_unlock(&mu);
32 }
33 
34 struct clk *clk_alloc(const char *name, const struct clk_ops *ops,
35 		      struct clk **parent_clks, size_t parent_count)
36 {
37 	struct clk *clk = NULL;
38 	size_t parent = 0;
39 
40 	clk = calloc(1, sizeof(*clk) + parent_count * sizeof(clk));
41 	if (!clk)
42 		return NULL;
43 
44 	clk->num_parents = parent_count;
45 	for (parent = 0; parent < parent_count; parent++)
46 		clk->parents[parent] = parent_clks[parent];
47 
48 	clk->name = name;
49 	clk->ops = ops;
50 	refcount_set(&clk->enabled_count, 0);
51 
52 	return clk;
53 }
54 
55 void clk_free(struct clk *clk)
56 {
57 	free(clk);
58 }
59 
60 static bool __maybe_unused clk_check(struct clk *clk)
61 {
62 	if (!clk || !clk->ops)
63 		return false;
64 
65 	if (clk->ops->set_parent && !clk->ops->get_parent)
66 		return false;
67 
68 	if (clk->num_parents > 1 && !clk->ops->get_parent)
69 		return false;
70 
71 	return true;
72 }
73 
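/*
 * Refresh the cached rate of @clk by walking up the parent chain: the
 * parent rate is recomputed first, then the driver's get_rate handler is
 * used when provided, otherwise the parent rate is inherited as-is.
 */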
74 static void clk_compute_rate_no_lock(struct clk *clk)
75 {
76 	unsigned long parent_rate = 0;
77 
78 	if (clk->parent) {
79 		clk_compute_rate_no_lock(clk->parent);
80 		parent_rate = clk->parent->rate;
81 	}
82 
83 	if (clk->ops->get_rate)
84 		clk->rate = clk->ops->get_rate(clk, parent_rate);
85 	else
86 		clk->rate = parent_rate;
87 }
88 
89 struct clk *clk_get_parent_by_index(struct clk *clk, size_t pidx)
90 {
91 	if (pidx >= clk->num_parents)
92 		return NULL;
93 
94 	return clk->parents[pidx];
95 }
96 
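/*
 * Resolve the effective parent of @clk: a single-parent clock uses its
 * only candidate while a multiplexer asks the driver through the
 * get_parent handler (mandatory when num_parents > 1, see clk_check()).
 */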
97 static void clk_init_parent(struct clk *clk)
98 {
99 	size_t pidx = 0;
100 
101 	switch (clk->num_parents) {
102 	case 0:
103 		break;
104 	case 1:
105 		clk->parent = clk->parents[0];
106 		break;
107 	default:
108 		pidx = clk->ops->get_parent(clk);
109 		assert(pidx < clk->num_parents);
110 
111 		clk->parent = clk->parents[pidx];
112 		break;
113 	}
114 }
115 
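/*
 * Illustrative provider-side sketch of the registration API (my_clk_ops
 * and the clock name below are hypothetical, error handling is trimmed):
 *
 *	struct clk *clk = clk_alloc("my-clock", &my_clk_ops, NULL, 0);
 *
 *	if (!clk)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	if (clk_register(clk)) {
 *		clk_free(clk);
 *		return TEE_ERROR_GENERIC;
 *	}
 */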
116 TEE_Result clk_register(struct clk *clk)
117 {
118 	assert(clk_check(clk));
119 
120 	clk_init_parent(clk);
121 	clk_compute_rate_no_lock(clk);
122 
123 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
124 	SLIST_INSERT_HEAD(&clock_list, clk, link);
125 #endif
126 
127 	DMSG("Registered clock %s, freq %lu", clk->name, clk_get_rate(clk));
128 
129 	return TEE_SUCCESS;
130 }
131 
132 static bool clk_is_enabled_no_lock(struct clk *clk)
133 {
134 	return refcount_val(&clk->enabled_count) != 0;
135 }
136 
137 bool clk_is_enabled(struct clk *clk)
138 {
139 	return clk_is_enabled_no_lock(clk);
140 }
141 
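/*
 * Decrement the enable refcount of @clk: only the last reference gates the
 * clock through the driver's disable handler and then releases the
 * reference taken on its parent.
 */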
142 static void clk_disable_no_lock(struct clk *clk)
143 {
144 	struct clk *parent = NULL;
145 
146 	if (!refcount_dec(&clk->enabled_count))
147 		return;
148 
149 	if (clk->ops->disable)
150 		clk->ops->disable(clk);
151 
152 	parent = clk_get_parent(clk);
153 	if (parent)
154 		clk_disable_no_lock(parent);
155 }
156 
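/*
 * Increment the enable refcount of @clk: the first reference enables the
 * parent chain first, then the clock itself through the driver's enable
 * handler, dropping the parent reference again if the latter fails.
 */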
157 static TEE_Result clk_enable_no_lock(struct clk *clk)
158 {
159 	TEE_Result res = TEE_ERROR_GENERIC;
160 	struct clk *parent = NULL;
161 
162 	if (refcount_inc(&clk->enabled_count))
163 		return TEE_SUCCESS;
164 
165 	parent = clk_get_parent(clk);
166 	if (parent) {
167 		res = clk_enable_no_lock(parent);
168 		if (res)
169 			return res;
170 	}
171 
172 	if (clk->ops->enable) {
173 		res = clk->ops->enable(clk);
174 		if (res) {
175 			if (parent)
176 				clk_disable_no_lock(parent);
177 
178 			return res;
179 		}
180 	}
181 
182 	refcount_set(&clk->enabled_count, 1);
183 
184 	return TEE_SUCCESS;
185 }
186 
187 TEE_Result clk_enable(struct clk *clk)
188 {
189 	TEE_Result res = TEE_ERROR_GENERIC;
190 
191 	lock_clk();
192 	res = clk_enable_no_lock(clk);
193 	unlock_clk();
194 
195 	return res;
196 }
197 
198 void clk_disable(struct clk *clk)
199 {
200 	lock_clk();
201 	clk_disable_no_lock(clk);
202 	unlock_clk();
203 }
204 
205 unsigned long clk_get_rate(struct clk *clk)
206 {
207 	clk_compute_rate_no_lock(clk);
208 
209 	return clk->rate;
210 }
211 
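/*
 * Apply a rate request on @clk: CLK_SET_RATE_PARENT first propagates the
 * request to the parent clock and CLK_SET_RATE_UNGATE temporarily enables
 * the clock around the driver's set_rate handler. Cached rates are
 * refreshed once the new rate is applied.
 */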
212 static TEE_Result clk_set_rate_no_lock(struct clk *clk, unsigned long rate)
213 {
214 	TEE_Result res = TEE_ERROR_GENERIC;
215 	unsigned long parent_rate = 0;
216 
217 	if (clk->parent)
218 		parent_rate = clk_get_rate(clk->parent);
219 
220 	assert(!(clk->flags & CLK_SET_RATE_PARENT) || clk->parent);
221 	if (clk->flags & CLK_SET_RATE_PARENT) {
222 		res = clk_set_rate_no_lock(clk->parent, rate);
223 		if (res)
224 			return res;
225 		rate = clk_get_rate(clk->parent);
226 	}
227 
228 	if (clk->ops->set_rate) {
229 		if (clk->flags & CLK_SET_RATE_UNGATE) {
230 			res = clk_enable_no_lock(clk);
231 			if (res)
232 				return res;
233 		}
234 
235 		res = clk->ops->set_rate(clk, rate, parent_rate);
236 
237 		if (clk->flags & CLK_SET_RATE_UNGATE)
238 			clk_disable_no_lock(clk);
239 
240 		if (res)
241 			return res;
242 	}
243 
244 	clk_compute_rate_no_lock(clk);
245 
246 	return TEE_SUCCESS;
247 }
248 
249 TEE_Result clk_set_rate(struct clk *clk, unsigned long rate)
250 {
251 	TEE_Result res = TEE_ERROR_GENERIC;
252 
253 	lock_clk();
254 
255 	if (clk->flags & CLK_SET_RATE_GATE && clk_is_enabled_no_lock(clk))
256 		res = TEE_ERROR_BAD_STATE;
257 	else
258 		res = clk_set_rate_no_lock(clk, rate);
259 
260 	unlock_clk();
261 
262 	return res;
263 }
264 
265 struct clk *clk_get_parent(struct clk *clk)
266 {
267 	return clk->parent;
268 }
269 
270 static TEE_Result clk_get_parent_idx(struct clk *clk, struct clk *parent,
271 				     size_t *pidx)
272 {
273 	size_t i = 0;
274 
275 	for (i = 0; i < clk_get_num_parents(clk); i++) {
276 		if (clk_get_parent_by_index(clk, i) == parent) {
277 			*pidx = i;
278 			return TEE_SUCCESS;
279 		}
280 	}
281 	EMSG("Clock %s is not a parent of clock %s", parent->name, clk->name);
282 
283 	return TEE_ERROR_BAD_PARAMETERS;
284 }
285 
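/*
 * Switch @clk to parent @parent (parent index @pidx). An enabled clock is
 * disabled across the transition, optionally keeping the new parent
 * running (CLK_SET_PARENT_PRE_ENABLE), then re-enabled so the enable
 * refcounts move from the old parent tree to the new one.
 */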
286 static TEE_Result clk_set_parent_no_lock(struct clk *clk, struct clk *parent,
287 					 size_t pidx)
288 {
289 	TEE_Result res = TEE_ERROR_GENERIC;
290 	bool was_enabled = false;
291 
292 	/* Requested parent is already the one set */
293 	if (clk->parent == parent)
294 		return TEE_SUCCESS;
295 
296 	was_enabled = clk_is_enabled_no_lock(clk);
297 	/* Disabling decrements the refcount on the current parent tree */
298 	if (was_enabled) {
299 		if (clk->flags & CLK_SET_PARENT_PRE_ENABLE) {
300 			res = clk_enable_no_lock(parent);
301 			if (res)
302 				return res;
303 		}
304 
305 		clk_disable_no_lock(clk);
306 	}
307 
308 	res = clk->ops->set_parent(clk, pidx);
309 	if (res)
310 		goto out;
311 
312 	clk->parent = parent;
313 
314 	/* The parent changed and the rate might also have changed */
315 	clk_compute_rate_no_lock(clk);
316 
317 out:
318 	/* Re-enabling increments the refcount on the new parent tree */
319 	if (was_enabled) {
320 		res = clk_enable_no_lock(clk);
321 		if (res)
322 			panic("Failed to re-enable clock after setting parent");
323 
324 		if (clk->flags & CLK_SET_PARENT_PRE_ENABLE) {
325 			/* Balance refcount when new parent was pre-enabled */
326 			clk_disable_no_lock(parent);
327 		}
328 	}
329 
330 	return res;
331 }
332 
333 TEE_Result clk_set_parent(struct clk *clk, struct clk *parent)
334 {
335 	size_t pidx = 0;
336 	TEE_Result res = TEE_ERROR_GENERIC;
337 
338 	if (clk_get_parent_idx(clk, parent, &pidx) || !clk->ops->set_parent)
339 		return TEE_ERROR_BAD_PARAMETERS;
340 
341 	lock_clk();
342 	if (clk->flags & CLK_SET_PARENT_GATE && clk_is_enabled_no_lock(clk)) {
343 		res = TEE_ERROR_BAD_STATE;
344 		goto out;
345 	}
346 
347 	res = clk_set_parent_no_lock(clk, parent, pidx);
348 out:
349 	unlock_clk();
350 
351 	return res;
352 }
353 
354 TEE_Result clk_get_rates_array(struct clk *clk, size_t start_index,
355 			       unsigned long *rates, size_t *nb_elts)
356 {
357 	if (!clk->ops->get_rates_array)
358 		return TEE_ERROR_NOT_SUPPORTED;
359 
360 	return clk->ops->get_rates_array(clk, start_index, rates, nb_elts);
361 }
362 
363 TEE_Result clk_get_rates_steps(struct clk *clk, unsigned long *min,
364 			       unsigned long *max, unsigned long *step)
365 {
366 	if (!clk->ops->get_rates_steps)
367 		return TEE_ERROR_NOT_SUPPORTED;
368 
369 	return clk->ops->get_rates_steps(clk, min, max, step);
370 }
371 
372 TEE_Result clk_get_duty_cycle(struct clk *clk,
373 			      struct clk_duty_cycle *duty_cycle)
374 {
375 	if (clk->ops->get_duty_cycle)
376 		return clk->ops->get_duty_cycle(clk, duty_cycle);
377 
378 	if (clk->parent && (clk->flags & CLK_DUTY_CYCLE_PARENT))
379 		return clk_get_duty_cycle(clk->parent, duty_cycle);
380 
381 	/* Default to a 50% duty cycle */
382 	duty_cycle->num = 1;
383 	duty_cycle->den = 2;
384 
385 	return TEE_SUCCESS;
386 }
387 
388 /* Return updated message buffer position or NULL on failure */
389 static __printf(3, 4) char *add_msg(char *cur, char *end, const char *fmt, ...)
390 {
391 	va_list ap = { };
392 	int max_len = end - cur;
393 	int ret = 0;
394 
395 	va_start(ap, fmt);
396 	ret = vsnprintf(cur, max_len, fmt, ap);
397 	va_end(ap);
398 
399 	if (ret < 0 || ret >= max_len)
400 		return NULL;
401 
402 	return cur + ret;
403 }
404 
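/*
 * Return the next registered clock that is a child of @parent, resuming
 * the registration list walk after @sibling or from the list head when
 * @sibling is NULL. Without CFG_DRIVERS_CLK_PRINT_TREE this always
 * returns NULL.
 */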
405 static struct clk *find_next_clk(struct clk *parent __maybe_unused,
406 				 struct clk *sibling __maybe_unused)
407 {
408 	struct clk *clk = NULL;
409 
410 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
411 	if (sibling)
412 		clk = SLIST_NEXT(sibling, link);
413 	else
414 		clk = SLIST_FIRST(&clock_list);
415 
416 	while (clk && clk->parent != parent)
417 		clk = SLIST_NEXT(clk, link);
418 #endif
419 
420 	return clk;
421 }
422 
423 static bool clk_is_parent_last_child(struct clk *clk)
424 {
425 	return !find_next_clk(clk->parent, clk);
426 }
427 
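/*
 * Tell whether the '|' continuation printed at indentation level
 * @cur_indent can be replaced by blanks when printing @node_clk at depth
 * @node_indent, i.e. whether the ancestor of @node_clk at depth
 * @cur_indent + 1 is the last child of its own parent.
 */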
428 static bool indent_last_node_already_found(struct clk *node_clk,
429 					   int node_indent, int cur_indent)
430 {
431 	struct clk *clk = node_clk;
432 	int n = 0;
433 
434 	/* Walk up to the ancestor at indentation level @cur_indent + 1 */
435 	for (n = 0; n < node_indent - cur_indent - 1; n++)
436 		clk = clk->parent;
437 
438 	return clk_is_parent_last_child(clk);
439 }
440 
441 static void __maybe_unused print_clk(struct clk *clk, int indent)
442 {
443 	static const char * const rate_unit[] = { "Hz", "kHz", "MHz", "GHz" };
444 	int max_unit = ARRAY_SIZE(rate_unit);
445 	unsigned long rate = 0;
446 	char msg_buf[128] = { };
447 	char *msg_end = msg_buf + sizeof(msg_buf);
448 	char *msg = msg_buf;
449 	int n = 0;
450 
451 	/*
452 	 * Currently prints the clock state based on the clock refcount.
453 	 * A future change could print the hardware clock state when the
454 	 * related clock driver provides a struct clk_ops::is_enabled handler.
455 	 */
456 
457 	if (indent) {
458 		/* Indent for root clock level */
459 		msg = add_msg(msg, msg_end, "   ");
460 		if (!msg)
461 			goto out;
462 
463 		/* Indent for each ancestor level below the root clock */
464 		for (n = 0; n < indent - 1; n++) {
465 			if (indent_last_node_already_found(clk, indent, n))
466 				msg = add_msg(msg, msg_end, "    ");
467 			else
468 				msg = add_msg(msg, msg_end, "|   ");
469 
470 			if (!msg)
471 				goto out;
472 		}
473 
474 		/* Clock indentation */
475 		if (clk_is_parent_last_child(clk))
476 			msg = add_msg(msg, msg_end, "`-- ");
477 		else
478 			msg = add_msg(msg, msg_end, "|-- ");
479 	} else {
480 		/* Root clock indentation */
481 		msg = add_msg(msg, msg_end, "o- ");
482 	}
483 	if (!msg)
484 		goto out;
485 
486 	rate = clk_get_rate(clk);
487 	for (n = 1; rate && !(rate % 1000) && n < max_unit; n++)
488 		rate /= 1000;
489 
490 	msg = add_msg(msg, msg_end, "%s \t(%3s / refcnt %u / %ld %s)",
491 		      clk_get_name(clk),
492 		      refcount_val(&clk->enabled_count) ? "on " : "off",
493 		      refcount_val(&clk->enabled_count),
494 		      rate, rate_unit[n - 1]);
495 	if (!msg)
496 		goto out;
497 
498 out:
499 	if (!msg)
500 		snprintf(msg_end - 4, 4, "...");
501 
502 	IMSG("%s", msg_buf);
503 }
504 
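/*
 * Print the registered clocks as a tree using an iterative depth-first
 * walk: descend into a clock's first child when one exists, otherwise
 * climb back to the parent and resume with its next sibling.
 */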
505 static void print_tree(void)
506 {
507 	struct clk *clk = NULL;
508 	struct clk *parent = NULL;
509 	struct clk *next = NULL;
510 	int indent = -1;
511 
512 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
513 	if (SLIST_EMPTY(&clock_list)) {
514 		IMSG("-- No registered clock");
515 		return;
516 	}
517 #endif
518 
519 	while (true) {
520 		next = find_next_clk(parent, clk);
521 		if (next) {
522 			print_clk(next, indent + 1);
523 			/* Enter the subtree of the next clock */
524 			parent = next;
525 			indent++;
526 			clk = NULL;
527 		} else {
528 			/*
529 			 * We've processed all children at this level.
530 			 * If parent is NULL we're at the top and are done.
531 			 */
532 			if (!parent)
533 				break;
534 			/*
535 			 * Move up one level to resume with the next
536 			 * child clock of the parent.
537 			 */
538 			clk = parent;
539 			parent = clk->parent;
540 			indent--;
541 		}
542 	}
543 }
544 
545 void clk_print_tree(void)
546 {
547 	if (IS_ENABLED(CFG_DRIVERS_CLK_PRINT_TREE) &&
548 	    TRACE_LEVEL >= TRACE_INFO) {
549 		IMSG("Clock tree summary (informative):");
550 		print_tree();
551 	}
552 }
553