xref: /optee_os/core/drivers/clk/clk.c (revision 5f7f88c6b9d618d1e068166bbf2b07757350791d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021, Bootlin
4  * Copyright (c) 2023, STMicroelectronics
5  */
6 
7 #include <config.h>
8 #include <drivers/clk.h>
9 #include <kernel/boot.h>
10 #include <kernel/panic.h>
11 #include <kernel/spinlock.h>
12 #include <libfdt.h>
13 #include <malloc.h>
14 #include <stddef.h>
15 #include <stdio.h>
16 
17 /* Global clock tree lock */
18 static unsigned int clk_lock = SPINLOCK_UNLOCK;
19 
20 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
21 static SLIST_HEAD(, clk) clock_list = SLIST_HEAD_INITIALIZER(clock_list);
22 #endif
23 
24 struct clk *clk_alloc(const char *name, const struct clk_ops *ops,
25 		      struct clk **parent_clks, size_t parent_count)
26 {
27 	struct clk *clk = NULL;
28 	size_t parent = 0;
29 
30 	clk = calloc(1, sizeof(*clk) + parent_count * sizeof(clk));
31 	if (!clk)
32 		return NULL;
33 
34 	clk->num_parents = parent_count;
35 	for (parent = 0; parent < parent_count; parent++)
36 		clk->parents[parent] = parent_clks[parent];
37 
38 	clk->name = name;
39 	clk->ops = ops;
40 	refcount_set(&clk->enabled_count, 0);
41 
42 	return clk;
43 }
44 
/* Release a clock allocated with clk_alloc(); accepts a NULL @clk */
void clk_free(struct clk *clk)
{
	free(clk);
}
49 
50 static bool __maybe_unused clk_check(struct clk *clk)
51 {
52 	if (!clk || !clk->ops)
53 		return false;
54 
55 	if (clk->ops->set_parent && !clk->ops->get_parent)
56 		return false;
57 
58 	if (clk->num_parents > 1 && !clk->ops->get_parent)
59 		return false;
60 
61 	return true;
62 }
63 
64 static void clk_compute_rate_no_lock(struct clk *clk)
65 {
66 	unsigned long parent_rate = 0;
67 
68 	if (clk->parent)
69 		parent_rate = clk->parent->rate;
70 
71 	if (clk->ops->get_rate)
72 		clk->rate = clk->ops->get_rate(clk, parent_rate);
73 	else
74 		clk->rate = parent_rate;
75 }
76 
77 struct clk *clk_get_parent_by_index(struct clk *clk, size_t pidx)
78 {
79 	if (pidx >= clk->num_parents)
80 		return NULL;
81 
82 	return clk->parents[pidx];
83 }
84 
85 static void clk_init_parent(struct clk *clk)
86 {
87 	size_t pidx = 0;
88 
89 	switch (clk->num_parents) {
90 	case 0:
91 		break;
92 	case 1:
93 		clk->parent = clk->parents[0];
94 		break;
95 	default:
96 		pidx = clk->ops->get_parent(clk);
97 		assert(pidx < clk->num_parents);
98 
99 		clk->parent = clk->parents[pidx];
100 		break;
101 	}
102 }
103 
/*
 * Register a clock in the clock framework: resolve its active parent,
 * compute its cached rate and, when CFG_DRIVERS_CLK_PRINT_TREE is
 * enabled, insert it in the global list used for the clock tree dump.
 *
 * @clk: Clock previously filled by its driver, validated by clk_check()
 * Return TEE_SUCCESS (no failure path in the current implementation)
 */
TEE_Result clk_register(struct clk *clk)
{
	assert(clk_check(clk));

	clk_init_parent(clk);
	clk_compute_rate_no_lock(clk);

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	SLIST_INSERT_HEAD(&clock_list, clk, link);
#endif

	DMSG("Registered clock %s, freq %lu", clk->name, clk_get_rate(clk));

	return TEE_SUCCESS;
}
119 
120 static bool clk_is_enabled_no_lock(struct clk *clk)
121 {
122 	return refcount_val(&clk->enabled_count) != 0;
123 }
124 
/*
 * Tell whether @clk is currently enabled, based on its refcount.
 * Lockless read: the state may change before the caller acts on it.
 */
bool clk_is_enabled(struct clk *clk)
{
	return clk_is_enabled_no_lock(clk);
}
129 
130 static void clk_disable_no_lock(struct clk *clk)
131 {
132 	struct clk *parent = NULL;
133 
134 	if (!refcount_dec(&clk->enabled_count))
135 		return;
136 
137 	if (clk->ops->disable)
138 		clk->ops->disable(clk);
139 
140 	parent = clk_get_parent(clk);
141 	if (parent)
142 		clk_disable_no_lock(parent);
143 }
144 
/*
 * Take one enable reference on @clk, enabling it and its whole parent
 * chain on the 0 -> 1 refcount transition. Caller must hold the clock
 * lock. On failure, references taken on ancestors are rolled back.
 */
static TEE_Result clk_enable_no_lock(struct clk *clk)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct clk *parent = NULL;

	/* Fast path: clock already enabled, just add a reference */
	if (refcount_inc(&clk->enabled_count))
		return TEE_SUCCESS;

	/* First user: enable the parent chain before this clock */
	parent = clk_get_parent(clk);
	if (parent) {
		res = clk_enable_no_lock(parent);
		if (res)
			return res;
	}

	if (clk->ops->enable) {
		res = clk->ops->enable(clk);
		if (res) {
			/* Roll back the parent reference taken above */
			if (parent)
				clk_disable_no_lock(parent);

			return res;
		}
	}

	refcount_set(&clk->enabled_count, 1);

	return TEE_SUCCESS;
}
174 
175 TEE_Result clk_enable(struct clk *clk)
176 {
177 	uint32_t exceptions = 0;
178 	TEE_Result res = TEE_ERROR_GENERIC;
179 
180 	exceptions = cpu_spin_lock_xsave(&clk_lock);
181 	res = clk_enable_no_lock(clk);
182 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
183 
184 	return res;
185 }
186 
187 void clk_disable(struct clk *clk)
188 {
189 	uint32_t exceptions = 0;
190 
191 	exceptions = cpu_spin_lock_xsave(&clk_lock);
192 	clk_disable_no_lock(clk);
193 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
194 }
195 
/* Return the cached rate of @clk, as last computed by the framework */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
200 
201 static TEE_Result clk_set_rate_no_lock(struct clk *clk, unsigned long rate)
202 {
203 	TEE_Result res = TEE_ERROR_GENERIC;
204 	unsigned long parent_rate = 0;
205 
206 	if (clk->parent)
207 		parent_rate = clk_get_rate(clk->parent);
208 
209 	res = clk->ops->set_rate(clk, rate, parent_rate);
210 	if (res)
211 		return res;
212 
213 	clk_compute_rate_no_lock(clk);
214 
215 	return TEE_SUCCESS;
216 }
217 
218 TEE_Result clk_set_rate(struct clk *clk, unsigned long rate)
219 {
220 	uint32_t exceptions = 0;
221 	TEE_Result res = TEE_ERROR_GENERIC;
222 
223 	if (!clk->ops->set_rate)
224 		return TEE_ERROR_NOT_SUPPORTED;
225 
226 	exceptions =  cpu_spin_lock_xsave(&clk_lock);
227 
228 	if (clk->flags & CLK_SET_RATE_GATE && clk_is_enabled_no_lock(clk))
229 		res = TEE_ERROR_BAD_STATE;
230 	else
231 		res = clk_set_rate_no_lock(clk, rate);
232 
233 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
234 
235 	return res;
236 }
237 
/* Return the active parent of @clk, or NULL for a root clock */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
242 
243 static TEE_Result clk_get_parent_idx(struct clk *clk, struct clk *parent,
244 				     size_t *pidx)
245 {
246 	size_t i = 0;
247 
248 	for (i = 0; i < clk_get_num_parents(clk); i++) {
249 		if (clk_get_parent_by_index(clk, i) == parent) {
250 			*pidx = i;
251 			return TEE_SUCCESS;
252 		}
253 	}
254 	EMSG("Clock %s is not a parent of clock %s", parent->name, clk->name);
255 
256 	return TEE_ERROR_BAD_PARAMETERS;
257 }
258 
259 static TEE_Result clk_set_parent_no_lock(struct clk *clk, struct clk *parent,
260 					 size_t pidx)
261 {
262 	TEE_Result res = TEE_ERROR_GENERIC;
263 	bool was_enabled = false;
264 
265 	/* Requested parent is already the one set */
266 	if (clk->parent == parent)
267 		return TEE_SUCCESS;
268 
269 	was_enabled = clk_is_enabled_no_lock(clk);
270 	/* Call is needed to decrement refcount on current parent tree */
271 	if (was_enabled)
272 		clk_disable_no_lock(clk);
273 
274 	res = clk->ops->set_parent(clk, pidx);
275 	if (res)
276 		goto out;
277 
278 	clk->parent = parent;
279 
280 	/* The parent changed and the rate might also have changed */
281 	clk_compute_rate_no_lock(clk);
282 
283 out:
284 	/* Call is needed to increment refcount on the new parent tree */
285 	if (was_enabled) {
286 		res = clk_enable_no_lock(clk);
287 		if (res)
288 			panic("Failed to re-enable clock after setting parent");
289 	}
290 
291 	return res;
292 }
293 
294 TEE_Result clk_set_parent(struct clk *clk, struct clk *parent)
295 {
296 	size_t pidx = 0;
297 	uint32_t exceptions = 0;
298 	TEE_Result res = TEE_ERROR_GENERIC;
299 
300 	if (clk_get_parent_idx(clk, parent, &pidx) || !clk->ops->set_parent)
301 		return TEE_ERROR_BAD_PARAMETERS;
302 
303 	exceptions = cpu_spin_lock_xsave(&clk_lock);
304 	if (clk->flags & CLK_SET_PARENT_GATE && clk_is_enabled_no_lock(clk)) {
305 		res = TEE_ERROR_BAD_STATE;
306 		goto out;
307 	}
308 
309 	res = clk_set_parent_no_lock(clk, parent, pidx);
310 out:
311 	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
312 
313 	return res;
314 }
315 
/*
 * Query the discrete rates supported by @clk from index @start_index,
 * delegating to the driver handler. Returns TEE_ERROR_NOT_SUPPORTED
 * when the driver does not provide one.
 */
TEE_Result clk_get_rates_array(struct clk *clk, size_t start_index,
			       unsigned long *rates, size_t *nb_elts)
{
	if (!clk->ops->get_rates_array)
		return TEE_ERROR_NOT_SUPPORTED;

	return clk->ops->get_rates_array(clk, start_index, rates, nb_elts);
}
324 
/* Return updated message buffer position or NULL on failure */
326 static __printf(3, 4) char *add_msg(char *cur, char *end, const char *fmt, ...)
327 {
328 	va_list ap = { };
329 	int max_len = end - cur;
330 	int ret = 0;
331 
332 	va_start(ap, fmt);
333 	ret = vsnprintf(cur, max_len, fmt, ap);
334 	va_end(ap);
335 
336 	if (ret < 0 || ret >= max_len)
337 		return NULL;
338 
339 	return cur + ret;
340 }
341 
342 static struct clk *find_next_clk(struct clk *parent __maybe_unused,
343 				 struct clk *sibling __maybe_unused)
344 {
345 	struct clk *clk = NULL;
346 
347 #ifdef CFG_DRIVERS_CLK_PRINT_TREE
348 	if (sibling)
349 		clk = SLIST_NEXT(sibling, link);
350 	else
351 		clk = SLIST_FIRST(&clock_list);
352 
353 	while (clk && clk->parent != parent)
354 		clk = SLIST_NEXT(clk, link);
355 #endif
356 
357 	return clk;
358 }
359 
/* Tell whether @clk is the last listed child of its parent clock */
static bool clk_is_parent_last_child(struct clk *clk)
{
	return !find_next_clk(clk->parent, clk);
}
364 
365 static bool indent_last_node_already_found(struct clk *node_clk,
366 					   int node_indent, int cur_indent)
367 {
368 	struct clk *clk = node_clk;
369 	int n = 0;
370 
371 	/* Find parent clock at level @node_indent - @cur_indent - 1 */
372 	for (n = 0; n < node_indent - cur_indent - 1; n++)
373 		clk = clk->parent;
374 
375 	return clk_is_parent_last_child(clk);
376 }
377 
/*
 * Print one clock tree node with ASCII-art indentation reflecting its
 * depth and sibling position, followed by its state, refcount and rate
 * scaled to a readable unit. The line is built in a local buffer and
 * emitted in a single DMSG() call; on truncation the message is marked
 * with a trailing "...".
 *
 * @clk: Clock to print
 * @indent: Depth of @clk in the clock tree, 0 for a root clock
 */
static void __maybe_unused print_clk(struct clk *clk, int indent)
{
	static const char * const rate_unit[] = { "Hz", "kHz", "MHz", "GHz" };
	int max_unit = ARRAY_SIZE(rate_unit);
	unsigned long rate = 0;
	char msg_buf[128] = { };
	char *msg_end = msg_buf + sizeof(msg_buf);
	char *msg = msg_buf;
	int n = 0;

	/*
	 * Currently prints the clock state based on the clock refcount.
	 * A future change could print the hardware clock state when
	 * related clock driver provides a struct clk_ops::is_enabled handler
	 */

	if (indent) {
		/* Indent for root clock level */
		msg = add_msg(msg, msg_end, "   ");
		if (!msg)
			goto out;

		/* Indent for root parent to clock parent levels */
		for (n = 0; n < indent - 1; n++) {
			/* Draw a bar only while that ancestor has siblings left */
			if (indent_last_node_already_found(clk, indent, n))
				msg = add_msg(msg, msg_end, "    ");
			else
				msg = add_msg(msg, msg_end, "|   ");

			if (!msg)
				goto out;
		}

		/* Clock indentation */
		if (clk_is_parent_last_child(clk))
			msg = add_msg(msg, msg_end, "`-- ");
		else
			msg = add_msg(msg, msg_end, "|-- ");
	} else {
		/* Root clock indentation */
		msg = add_msg(msg, msg_end, "o- ");
	}
	if (!msg)
		goto out;

	/* Scale the rate to the largest unit that divides it evenly */
	rate = clk_get_rate(clk);
	for (n = 1; rate && !(rate % 1000) && n < max_unit; n++)
		rate /= 1000;

	msg = add_msg(msg, msg_end, "%s \t(%3s / refcnt %u / %ld %s)",
		      clk_get_name(clk),
		      refcount_val(&clk->enabled_count) ? "on " : "off",
		      refcount_val(&clk->enabled_count),
		      rate, rate_unit[n - 1]);
	if (!msg)
		goto out;

out:
	/* add_msg() failed at some point: flag the message as truncated */
	if (!msg)
		snprintf(msg_end - 4, 4, "...");

	DMSG("%s", msg_buf);
}
441 
/*
 * Dump the whole clock tree with a depth-first traversal. The walk is
 * iterative and needs no extra storage: it moves down via
 * find_next_clk() and back up via the clk->parent links. When
 * CFG_DRIVERS_CLK_PRINT_TREE is disabled find_next_clk() always
 * returns NULL and the loop exits immediately.
 */
static void print_tree(void)
{
	struct clk *clk = NULL;
	struct clk *parent = NULL;
	struct clk *next = NULL;
	int indent = -1;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (SLIST_EMPTY(&clock_list)) {
		DMSG("-- No registered clock");
		return;
	}
#endif

	/* @parent is the subtree being walked, @clk the last visited child */
	while (true) {
		next = find_next_clk(parent, clk);
		if (next) {
			print_clk(next, indent + 1);
			/* Enter the subtree of the next clock */
			parent = next;
			indent++;
			clk = NULL;
		} else {
			/*
			 * We've processed all children at this level.
			 * If parent is NULL we're at the top and are done.
			 */
			if (!parent)
				break;
			/*
			 * Move up one level to resume with the next
			 * child clock of the parent.
			 */
			clk = parent;
			parent = clk->parent;
			indent--;
		}
	}
}
481 
482 void clk_print_tree(void)
483 {
484 	if (IS_ENABLED(CFG_DRIVERS_CLK_PRINT_TREE) &&
485 	    TRACE_LEVEL >= TRACE_DEBUG) {
486 		DMSG("Clock tree summary (informative):");
487 		print_tree();
488 	}
489 }
490