xref: /rk3399_ARM-atf/drivers/ti/clk/ti_clk_div.c (revision a28114d66a6d43db4accef5fd5d6dab6c059e584)
1 /*
2  * Copyright (c) 2025-2026 Texas Instruments Incorporated - https://www.ti.com
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 /*
8  * TI Clock Divider Driver
9  *
10  * This driver implements programmable clock dividers that divide an input
11  * clock frequency by a configurable divisor. It handles divider value
12  * programming via register writes, validates divisor values, propagates
13  * frequency changes to parent clocks when requested, and supports both
14  * power-of-2 and arbitrary integer division ratios.
15  */
16 
#include <assert.h>
#include <errno.h>
#include <limits.h>

#include <lib/mmio.h>
#include <ti_clk_div.h>
#include <ti_clk_mux.h>
#include <ti_container_of.h>
24 
ti_clk_get_div(struct ti_clk * clkp)25 uint32_t ti_clk_get_div(struct ti_clk *clkp)
26 {
27 	const struct ti_clk_drv_div *divp;
28 
29 	assert(clkp != NULL);
30 
31 	if (clkp->type != TI_CLK_TYPE_DIV) {
32 		return 1U;
33 	}
34 
35 	divp = ti_container_of(clkp->drv, const struct ti_clk_drv_div, drv);
36 	return divp->get_div(clkp);
37 }
38 
/*
 * Set a divider clock's frequency, allowing the parent clock's frequency
 * to be modified as part of the search.
 *
 * For each candidate divisor i (scaled by the parent entry's fixed
 * divider p->div), the parent is asked to produce target_hz * divider
 * within the scaled [min, max] window; the candidate whose resulting
 * output frequency lands closest to target_hz wins. After each
 * improvement, the window is tightened so any further candidate must be
 * at least 1 Hz better, and the loop exits early on an exact match.
 *
 * Returns the achieved output frequency in Hz, or 0 if no divisor /
 * parent-frequency combination satisfies the constraints. *changed
 * reports whether any state (parent or local divider) was reprogrammed.
 */
static uint32_t ti_clk_div_set_freq_dyn_parent(struct ti_clk *clkp, uint32_t target_hz,
					       uint32_t min_hz, uint32_t max_hz,
					       bool *changed)
{
	const struct ti_clk_parent *p;
	const struct ti_clk_data_div *data_div;
	const struct ti_clk_drv_div *drv_div;
	uint32_t old_div;
	uint32_t best_div = 0U;
	uint32_t best_parent_freq = 0U;
	uint32_t updated_min_hz = min_hz;
	uint32_t updated_max_hz = max_hz;
	bool best_changed = false;
	/* Smallest output-frequency error seen so far; 0 means exact hit */
	uint32_t min_delta = (uint32_t) UINT_MAX;
	uint32_t new_target, new_min, new_max, new_parent_freq;
	uint32_t delta;
	uint32_t divider;
	bool new_target_overflow;
	uint32_t i;
	uint32_t ret;
	uint32_t new_parent_freq_div;
	uint32_t hz;
	struct ti_clk *parent = NULL;

	assert(clkp != NULL);
	assert(changed != NULL);

	*changed = false;

	p = ti_clk_mux_get_parent(clkp);
	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div, data);
	drv_div = ti_container_of(clkp->drv, const struct ti_clk_drv_div, drv);

	/* p and ti_clk_lookup verified by caller */
	if (p != NULL) {
		parent = ti_clk_lookup((ti_clk_idx_t) p->clk);
	}

	old_div = drv_div->get_div(clkp);

	/* Stop early once an exact match (min_delta == 0) has been found */
	for (i = 1U; (i <= data_div->max_div) && (p != NULL) && (min_delta != 0U); i++) {
		/* Skip divisor values the hardware cannot encode */
		if ((drv_div->valid_div != NULL) && !drv_div->valid_div(clkp, i)) {
			continue;
		}

		/* Total division between parent output and our output */
		divider = i * p->div;

		/* Make sure target fits within our clock frequency type */
		if ((UINT_MAX / divider) < min_hz) {
			continue;
		}

		new_min = updated_min_hz * divider;
		new_target = target_hz * divider;
		new_max = updated_max_hz * divider;

		/* If an overflow occurs in min, we are outside the range */
		if (new_min < updated_min_hz) {
			break;
		}

		/* Cap overflow in target */
		if (new_target < target_hz) {
			if (best_div != 0U) {
				/*
				 * Already found a working combination, don't
				 * bother with target overflows.
				 */
				break;
			}
			new_target = UINT_MAX;
			new_target_overflow = true;
		} else {
			new_target_overflow = false;
		}

		/* Cap overflow in max */
		if (new_max < updated_max_hz) {
			new_max = UINT_MAX;
		}
		/* Ask the parent to get as close as it can to the scaled target */
		if (parent != NULL) {
			new_parent_freq = ti_clk_set_freq(parent, new_target,
							  new_min, new_max, changed);
		} else {
			new_parent_freq = 0U;
		}

		/* 0 means the parent could not satisfy the scaled window */
		if (new_parent_freq == 0U) {
			continue;
		}

		if (new_target_overflow) {
			/*
			 * The target frequency was capped to UINT_MAX, our
			 * delta will be slightly less accurate.
			 */
			new_parent_freq_div = new_parent_freq / divider;
			if (new_parent_freq_div > target_hz) {
				delta = new_parent_freq_div - target_hz;
			} else {
				delta = target_hz - new_parent_freq_div;
			}
		} else {
			/* Error measured in parent-domain Hz, then scaled down */
			if (new_parent_freq > new_target) {
				delta = new_parent_freq - new_target;
			} else {
				delta = new_target - new_parent_freq;
			}
			delta /= divider;
		}
		if (delta >= min_delta) {
			continue;
		}

		/*
		 * New best candidate. Snapshot *changed, as the nested
		 * ti_clk_set_freq calls for later (worse) candidates may
		 * overwrite it.
		 */
		min_delta = delta;
		best_div = i;
		best_changed = *changed;
		best_parent_freq = new_parent_freq;

		/*
		 * Tighten min/max to decrease search space.
		 * Any new frequency must be an improvement by at least 1Hz.
		 * Note that we stop searching when min_delta reaches zero.
		 * Ensure that subtraction for min and addition for max do
		 * not overflow.
		 */
		if (min_delta != 0U) {
			hz = target_hz - (min_delta - 1U);
			if ((hz <= target_hz) && (updated_min_hz < hz)) {
				updated_min_hz = hz;
			}

			hz = target_hz + (min_delta - 1U);
			if ((hz >= target_hz) && (updated_max_hz > hz)) {
				updated_max_hz = hz;
			}
		}
	}

	if (best_div != 0U) {
		/* Actually program our own register */
		if (old_div != best_div) {
			(void)drv_div->set_div(clkp, best_div);
		}

		*changed = best_changed || (old_div != best_div);
		/* best_div != 0 implies the loop ran, so p is non-NULL here */
		ret = best_parent_freq / (best_div * p->div);
	} else {
		ret = 0U;
	}

	return ret;
}
192 
/*
 * Set a divider clock's frequency without touching the parent clock.
 *
 * Picks between the two integer divisors that bracket the target:
 * div0 = floor(parent / target) (output >= target) and div1 = div0 + 1
 * (output <= target), choosing whichever in-range option is closer.
 * *changed is unused here because the parent frequency is never
 * modified; the local register write is unconditional when a divisor
 * is accepted.
 *
 * Returns the achieved output frequency in Hz, or 0 if neither
 * candidate fits the [min_hz, max_hz] window.
 *
 * NOTE(review): assumes target_hz > 0 (used as a divisor below) and
 * min_hz <= target_hz <= max_hz — TODO confirm caller contract.
 */
uint32_t ti_clk_div_set_freq_static_parent(struct ti_clk *clkp, uint32_t target_hz,
					   uint32_t min_hz, uint32_t max_hz,
					   bool *changed __maybe_unused)
{
	const struct ti_clk_data_div *data_div;
	const struct ti_clk_drv_div *drv_div;
	uint32_t parent_freq_hz;
	uint32_t div0, div1;
	bool div0_ok, div1_ok;
	uint32_t div0_delta = 0U, div1_delta = 0U;
	uint32_t div0_hz, div1_hz;
	uint32_t n;

	assert(clkp != NULL);

	parent_freq_hz = ti_clk_get_parent_freq(clkp);

	/* Calculate 2 best potential frequencies */
	div0 = parent_freq_hz / target_hz;

	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div, data);
	drv_div = ti_container_of(clkp->drv, const struct ti_clk_drv_div, drv);

	n = data_div->max_div;

	/*
	 * Prevent out-of-bounds divider value. Rest of the code in the
	 * function will check if the resulting divider value is within
	 * the allowable min/max range.
	 */
	if (div0 > (n - 1U)) {
		div0 = n - 1U;
	}

	div1 = div0 + 1U;

	/* Move each candidate toward a hardware-encodable divisor */
	if (drv_div->valid_div != NULL) {
		for (; (div0 > 0UL) && !drv_div->valid_div(clkp, div0); div0--) {
			/* Step through loop until valid div is found */
		}

		for (; (div1 <= n) && !drv_div->valid_div(clkp, div1); div1++) {
			/* Step through loop until valid div is found */
		}
	}

	/* div0 yields a frequency at or above target; only max_hz can fail */
	div0_ok = false;
	div0_hz = 0U;
	if (div0 != 0UL) {
		div0_hz = parent_freq_hz / div0;
		/* Check for in range */
		if (div0_hz <= max_hz) {
			div0_ok = true;
			div0_delta = div0_hz - target_hz;
		} else {
			div0_hz = 0U;
		}
	}

	/*
	 * div1 yields a frequency at or below target; only min_hz can fail.
	 * NOTE(review): when div0 was clamped to n - 1 above, div1_hz can
	 * actually exceed target_hz (and max_hz), making this delta wrap
	 * and the max bound unchecked — TODO confirm this case cannot occur
	 * with real clock data.
	 */
	div1_ok = false;
	div1_hz = 0U;
	if (div1 <= n) {
		div1_hz = parent_freq_hz / div1;
		if (div1_hz >= min_hz) {
			div1_ok = true;
			div1_delta = target_hz - div1_hz;
		} else {
			div1_hz = 0U;
		}
	}

	/* Make sure at least one of them is acceptable */
	if ((div1_ok != 0U) && ((div0_ok == 0U) || (div1_delta < div0_delta))) {
		div0_ok = true;
		div0 = div1;
		div0_hz = div1_hz;
	}

	if (div0_ok != 0U) {
		/* Actually program our own register */
		(void)drv_div->set_div(clkp, div0);
	}

	return div0_hz;
}
278 
ti_clk_div_set_freq(struct ti_clk * clkp,uint32_t target_hz,uint32_t min_hz,uint32_t max_hz,bool * changed)279 uint32_t ti_clk_div_set_freq(struct ti_clk *clkp, uint32_t target_hz,
280 			     uint32_t min_hz, uint32_t max_hz,
281 			     bool *changed)
282 {
283 	const struct ti_clk_parent *p;
284 
285 	assert(clkp != NULL);
286 	assert(changed != NULL);
287 
288 	p = ti_clk_mux_get_parent(clkp);
289 
290 	*changed = false;
291 
292 	if ((p == NULL) || (ti_clk_lookup((ti_clk_idx_t) p->clk) == NULL)) {
293 		/* Cannot function without parent */
294 		return 0;
295 	}
296 
297 	if ((clkp->data_flags & TI_CLK_DATA_FLAG_MODIFY_PARENT_FREQ) != 0U) {
298 		return ti_clk_div_set_freq_dyn_parent(clkp, target_hz, min_hz,
299 						      max_hz, changed);
300 	}
301 
302 	return ti_clk_div_set_freq_static_parent(clkp, target_hz, min_hz,
303 						 max_hz, changed);
304 }
305 
ti_clk_div_get_freq(struct ti_clk * clkp)306 uint32_t ti_clk_div_get_freq(struct ti_clk *clkp)
307 {
308 	uint32_t parent_freq_hz;
309 
310 	assert(clkp != NULL);
311 
312 	parent_freq_hz = ti_clk_get_parent_freq(clkp);
313 
314 	return parent_freq_hz / ti_clk_get_div(clkp);
315 }
316 
ti_clk_div_init(struct ti_clk * clkp)317 int32_t ti_clk_div_init(struct ti_clk *clkp)
318 {
319 	const struct ti_clk_data_div *data_div;
320 	const struct ti_clk_drv_div *drv_div;
321 
322 	assert(clkp != NULL);
323 
324 	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div, data);
325 	drv_div = ti_container_of(clkp->drv, const struct ti_clk_drv_div, drv);
326 
327 	if ((clkp->data_flags & TI_CLK_DATA_FLAG_NO_HW_REINIT) != 0U) {
328 		if (drv_div->get_div != NULL) {
329 			if (drv_div->get_div(clkp) != 1U) {
330 				return 0;
331 			}
332 		}
333 	}
334 
335 	if ((data_div->default_div != 0U) && (drv_div->set_div != NULL)) {
336 		if (!drv_div->set_div(clkp, data_div->default_div)) {
337 			return -EINVAL;
338 		}
339 	}
340 
341 	return 0;
342 }
343 
ti_clk_div_reg_get_div(struct ti_clk * clkp)344 uint32_t ti_clk_div_reg_get_div(struct ti_clk *clkp)
345 {
346 	const struct ti_clk_data_div *data_div;
347 	const struct ti_clk_data_div_reg *data_reg;
348 	uint32_t reg_val;
349 	uint32_t n;
350 
351 	assert(clkp != NULL);
352 
353 	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div,
354 				   data);
355 	data_reg = ti_container_of(data_div, const struct ti_clk_data_div_reg,
356 				   data_div);
357 
358 	/*
359 	 * Hack, temporarily return parent 0 for muxes without register
360 	 * assignments.
361 	 */
362 	if (data_reg->reg == 0U) {
363 		reg_val = 1U;
364 	} else {
365 		n = data_div->max_div;
366 
367 		if (data_reg->start_at_1 == 0U) {
368 			n -= 1U;
369 		}
370 		reg_val = mmio_read_32(data_reg->reg) >> data_reg->bit;
371 
372 		reg_val &= (uint32_t) TI_MASK_COVER_FOR_NUMBER(n);
373 		if (data_reg->start_at_1 == 0U) {
374 			reg_val += 1U;
375 		}
376 	}
377 
378 	return reg_val;
379 }
380 
ti_clk_div_reg_set_div(struct ti_clk * clkp,uint32_t div)381 bool ti_clk_div_reg_set_div(struct ti_clk *clkp, uint32_t div)
382 {
383 	const struct ti_clk_data_div *data_div;
384 	const struct ti_clk_data_div_reg *data_reg;
385 	const struct ti_clk_drv_div *drv_div;
386 	bool ret = false;
387 	uint32_t n;
388 	uint32_t d_val_p = div;
389 	uint32_t reg_val;
390 	uint32_t val;
391 
392 	assert(clkp != NULL);
393 
394 	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div,
395 				   data);
396 	data_reg = ti_container_of(data_div, const struct ti_clk_data_div_reg,
397 				   data_div);
398 	drv_div = ti_container_of(clkp->drv, const struct ti_clk_drv_div, drv);
399 
400 	n = data_div->max_div;
401 	if ((d_val_p <= n) &&
402 	    ((drv_div->valid_div == NULL) || drv_div->valid_div(clkp, d_val_p))) {
403 		if (data_reg->start_at_1 == 0U) {
404 			d_val_p -= 1U;
405 			n -= 1U;
406 		}
407 
408 		reg_val = mmio_read_32((uintptr_t)data_reg->reg);
409 		val = reg_val & ~(TI_MASK_COVER_FOR_NUMBER(n) << data_reg->bit);
410 		val |= d_val_p << data_reg->bit;
411 		if (val != reg_val) {
412 			mmio_write_32((uintptr_t)data_reg->reg, val);
413 		}
414 		ret = true;
415 	}
416 	return ret;
417 }
418 
/* Read-only register divider: frequency can be queried but never set */
const struct ti_clk_drv_div ti_clk_drv_div_reg_ro = {
	.drv = {
		.get_freq = ti_clk_div_get_freq,
	},
	.get_div = ti_clk_div_reg_get_div,
};
425 
/* Read/write register divider using plain (no go-bit) field updates */
const struct ti_clk_drv_div ti_clk_drv_div_reg = {
	.drv = {
		.set_freq	= ti_clk_div_set_freq,
		.get_freq	= ti_clk_div_get_freq,
		.init		= ti_clk_div_init,
	},
	.set_div = ti_clk_div_reg_set_div,
	.get_div = ti_clk_div_reg_get_div,
};
435 
ti_clk_div_reg_go_get_div(struct ti_clk * clkp)436 uint32_t ti_clk_div_reg_go_get_div(struct ti_clk *clkp)
437 {
438 	const struct ti_clk_data_div *data_div;
439 	const struct ti_clk_data_div_reg_go *data_reg;
440 	uint32_t reg_val;
441 	uint32_t n;
442 
443 	assert(clkp != NULL);
444 
445 	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div,
446 				   data);
447 	data_reg = ti_container_of(data_div, const struct ti_clk_data_div_reg_go,
448 				   data_div);
449 
450 	/*
451 	 * Hack, temporarily return parent 0 for muxes without register
452 	 * assignments.
453 	 */
454 	if (data_reg->reg == 0U) {
455 		reg_val = 1U;
456 	} else {
457 		n = data_div->max_div;
458 
459 		if (data_reg->start_at_1 == 0U) {
460 			n -= 1U;
461 		}
462 		reg_val = mmio_read_32(data_reg->reg) >> data_reg->bit;
463 		reg_val &= (uint32_t) TI_MASK_COVER_FOR_NUMBER(n);
464 		if (data_reg->start_at_1 == 0U) {
465 			reg_val += 1U;
466 		}
467 	}
468 
469 	return reg_val;
470 }
471 
/*
 * Program the divisor of a go-bit register divider clock.
 *
 * Validates the requested divisor against the data-supplied maximum and
 * the driver's optional valid_div hook, writes the new field value with
 * the go bit cleared, then pulses the go bit (set, then clear) to latch
 * the new divisor into the hardware. The write sequence order is part
 * of the hardware contract — do not reorder.
 *
 * Returns true when the divisor was accepted and programmed.
 */
bool ti_clk_div_reg_go_set_div(struct ti_clk *clkp, uint32_t div)
{
	const struct ti_clk_data_div *data_div;
	const struct ti_clk_data_div_reg_go *data_reg;
	const struct ti_clk_drv_div *drv_div;
	bool ret = false;
	uint32_t reg_val;
	uint32_t n;
	uint32_t d_val_p = div;

	assert(clkp != NULL);

	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div,
				   data);
	data_reg = ti_container_of(data_div, const struct ti_clk_data_div_reg_go,
				   data_div);
	drv_div = ti_container_of(clkp->drv, const struct ti_clk_drv_div, drv);

	n = data_div->max_div;
	if ((d_val_p <= n) &&
	    ((drv_div->valid_div == NULL) || drv_div->valid_div(clkp, d_val_p))) {
		/* Field stores (div - 1) when the encoding starts at 0 */
		if (data_reg->start_at_1 == 0U) {
			d_val_p -= 1U;
			n -= 1U;
		}

		/* Write the new field value with the go bit held low */
		reg_val = mmio_read_32(data_reg->reg);
		reg_val &= (uint32_t) (~(TI_MASK_COVER_FOR_NUMBER(n) << (data_reg->bit)));
		reg_val &= (uint32_t) ~BIT(data_reg->go);
		reg_val |= d_val_p << data_reg->bit;
		mmio_write_32((uintptr_t)data_reg->reg, reg_val);
		ret = true;

		/* Go bit registers typically do not read back */
		/* Pulse go: rising edge latches the divisor, then clear it */
		reg_val |= BIT(data_reg->go);
		mmio_write_32((uintptr_t)data_reg->reg, reg_val);
		reg_val &= (uint32_t) ~BIT(data_reg->go);
		mmio_write_32((uintptr_t)data_reg->reg, reg_val);
	}

	return ret;
}
514 
/* Read/write register divider that latches new values via a go-bit pulse */
const struct ti_clk_drv_div ti_clk_drv_div_reg_go = {
	.drv = {
		.set_freq = ti_clk_div_set_freq,
		.get_freq = ti_clk_div_get_freq,
		.init = ti_clk_div_init,
	},
	.set_div = ti_clk_div_reg_go_set_div,
	.get_div = ti_clk_div_reg_go_get_div,
};
524 
ti_clk_div_fixed_get_div(struct ti_clk * clkp)525 static uint32_t ti_clk_div_fixed_get_div(struct ti_clk *clkp)
526 {
527 	const struct ti_clk_data_div *data_div;
528 
529 	assert(clkp != NULL);
530 
531 	data_div = ti_container_of(clkp->data, const struct ti_clk_data_div, data);
532 	return (uint32_t) data_div->max_div;
533 }
534 
/* Fixed-ratio divider: no register, divisor is constant (max_div) */
const struct ti_clk_drv_div ti_clk_drv_div_fixed = {
	.drv = {
		.get_freq = ti_clk_div_get_freq,
	},
	.get_div = ti_clk_div_fixed_get_div,
};
541