/*
 * Copyright (c) 2025-2026 Texas Instruments Incorporated - https://www.ti.com
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Device Clock Management
 *
 * This module provides software support for managing clocks attached to
 * devices, including enable/disable, frequency scaling, parent selection,
 * and clock gating operations.
 */

#include <limits.h>
#include <stddef.h>

#include <ti_clk_mux.h>
#include <ti_device.h>
#include <ti_device_clk.h>

/**
 * ti_get_dev_clk() - Look up the software state for one of a device's clocks.
 * @dev: The device the clock is attached to.
 * @idx: The index of the clock on this device.
 *
 * Return: Pointer to the dev_clk state, or NULL if the index is out of
 * range, the devgroup is unavailable, or the table entry holds an invalid
 * clock ID.
 */
struct ti_dev_clk *ti_get_dev_clk(struct ti_device *dev, ti_dev_clk_idx_t idx)
{
	const struct ti_dev_data *data = ti_get_dev_data(dev);
	const struct ti_devgroup *devgrp = ti_dev_data_lookup_devgroup(data);
	uint32_t pos;

	if ((devgrp == NULL) || (idx >= data->n_clocks)) {
		return NULL;
	}

	/* Device clock entries start at dev_clk_idx within the devgroup tables */
	pos = data->dev_clk_idx + idx;
	if (!ti_clk_id_valid(devgrp->dev_clk_data[pos].clk)) {
		return NULL;
	}

	return &devgrp->dev_clk[pos];
}
38
/**
 * ti_get_dev_clk_data() - Look up the const table entry for a device clock.
 * @dev: The device the clock is attached to.
 * @idx: The index of the clock on this device.
 *
 * Return: Pointer to the dev_clk_data table entry, or NULL if the index is
 * out of range or the devgroup is unavailable.
 */
const struct ti_dev_clk_data *ti_get_dev_clk_data(struct ti_device *dev,
						  ti_dev_clk_idx_t idx)
{
	const struct ti_dev_data *data = ti_get_dev_data(dev);
	const struct ti_devgroup *devgrp = ti_dev_data_lookup_devgroup(data);

	if ((devgrp == NULL) || (idx >= data->n_clocks)) {
		return NULL;
	}

	return &devgrp->dev_clk_data[data->dev_clk_idx + idx];
}
54
/**
 * ti_dev_get_clk() - Resolve a device clock index to the underlying clock.
 * @dev: The device the clock is attached to.
 * @idx: The index of the clock on this device.
 *
 * Return: The clock referenced by the device's clock table entry, or NULL
 * if the entry does not exist or the lookup fails.
 */
struct ti_clk *ti_dev_get_clk(struct ti_device *dev, ti_dev_clk_idx_t idx)
{
	const struct ti_dev_clk_data *clk_data = ti_get_dev_clk_data(dev, idx);

	if (clk_data == NULL) {
		return NULL;
	}

	return ti_clk_lookup((ti_clk_idx_t) clk_data->clk);
}
61
/**
 * ti_device_clk_set_gated() - Set or clear the software gate on a device clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 * @gated: true to gate (disable) the clock, false to ungate it.
 *
 * If the requested gate state already matches the current state, nothing is
 * done and true is returned. Otherwise the TI_DEV_CLK_FLAG_DISABLE flag is
 * updated and, when the device itself is enabled, the clock's reference
 * count is adjusted to match (put on gate, get on ungate).
 *
 * Return: true on success, false if the clock cannot be found, is not yet
 * initialized, or (on ungate) the clock fails to enable.
 */
bool ti_device_clk_set_gated(struct ti_device *dev, ti_dev_clk_idx_t clk_idx, bool gated)
{
	const struct ti_dev_data *data = ti_get_dev_data(dev);
	const struct ti_devgroup *devgrp = ti_dev_data_lookup_devgroup(data);
	struct ti_dev_clk *dev_clkp = ti_get_dev_clk(dev, clk_idx);
	struct ti_clk *clkp = NULL;
	bool is_enabled = false;
	bool is_gated = false;
	bool ret = true;
	ti_clk_idx_t id;

	if ((dev_clkp == NULL) || (devgrp == NULL)) {
		ret = false;
	} else {
		/* Current software gate state from the DISABLE flag */
		is_gated = ((((uint32_t) (dev_clkp->flags) & TI_DEV_CLK_FLAG_DISABLE) > 0U) ?
			    true : false);
		if (is_gated != gated) {
			/*
			 * State change requested. Only transfer clock refs if
			 * the device itself is currently enabled; otherwise
			 * just the flag is updated below.
			 */
			is_enabled = (dev->flags & TI_DEV_FLAG_ENABLED_MASK) != 0UL;
			id = (ti_clk_idx_t) devgrp->dev_clk_data[data->dev_clk_idx + clk_idx].clk;
			clkp = ti_clk_lookup(id);
			if (clkp == NULL) {
				/* Clock lookup failed */
				ret = false;
			}
		}
		/*
		 * If is_gated == gated, clkp stays NULL and the chain below
		 * falls through to "Do Nothing", returning true (no-op).
		 */
	}

	if ((clkp != NULL) && ((clkp->flags & TI_CLK_FLAG_INITIALIZED) == 0U)) {
		/* Clock not yet initialized (outside devgroup) */
		ret = false;
	} else if (clkp && gated) {
		/* Gate: set the flag, then drop our reference if held */
		dev_clkp->flags |= TI_DEV_CLK_FLAG_DISABLE;
		if (is_enabled) {
			ti_clk_put(clkp);
		}
	} else if (clkp != NULL) {
		/* Ungate: clear the flag, then take a reference if needed */
		dev_clkp->flags &= (uint8_t) ~TI_DEV_CLK_FLAG_DISABLE;
		if (is_enabled) {
			/*
			 * NOTE(review): on ti_clk_get() failure the DISABLE
			 * flag remains cleared while false is returned —
			 * presumably intentional (retry on next enable);
			 * confirm against callers.
			 */
			if (ti_clk_get(clkp)) {
				ret = true;
			} else {
				ret = false;
			}
		}
	} else {
		/* Do Nothing */
	}

	return ret;
}
112
/**
 * ti_device_clk_get_sw_gated() - Query the software gate state of a clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 *
 * Return: true if the clock exists and its DISABLE flag is set, false
 * otherwise (including when the clock cannot be found).
 */
bool ti_device_clk_get_sw_gated(struct ti_device *dev, ti_dev_clk_idx_t clk_idx)
{
	struct ti_dev_clk *entry = ti_get_dev_clk(dev, clk_idx);

	if (entry == NULL) {
		return false;
	}

	return (entry->flags & TI_DEV_CLK_FLAG_DISABLE) != 0U;
}
119
/**
 * ti_device_clk_set_parent() - Select the parent of a device's mux clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the mux clock on this device.
 * @parent_idx: The device clock index of the desired parent entry.
 *
 * Validates that @clk_idx refers to a MUX-type table entry and @parent_idx
 * to a PARENT-type entry within that mux's parent range, then walks up the
 * clock tree to the underlying hardware mux and programs its parent.
 *
 * Return: true on success, false on any validation or programming failure.
 */
bool ti_device_clk_set_parent(struct ti_device *dev, ti_dev_clk_idx_t clk_idx,
			      ti_dev_clk_idx_t parent_idx)
{
	struct ti_clk *clkp;
	const struct ti_dev_clk_data *clock_data;
	const struct ti_dev_clk_data *parent_data = NULL;
	uint32_t depth;
	bool ret = true;

	clkp = ti_dev_get_clk(dev, clk_idx);
	clock_data = ti_get_dev_clk_data(dev, clk_idx);
	if ((clkp == NULL) || (clock_data == NULL)) {
		ret = false;
	}

	/* Only MUX-type device clock entries have selectable parents */
	if (ret && (clock_data->type != TI_DEV_CLK_TABLE_TYPE_MUX)) {
		ret = false;
	}

	if (ret) {
		parent_data = ti_get_dev_clk_data(dev, parent_idx);
		if (parent_data == NULL) {
			ret = false;
		}
	}

	if (ret && (parent_data->type != TI_DEV_CLK_TABLE_TYPE_PARENT)) {
		ret = false;
	}

	/* Make sure it's within this clock muxes parents */
	/*
	 * Parents occupy indices clk_idx+1 .. clk_idx+idx. Unsigned
	 * subtraction also rejects parent_idx < clk_idx (wraps large).
	 */
	if (ret && ((parent_idx - clk_idx) > clock_data->idx)) {
		ret = false;
	}

	/*
	 * Walk up toward the hardware mux. soc_clock_count bounds the walk:
	 * a chain longer than the total clock count must revisit a node,
	 * so depth exhaustion implies a cycle.
	 */
	depth = soc_clock_count;
	while (ret && (clkp != NULL) && (clkp->type != TI_CLK_TYPE_MUX) && (depth > 0U)) {
		const struct ti_clk_parent *p;

		depth--;
		p = ti_clk_mux_get_parent(clkp);
		if (p != NULL) {
			clkp = ti_clk_lookup((ti_clk_idx_t) p->clk);
		} else {
			clkp = NULL;
		}
	}

	if (depth == 0U) {
		/* Clock tree cycle detected or depth exceeded */
		ret = false;
	}

	if (clkp == NULL) {
		ret = false;
	}

	if (ret) {
		/* parent_data->idx is the hardware mux input to select */
		ret = ti_clk_mux_set_parent(clkp, parent_data->idx);
	}

	return ret;
}
183
/**
 * ti_device_clk_get_parent() - Get the current parent of a device mux clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the mux clock on this device.
 *
 * Walks up the clock tree to the underlying hardware mux, reads its current
 * parent, and maps that clock ID back to the device clock index of the
 * matching PARENT entry (within clk_idx+1 .. clk_idx+idx).
 *
 * Return: The device clock index of the active parent, or
 * TI_DEV_CLK_ID_NONE if the clock is not a mux, cannot be resolved, or the
 * parent is not in the device's parent table.
 */
ti_dev_clk_idx_t ti_device_clk_get_parent(struct ti_device *dev, ti_dev_clk_idx_t clk_idx)
{
	const struct ti_dev_data *data = ti_get_dev_data(dev);
	const struct ti_devgroup *devgroup_ptr = ti_dev_data_lookup_devgroup(data);
	const struct ti_clk_parent *p = NULL;
	struct ti_clk *clkp;
	const struct ti_dev_clk_data *clock_data;
	uint32_t depth;
	uint32_t offset;
	ti_dev_clk_idx_t i;

	clkp = ti_dev_get_clk(dev, clk_idx);
	clock_data = ti_get_dev_clk_data(dev, clk_idx);
	if ((clkp == NULL) || (clock_data == NULL) || (devgroup_ptr == NULL)) {
		return TI_DEV_CLK_ID_NONE;
	}

	/* Only MUX-type device clock entries have selectable parents */
	if (clock_data->type != TI_DEV_CLK_TABLE_TYPE_MUX) {
		return TI_DEV_CLK_ID_NONE;
	}

	/*
	 * Walk up toward the hardware mux. soc_clock_count bounds the walk:
	 * depth exhaustion can only happen if the chain revisits a node.
	 */
	depth = soc_clock_count;
	while ((clkp != NULL) && (clkp->type != TI_CLK_TYPE_MUX) && (depth > 0U)) {
		depth--;
		p = ti_clk_mux_get_parent(clkp);
		if (p != NULL) {
			clkp = ti_clk_lookup((ti_clk_idx_t) p->clk);
		} else {
			clkp = NULL;
		}
	}

	if (depth == 0U) {
		/* Clock tree cycle detected or depth exceeded */
		return TI_DEV_CLK_ID_NONE;
	}

	if (clkp == NULL) {
		return TI_DEV_CLK_ID_NONE;
	}

	/* Read the hardware mux's currently selected parent */
	p = ti_clk_mux_get_parent(clkp);
	if (p == NULL) {
		return TI_DEV_CLK_ID_NONE;
	}

	/*
	 * Map the parent's clock ID back to a device clock index. Parent
	 * entries follow the mux entry: clk_idx+1 .. clk_idx+clock_data->idx.
	 */
	offset = data->dev_clk_idx;
	for (i = 0U; i < clock_data->idx; i++) {
		if (devgroup_ptr->dev_clk_data[offset + i + clk_idx + 1U].clk == p->clk) {
			return i + clk_idx + 1U;
		}
	}

	return TI_DEV_CLK_ID_NONE;
}
239
/**
 * ti_device_clk_get_num_parents() - Get the parent count for a device clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 *
 * For non-mux clocks this is 1 if a parent exists, 0 otherwise. For mux
 * clocks the result encodes both counts (see below).
 *
 * NOTE(review): the encoded form assumes ti_dev_clk_idx_t is at least
 * 16 bits wide — confirm, otherwise the reserved count is truncated.
 *
 * Return: The (possibly encoded) parent count, or TI_DEV_CLK_ID_NONE if
 * the clock cannot be resolved.
 */
ti_dev_clk_idx_t ti_device_clk_get_num_parents(struct ti_device *dev,
					       ti_dev_clk_idx_t clk_idx)
{
	struct ti_clk *clkp;
	const struct ti_dev_clk_data *clock_data;
	ti_dev_clk_idx_t ret;

	clkp = ti_dev_get_clk(dev, clk_idx);
	clock_data = ti_get_dev_clk_data(dev, clk_idx);
	if ((clkp == NULL) || (clock_data == NULL)) {
		ret = TI_DEV_CLK_ID_NONE;
	} else if (clock_data->type != TI_DEV_CLK_TABLE_TYPE_MUX) {
		/* Non-mux clocks have at most one (fixed) parent */
		if (ti_clk_mux_get_parent(clkp) != NULL) {
			ret = 1U;
		} else {
			ret = 0U;
		}
	} else {
		/*
		 * Encode both total parents and reserved count:
		 * Bits 0-7: total parent count (including reserved)
		 * Bits 8-15: reserved parent count
		 *
		 * (The former plain "ret = clock_data->idx" assignment was a
		 * dead store immediately overwritten here; it is removed.)
		 */
		ret = (ti_dev_clk_idx_t) ((clock_data->n_reserved_parents << 8) | clock_data->idx);
		VERBOSE("%s total=%d reserved=%d encoded=0x%x\n", __func__,
			clock_data->idx, clock_data->n_reserved_parents, ret);
	}

	return ret;
}
271
/**
 * ti_dev_clk_set_freq() - Set the frequency of a device's clock.
 * @dev: The device that the clock is connected to.
 * @clk_idx: The index of the clock on this device.
 * @min_freq_hz: The minimum acceptable frequency in Hz.
 * @target_freq_hz: The target frequency in Hz.
 * @max_freq_hz: The maximum acceptable frequency in Hz.
 *
 * Locates the correct clock and calls the internal clock API set frequency
 * function. If the clock is a mux type, the request is sent to the parent
 * to avoid switching the mux.
 *
 * Return: The actual frequency set, or 0 if no frequency could be found
 * within the limits.
 */
static uint32_t ti_dev_clk_set_freq(struct ti_device *dev, ti_dev_clk_idx_t clk_idx,
				    uint32_t min_freq_hz, uint32_t target_freq_hz,
				    uint32_t max_freq_hz)
{
	const struct ti_dev_data *data = ti_get_dev_data(dev);
	const struct ti_devgroup *devgroup_ptr = ti_dev_data_lookup_devgroup(data);
	const struct ti_dev_clk_data *clock_data;
	struct ti_clk *parent = NULL;
	uint32_t div_var = 1U;
	uint32_t ret_freq = 0U;
	ti_dev_clk_idx_t clk_idx_val = clk_idx;
	bool changed;

	if (devgroup_ptr == NULL) {
		return 0U;
	}

	clock_data = ti_get_dev_clk_data(dev, clk_idx_val);
	if (clock_data == NULL) {
		/* Invalid clock idx */
		return 0U;
	}

	parent = ti_dev_get_clk(dev, clk_idx_val);
	if (parent == NULL) {
		/* Parent not present */
		return 0U;
	}

	/* Assign div based on selected clock */
	div_var = clock_data->div;

	if ((parent->type == TI_CLK_TYPE_MUX) &&
	    (clock_data->type == TI_DEV_CLK_TABLE_TYPE_MUX)) {
		const struct ti_dev_clk_data *parent_clk_data;
		/* Send to parent */
		/*
		 * Redirect the request to the currently selected parent so
		 * the frequency change cannot switch the mux selection.
		 * clk_idx_val may become TI_DEV_CLK_ID_NONE here; the NULL
		 * parent check below catches that case.
		 */
		clk_idx_val = ti_device_clk_get_parent(dev, clk_idx_val);
		parent = ti_dev_get_clk(dev, clk_idx_val);
		parent_clk_data = ti_get_dev_clk_data(dev, clk_idx_val);
		if (parent_clk_data != NULL) {
			/* We are sending to parent, so use that div instead */
			div_var = parent_clk_data->div;
		}
		if (parent == NULL) {
			/* Mux parent clock not present */
			return 0U;
		}
	}

	if (clock_data->type == TI_DEV_CLK_TABLE_TYPE_OUTPUT) {
		/* div is only for input clocks */
		div_var = 1U;
	}

	if (clock_data->modify_parent_freq == 0U) {
		/*
		 * Not allowed to change the parent rate: just check whether
		 * the current (divided) rate already satisfies the window.
		 */
		if (div_var != 0U) {
			ret_freq = ti_clk_get_freq(parent) / div_var;
			if ((ret_freq < min_freq_hz) || (ret_freq > max_freq_hz)) {
				ret_freq = 0U;
			}
		}
		return ret_freq;
	}

	/* Try to modify the frequency */
	/* 'changed' is an out-param required by the callees; unused here */
	changed = false;
	if (clock_data->type == TI_DEV_CLK_TABLE_TYPE_OUTPUT) {
		/*
		 * This is the only place device output clocks can have their
		 * frequency changed, from their own device.
		 */
		ret_freq = ti_clk_value_set_freq(parent, target_freq_hz,
						 min_freq_hz, max_freq_hz,
						 &changed);
	} else {
		ret_freq = ti_clk_generic_set_freq_parent(NULL, parent,
							  target_freq_hz,
							  min_freq_hz,
							  max_freq_hz,
							  &changed,
							  div_var);
	}

	return ret_freq;
}
372
/**
 * ti_device_clk_set_freq() - Set a device clock's frequency (boolean form).
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 * @min_freq_hz: The minimum acceptable frequency in Hz.
 * @target_freq_hz: The target frequency in Hz.
 * @max_freq_hz: The maximum acceptable frequency in Hz.
 *
 * Return: true if a frequency within the limits was set, false otherwise.
 */
bool ti_device_clk_set_freq(struct ti_device *dev, ti_dev_clk_idx_t clk_idx,
			    uint32_t min_freq_hz, uint32_t target_freq_hz,
			    uint32_t max_freq_hz)
{
	uint32_t actual_hz;

	actual_hz = ti_dev_clk_set_freq(dev, clk_idx, min_freq_hz,
					target_freq_hz, max_freq_hz);

	return actual_hz != 0U;
}
380
/**
 * ti_device_clk_get_freq() - Get the current frequency of a device clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 *
 * Return: The frequency in Hz (divided by the table divider for non-output
 * clocks), or 0 if the clock cannot be resolved or the divider is 0.
 */
uint32_t ti_device_clk_get_freq(struct ti_device *dev, ti_dev_clk_idx_t clk_idx)
{
	struct ti_clk *clkp = ti_dev_get_clk(dev, clk_idx);
	const struct ti_dev_clk_data *clk_data = ti_get_dev_clk_data(dev, clk_idx);
	uint32_t hz;

	if ((clkp == NULL) || (clk_data == NULL)) {
		return 0U;
	}

	hz = ti_clk_get_freq(clkp);

	/* Output clocks report the raw rate; input clocks apply the divider */
	if (clk_data->type == TI_DEV_CLK_TABLE_TYPE_OUTPUT) {
		return hz;
	}

	return (clk_data->div != 0U) ? (hz / clk_data->div) : 0U;
}
404
/**
 * ti_device_clk_enable() - Take a reference on a device clock if ungated.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 *
 * Does nothing when the clock is missing or software-gated via
 * TI_DEV_CLK_FLAG_DISABLE.
 */
void ti_device_clk_enable(struct ti_device *dev, ti_dev_clk_idx_t clk_idx)
{
	struct ti_dev_clk *entry = ti_get_dev_clk(dev, clk_idx);
	struct ti_clk *clkp;

	/* Software-gated clocks are not touched */
	if ((entry == NULL) || ((entry->flags & TI_DEV_CLK_FLAG_DISABLE) != 0U)) {
		return;
	}

	clkp = ti_dev_get_clk(dev, clk_idx);
	if (clkp != NULL) {
		(void)ti_clk_get(clkp);
	}
}
419
/**
 * ti_device_clk_disable() - Drop a reference on a device clock if ungated.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 *
 * Does nothing when the clock is missing or software-gated via
 * TI_DEV_CLK_FLAG_DISABLE (a gated clock holds no reference to drop).
 */
void ti_device_clk_disable(struct ti_device *dev, ti_dev_clk_idx_t clk_idx)
{
	struct ti_dev_clk *entry = ti_get_dev_clk(dev, clk_idx);
	struct ti_clk *clkp;

	/* Software-gated clocks are not touched */
	if ((entry == NULL) || ((entry->flags & TI_DEV_CLK_FLAG_DISABLE) != 0U)) {
		return;
	}

	clkp = ti_dev_get_clk(dev, clk_idx);
	if (clkp != NULL) {
		ti_clk_put(clkp);
	}
}
434
/**
 * ti_device_clk_init() - Initialize the software state of a device clock.
 * @dev: The device the clock is attached to.
 * @clk_idx: The index of the clock on this device.
 *
 * Marks PARENT-type entries gated until the host configures them, and also
 * gates clocks whose underlying clock is not yet initialized (e.g. it lives
 * in a devgroup that has not been brought up).
 */
void ti_device_clk_init(struct ti_device *dev, ti_dev_clk_idx_t clk_idx)
{
	struct ti_dev_clk *dev_clkp = ti_get_dev_clk(dev, clk_idx);
	const struct ti_dev_clk_data *dev_clk_datap = ti_get_dev_clk_data(dev, clk_idx);
	struct ti_clk *clkp;

	if ((dev_clkp == NULL) || (dev_clk_datap == NULL)) {
		return;
	}

	/* Don't configure parent clocks for anything until host does */
	if (dev_clk_datap->type == TI_DEV_CLK_TABLE_TYPE_PARENT) {
		dev_clkp->flags |= TI_DEV_CLK_FLAG_DISABLE;
	}

	clkp = ti_clk_lookup((ti_clk_idx_t) dev_clk_datap->clk);
	if ((clkp != NULL) && ((clkp->flags & TI_CLK_FLAG_INITIALIZED) == 0U)) {
		/* It's in another devgroup, don't attempt to bring it up */
		dev_clkp->flags |= TI_DEV_CLK_FLAG_DISABLE;
	}
}
461