// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Bootlin
 * Copyright (c) 2023, STMicroelectronics
 */

#include <config.h>
#include <drivers/clk.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <malloc.h>
#include <stddef.h>
#include <stdio.h>

/* Global clock tree lock */
static unsigned int clk_lock = SPINLOCK_UNLOCK;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
static SLIST_HEAD(, clk) clock_list = SLIST_HEAD_INITIALIZER(clock_list);
#endif

struct clk *clk_alloc(const char *name, const struct clk_ops *ops,
		      struct clk **parent_clks, size_t parent_count)
{
	struct clk *clk = NULL;
	size_t parent = 0;

	clk = calloc(1, sizeof(*clk) + parent_count * sizeof(clk));
	if (!clk)
		return NULL;

	clk->num_parents = parent_count;
	for (parent = 0; parent < parent_count; parent++)
		clk->parents[parent] = parent_clks[parent];

	clk->name = name;
	clk->ops = ops;
	refcount_set(&clk->enabled_count, 0);

	return clk;
}

void clk_free(struct clk *clk)
{
	free(clk);
}

static bool __maybe_unused clk_check(struct clk *clk)
{
	if (!clk || !clk->ops)
		return false;

	if (clk->ops->set_parent && !clk->ops->get_parent)
		return false;

	if (clk->num_parents > 1 && !clk->ops->get_parent)
		return false;

	return true;
}

static void clk_compute_rate_no_lock(struct clk *clk)
{
	unsigned long parent_rate = 0;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->get_rate)
		clk->rate = clk->ops->get_rate(clk, parent_rate);
	else
		clk->rate = parent_rate;
}

struct clk *clk_get_parent_by_index(struct clk *clk, size_t pidx)
{
	if (pidx >= clk->num_parents)
		return NULL;

	return clk->parents[pidx];
}

static void clk_init_parent(struct clk *clk)
{
	size_t pidx = 0;

	switch (clk->num_parents) {
	case 0:
		break;
	case 1:
		clk->parent = clk->parents[0];
		break;
	default:
		pidx = clk->ops->get_parent(clk);
		assert(pidx < clk->num_parents);

		clk->parent = clk->parents[pidx];
		break;
	}
}

TEE_Result clk_register(struct clk *clk)
{
	assert(clk_check(clk));

	clk_init_parent(clk);
	clk_compute_rate_no_lock(clk);

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	SLIST_INSERT_HEAD(&clock_list, clk, link);
#endif

	DMSG("Registered clock %s, freq %lu", clk->name, clk_get_rate(clk));

	return TEE_SUCCESS;
}
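
/*
 * Informative example (not part of the driver): a minimal sketch of how a
 * clock provider could use this interface. The names my_clk_ops,
 * my_clk_get_rate and "my-clk" are hypothetical and only illustrate the
 * expected allocate/register/enable sequence.
 *
 *	static const struct clk_ops my_clk_ops = {
 *		.get_rate = my_clk_get_rate,
 *	};
 *
 *	struct clk *clk = clk_alloc("my-clk", &my_clk_ops, NULL, 0);
 *
 *	if (!clk || clk_register(clk) || clk_enable(clk))
 *		panic();
 */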

static bool clk_is_enabled_no_lock(struct clk *clk)
{
	return refcount_val(&clk->enabled_count) != 0;
}

bool clk_is_enabled(struct clk *clk)
{
	return clk_is_enabled_no_lock(clk);
}

/*
 * clk_enable_no_lock()/clk_disable_no_lock() maintain the enable refcount
 * and propagate enable/disable requests to the parent clock on the
 * 0 <-> 1 transitions. Callers must hold clk_lock.
 */
static void clk_disable_no_lock(struct clk *clk)
{
	struct clk *parent = NULL;

	if (!refcount_dec(&clk->enabled_count))
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk);

	parent = clk_get_parent(clk);
	if (parent)
		clk_disable_no_lock(parent);
}

static TEE_Result clk_enable_no_lock(struct clk *clk)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct clk *parent = NULL;

	if (refcount_inc(&clk->enabled_count))
		return TEE_SUCCESS;

	parent = clk_get_parent(clk);
	if (parent) {
		res = clk_enable_no_lock(parent);
		if (res)
			return res;
	}

	if (clk->ops->enable) {
		res = clk->ops->enable(clk);
		if (res) {
			if (parent)
				clk_disable_no_lock(parent);

			return res;
		}
	}

	refcount_set(&clk->enabled_count, 1);

	return TEE_SUCCESS;
}

TEE_Result clk_enable(struct clk *clk)
{
	uint32_t exceptions = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	exceptions = cpu_spin_lock_xsave(&clk_lock);
	res = clk_enable_no_lock(clk);
	cpu_spin_unlock_xrestore(&clk_lock, exceptions);

	return res;
}

void clk_disable(struct clk *clk)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&clk_lock);
	clk_disable_no_lock(clk);
	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}

static TEE_Result clk_set_rate_no_lock(struct clk *clk, unsigned long rate)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	unsigned long parent_rate = 0;

	if (clk->parent)
		parent_rate = clk_get_rate(clk->parent);

	res = clk->ops->set_rate(clk, rate, parent_rate);
	if (res)
		return res;

	clk_compute_rate_no_lock(clk);

	return TEE_SUCCESS;
}

TEE_Result clk_set_rate(struct clk *clk, unsigned long rate)
{
	uint32_t exceptions = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	if (!clk->ops->set_rate)
		return TEE_ERROR_NOT_SUPPORTED;

	exceptions = cpu_spin_lock_xsave(&clk_lock);

	if (clk->flags & CLK_SET_RATE_GATE && clk_is_enabled_no_lock(clk))
		res = TEE_ERROR_BAD_STATE;
	else
		res = clk_set_rate_no_lock(clk, rate);

	cpu_spin_unlock_xrestore(&clk_lock, exceptions);

	return res;
}

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}

static TEE_Result clk_get_parent_idx(struct clk *clk, struct clk *parent,
				     size_t *pidx)
{
	size_t i = 0;

	for (i = 0; i < clk_get_num_parents(clk); i++) {
		if (clk_get_parent_by_index(clk, i) == parent) {
			*pidx = i;
			return TEE_SUCCESS;
		}
	}
	EMSG("Clock %s is not a parent of clock %s", parent->name, clk->name);

	return TEE_ERROR_BAD_PARAMETERS;
}

static TEE_Result clk_set_parent_no_lock(struct clk *clk, struct clk *parent,
					 size_t pidx)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	bool was_enabled = false;

	/* Requested parent is already the one set */
	if (clk->parent == parent)
		return TEE_SUCCESS;

	was_enabled = clk_is_enabled_no_lock(clk);
	/* Call is needed to decrement refcount on current parent tree */
	if (was_enabled)
		clk_disable_no_lock(clk);

	res = clk->ops->set_parent(clk, pidx);
	if (res)
		goto out;

	clk->parent = parent;

	/* The parent changed and the rate might also have changed */
	clk_compute_rate_no_lock(clk);

out:
	/* Call is needed to increment refcount on the new parent tree */
	if (was_enabled) {
		res = clk_enable_no_lock(clk);
		if (res)
			panic("Failed to re-enable clock after setting parent");
	}

	return res;
}

TEE_Result clk_set_parent(struct clk *clk, struct clk *parent)
{
	size_t pidx = 0;
	uint32_t exceptions = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	if (clk_get_parent_idx(clk, parent, &pidx) || !clk->ops->set_parent)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = cpu_spin_lock_xsave(&clk_lock);
	if (clk->flags & CLK_SET_PARENT_GATE && clk_is_enabled_no_lock(clk)) {
		res = TEE_ERROR_BAD_STATE;
		goto out;
	}

	res = clk_set_parent_no_lock(clk, parent, pidx);
out:
	cpu_spin_unlock_xrestore(&clk_lock, exceptions);

	return res;
}

TEE_Result clk_get_rates_array(struct clk *clk, size_t start_index,
			       unsigned long *rates, size_t *nb_elts)
{
	if (!clk->ops->get_rates_array)
		return TEE_ERROR_NOT_SUPPORTED;

	return clk->ops->get_rates_array(clk, start_index, rates, nb_elts);
}

TEE_Result clk_get_duty_cycle(struct clk *clk,
			      struct clk_duty_cycle *duty_cycle)
{
	if (clk->ops->get_duty_cycle)
		return clk->ops->get_duty_cycle(clk, duty_cycle);

	if (clk->parent && (clk->flags & CLK_DUTY_CYCLE_PARENT))
		return clk_get_duty_cycle(clk->parent, duty_cycle);

	/* Default to a 50% duty cycle */
	duty_cycle->num = 1;
	duty_cycle->den = 2;

	return TEE_SUCCESS;
}
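
/*
 * The helpers below implement the debug clock tree dump exposed by
 * clk_print_tree(). They rely on clock_list, which is only populated
 * when CFG_DRIVERS_CLK_PRINT_TREE is enabled.
 */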

/* Return updated message buffer position or NULL on failure */
static __printf(3, 4) char *add_msg(char *cur, char *end, const char *fmt, ...)
{
	va_list ap = { };
	int max_len = end - cur;
	int ret = 0;

	va_start(ap, fmt);
	ret = vsnprintf(cur, max_len, fmt, ap);
	va_end(ap);

	if (ret < 0 || ret >= max_len)
		return NULL;

	return cur + ret;
}

static struct clk *find_next_clk(struct clk *parent __maybe_unused,
				 struct clk *sibling __maybe_unused)
{
	struct clk *clk = NULL;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (sibling)
		clk = SLIST_NEXT(sibling, link);
	else
		clk = SLIST_FIRST(&clock_list);

	while (clk && clk->parent != parent)
		clk = SLIST_NEXT(clk, link);
#endif

	return clk;
}

static bool clk_is_parent_last_child(struct clk *clk)
{
	return !find_next_clk(clk->parent, clk);
}

static bool indent_last_node_already_found(struct clk *node_clk,
					   int node_indent, int cur_indent)
{
	struct clk *clk = node_clk;
	int n = 0;

	/* Find parent clock at level @node_indent - @cur_indent - 1 */
	for (n = 0; n < node_indent - cur_indent - 1; n++)
		clk = clk->parent;

	return clk_is_parent_last_child(clk);
}

static void __maybe_unused print_clk(struct clk *clk, int indent)
{
	static const char * const rate_unit[] = { "Hz", "kHz", "MHz", "GHz" };
	int max_unit = ARRAY_SIZE(rate_unit);
	unsigned long rate = 0;
	char msg_buf[128] = { };
	char *msg_end = msg_buf + sizeof(msg_buf);
	char *msg = msg_buf;
	int n = 0;

	/*
	 * Currently prints the clock state based on the clock refcount.
	 * A future change could print the hardware clock state when the
	 * related clock driver provides a struct clk_ops::is_enabled handler.
	 */

	if (indent) {
		/* Indent for root clock level */
		msg = add_msg(msg, msg_end, "   ");
		if (!msg)
			goto out;

		/* Indent for root parent to clock parent levels */
		for (n = 0; n < indent - 1; n++) {
			if (indent_last_node_already_found(clk, indent, n))
				msg = add_msg(msg, msg_end, "    ");
			else
				msg = add_msg(msg, msg_end, "|   ");

			if (!msg)
				goto out;
		}

		/* Clock indentation */
		if (clk_is_parent_last_child(clk))
			msg = add_msg(msg, msg_end, "`-- ");
		else
			msg = add_msg(msg, msg_end, "|-- ");
	} else {
		/* Root clock indentation */
		msg = add_msg(msg, msg_end, "o- ");
	}
	if (!msg)
		goto out;

	rate = clk_get_rate(clk);
	for (n = 1; rate && !(rate % 1000) && n < max_unit; n++)
		rate /= 1000;

	msg = add_msg(msg, msg_end, "%s \t(%3s / refcnt %u / %lu %s)",
		      clk_get_name(clk),
		      refcount_val(&clk->enabled_count) ? "on " : "off",
		      refcount_val(&clk->enabled_count),
		      rate, rate_unit[n - 1]);
	if (!msg)
		goto out;

out:
	if (!msg)
		snprintf(msg_end - 4, 4, "...");

	DMSG("%s", msg_buf);
}
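
/*
 * Iterative depth-first walk of the clock tree: find_next_clk() enumerates
 * the children of the current parent one by one, and parent back-pointers
 * are followed to climb back up once a subtree is exhausted, so the
 * traversal needs neither recursion nor an explicit stack.
 */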

static void print_tree(void)
{
	struct clk *clk = NULL;
	struct clk *parent = NULL;
	struct clk *next = NULL;
	int indent = -1;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (SLIST_EMPTY(&clock_list)) {
		DMSG("-- No registered clock");
		return;
	}
#endif

	while (true) {
		next = find_next_clk(parent, clk);
		if (next) {
			print_clk(next, indent + 1);
			/* Enter the subtree of the next clock */
			parent = next;
			indent++;
			clk = NULL;
		} else {
			/*
			 * We've processed all children at this level.
			 * If parent is NULL we're at the top and are done.
			 */
			if (!parent)
				break;
			/*
			 * Move up one level to resume with the next
			 * child clock of the parent.
			 */
			clk = parent;
			parent = clk->parent;
			indent--;
		}
	}
}

void clk_print_tree(void)
{
	if (IS_ENABLED(CFG_DRIVERS_CLK_PRINT_TREE) &&
	    TRACE_LEVEL >= TRACE_DEBUG) {
		DMSG("Clock tree summary (informative):");
		print_tree();
	}
}