// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Bootlin
 * Copyright (c) 2023, STMicroelectronics
 */

#include <config.h>
#include <drivers/clk.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <malloc.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Global clock tree lock: serializes enable/disable refcounting and
 * rate/parent updates across the whole clock tree.
 */
static unsigned int clk_lock = SPINLOCK_UNLOCK;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
/* List of every registered clock; used only by the tree printer below */
static SLIST_HEAD(, clk) clock_list = SLIST_HEAD_INITIALIZER(clock_list);
#endif

/*
 * Allocate a clock structure with room for @parent_count parent pointers.
 *
 * The struct clk and its trailing parents[] pointer array are allocated
 * in a single calloc() call. @name and @ops are referenced, not copied,
 * so they must outlive the clock. The enable refcount starts at 0.
 *
 * Returns the new clock or NULL on allocation failure.
 */
struct clk *clk_alloc(const char *name, const struct clk_ops *ops,
		      struct clk **parent_clks, size_t parent_count)
{
	struct clk *clk = NULL;
	size_t parent = 0;

	/* One allocation covers the struct plus the parents[] array */
	clk = calloc(1, sizeof(*clk) + parent_count * sizeof(clk));
	if (!clk)
		return NULL;

	clk->num_parents = parent_count;
	for (parent = 0; parent < parent_count; parent++)
		clk->parents[parent] = parent_clks[parent];

	clk->name = name;
	clk->ops = ops;
	refcount_set(&clk->enabled_count, 0);

	return clk;
}

/* Release a clock obtained from clk_alloc(); free(NULL) is a no-op */
void clk_free(struct clk *clk)
{
	free(clk);
}

/*
 * Sanity-check a clock before registration:
 * - it must exist and provide an ops table;
 * - a clock that can switch parent (set_parent) must be able to report
 *   its current parent (get_parent);
 * - a multi-parent clock needs get_parent to resolve which parent is
 *   active at init time.
 */
static bool __maybe_unused clk_check(struct clk *clk)
{
	if (!clk || !clk->ops)
		return false;

	if (clk->ops->set_parent && !clk->ops->get_parent)
		return false;

	if (clk->num_parents > 1 && !clk->ops->get_parent)
		return false;

	return true;
}

/*
 * Refresh the cached clk->rate: query ops->get_rate() when provided,
 * otherwise inherit the parent rate (0 for a parentless root without
 * a get_rate handler). Called at registration and, under clk_lock,
 * whenever the rate or parent changes.
 */
static void clk_compute_rate_no_lock(struct clk *clk)
{
	unsigned long parent_rate = 0;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->get_rate)
		clk->rate = clk->ops->get_rate(clk, parent_rate);
	else
		clk->rate = parent_rate;
}

/* Return parent clock at index @pidx, or NULL if out of range */
struct clk *clk_get_parent_by_index(struct clk *clk, size_t pidx)
{
	if (pidx >= clk->num_parents)
		return NULL;

	return clk->parents[pidx];
}

/*
 * Resolve clk->parent from the parents[] array:
 * - 0 parents: root clock, parent stays NULL;
 * - 1 parent: the only candidate;
 * - several parents: ask the driver which one is currently selected.
 */
static void clk_init_parent(struct clk *clk)
{
	size_t pidx = 0;

	switch (clk->num_parents) {
	case 0:
		break;
	case 1:
		clk->parent = clk->parents[0];
		break;
	default:
		/* clk_check() guarantees get_parent exists here */
		pidx = clk->ops->get_parent(clk);
		assert(pidx < clk->num_parents);

		clk->parent = clk->parents[pidx];
		break;
	}
}

/*
 * Register a clock: validate it, resolve its active parent, cache its
 * initial rate, and (when tree printing is enabled) add it to the
 * global clock list. Always succeeds; invalid clocks trip the assert.
 */
TEE_Result clk_register(struct clk *clk)
{
	assert(clk_check(clk));

	clk_init_parent(clk);
	clk_compute_rate_no_lock(clk);

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	SLIST_INSERT_HEAD(&clock_list, clk, link);
#endif

	DMSG("Registered clock %s, freq %lu", clk->name, clk_get_rate(clk));

	return TEE_SUCCESS;
}

/* Enabled means at least one holder of the enable refcount */
static bool clk_is_enabled_no_lock(struct clk *clk)
{
	return refcount_val(&clk->enabled_count) != 0;
}

/*
 * Lockless query of the enable state. The refcount is read without
 * taking clk_lock, so the result may already be stale when the caller
 * acts on it; use only as a hint.
 */
bool clk_is_enabled(struct clk *clk)
{
	return clk_is_enabled_no_lock(clk);
}

/*
 * Drop one enable reference; the last reference gates the clock in
 * hardware and then releases the parent chain recursively.
 *
 * NOTE(review): relies on refcount_dec() returning true only when the
 * count drops to zero — confirm against kernel/refcount.h semantics.
 * Caller must hold clk_lock.
 */
static void clk_disable_no_lock(struct clk *clk)
{
	struct clk *parent = NULL;

	if (!refcount_dec(&clk->enabled_count))
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk);

	parent = clk_get_parent(clk);
	if (parent)
		clk_disable_no_lock(parent);
}

/*
 * Take one enable reference. The first user enables the parent chain
 * bottom-up, then the clock itself; if the clock's own enable fails,
 * the parent reference just taken is rolled back.
 *
 * NOTE(review): relies on refcount_inc() returning true when the count
 * was already non-zero (clock already enabled) — confirm against
 * kernel/refcount.h. Caller must hold clk_lock.
 */
static TEE_Result clk_enable_no_lock(struct clk *clk)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct clk *parent = NULL;

	if (refcount_inc(&clk->enabled_count))
		return TEE_SUCCESS;

	parent = clk_get_parent(clk);
	if (parent) {
		res = clk_enable_no_lock(parent);
		if (res)
			return res;
	}

	if (clk->ops->enable) {
		res = clk->ops->enable(clk);
		if (res) {
			/* Undo the parent enable taken above */
			if (parent)
				clk_disable_no_lock(parent);

			return res;
		}
	}

	/* Publish the first reference only once the clock is running */
	refcount_set(&clk->enabled_count, 1);

	return TEE_SUCCESS;
}

/* Enable a clock (and its parent chain) under the global tree lock */
TEE_Result clk_enable(struct clk *clk)
{
	uint32_t exceptions = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	exceptions = cpu_spin_lock_xsave(&clk_lock);
	res = clk_enable_no_lock(clk);
	cpu_spin_unlock_xrestore(&clk_lock, exceptions);

	return res;
}

/* Release one enable reference under the global tree lock */
void clk_disable(struct clk *clk)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&clk_lock);
	clk_disable_no_lock(clk);
	cpu_spin_unlock_xrestore(&clk_lock, exceptions);
}

/* Return the cached rate computed by clk_compute_rate_no_lock() */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}

/*
 * Apply a new rate. With CLK_SET_RATE_PARENT the request is first
 * propagated to the parent and the achieved parent rate becomes the
 * target for this clock. With CLK_SET_RATE_UNGATE the clock is briefly
 * enabled around the driver call. Caller must hold clk_lock.
 *
 * NOTE(review): @parent_rate is captured before the CLK_SET_RATE_PARENT
 * propagation and is not refreshed afterwards, so ops->set_rate() sees
 * the pre-update parent rate — confirm drivers expect this.
 */
static TEE_Result clk_set_rate_no_lock(struct clk *clk, unsigned long rate)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	unsigned long parent_rate = 0;

	if (clk->parent)
		parent_rate = clk_get_rate(clk->parent);

	/* CLK_SET_RATE_PARENT is meaningless on a parentless clock */
	assert(!(clk->flags & CLK_SET_RATE_PARENT) || clk->parent);
	if (clk->flags & CLK_SET_RATE_PARENT) {
		res = clk_set_rate_no_lock(clk->parent, rate);
		if (res)
			return res;
		rate = clk_get_rate(clk->parent);
	}

	if (clk->ops->set_rate) {
		if (clk->flags & CLK_SET_RATE_UNGATE) {
			res = clk_enable_no_lock(clk);
			if (res)
				return res;
		}

		res = clk->ops->set_rate(clk, rate, parent_rate);

		if (clk->flags & CLK_SET_RATE_UNGATE)
			clk_disable_no_lock(clk);

		if (res)
			return res;
	}

	/* Re-read the effective rate; drivers may round the request */
	clk_compute_rate_no_lock(clk);

	return TEE_SUCCESS;
}

/*
 * Set the clock rate under clk_lock. Clocks flagged CLK_SET_RATE_GATE
 * refuse a rate change while enabled (TEE_ERROR_BAD_STATE).
 */
TEE_Result clk_set_rate(struct clk *clk, unsigned long rate)
{
	uint32_t exceptions = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	exceptions = cpu_spin_lock_xsave(&clk_lock);

	if (clk->flags & CLK_SET_RATE_GATE && clk_is_enabled_no_lock(clk))
		res = TEE_ERROR_BAD_STATE;
	else
		res = clk_set_rate_no_lock(clk, rate);

	cpu_spin_unlock_xrestore(&clk_lock, exceptions);

	return res;
}

/* Return the currently selected parent, or NULL for a root clock */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}

/*
 * Find @parent's index in @clk's parents[] array, stored through @pidx.
 * Returns TEE_ERROR_BAD_PARAMETERS if @parent is not a possible parent.
 */
static TEE_Result clk_get_parent_idx(struct clk *clk, struct clk *parent,
				     size_t *pidx)
{
	size_t i = 0;

	for (i = 0; i < clk_get_num_parents(clk); i++) {
		if (clk_get_parent_by_index(clk, i) == parent) {
			*pidx = i;
			return TEE_SUCCESS;
		}
	}
	EMSG("Clock %s is not a parent of clock %s", parent->name,
	     clk->name);

	return TEE_ERROR_BAD_PARAMETERS;
}

/*
 * Switch @clk to @parent (index @pidx). An enabled clock is disabled
 * around the switch so that enable refcounts migrate from the old
 * parent chain to the new one; failing to re-enable afterwards is
 * unrecoverable and panics. Caller must hold clk_lock.
 */
static TEE_Result clk_set_parent_no_lock(struct clk *clk, struct clk *parent,
					 size_t pidx)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	bool was_enabled = false;

	/* Requested parent is already the one set */
	if (clk->parent == parent)
		return TEE_SUCCESS;

	was_enabled = clk_is_enabled_no_lock(clk);
	/* Call is needed to decrement refcount on current parent tree */
	if (was_enabled)
		clk_disable_no_lock(clk);

	res = clk->ops->set_parent(clk, pidx);
	if (res)
		goto out;

	clk->parent = parent;

	/* The parent changed and the rate might also have changed */
	clk_compute_rate_no_lock(clk);

out:
	/* Call is needed to increment refcount on the new parent tree */
	if (was_enabled) {
		res = clk_enable_no_lock(clk);
		if (res)
			panic("Failed to re-enable clock after setting parent");
	}

	return res;
}

/*
 * Re-parent a clock under clk_lock. Fails with BAD_PARAMETERS when
 * @parent is not a candidate or the driver cannot switch parents, and
 * with BAD_STATE when the clock is CLK_SET_PARENT_GATE and enabled.
 */
TEE_Result clk_set_parent(struct clk *clk, struct clk *parent)
{
	size_t pidx = 0;
	uint32_t exceptions = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	if (clk_get_parent_idx(clk, parent, &pidx) || !clk->ops->set_parent)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = cpu_spin_lock_xsave(&clk_lock);
	if (clk->flags & CLK_SET_PARENT_GATE && clk_is_enabled_no_lock(clk)) {
		res = TEE_ERROR_BAD_STATE;
		goto out;
	}

	res = clk_set_parent_no_lock(clk, parent, pidx);
out:
	cpu_spin_unlock_xrestore(&clk_lock, exceptions);

	return res;
}

/*
 * Enumerate supported rates via the driver's get_rates_array handler.
 * NOT_SUPPORTED when the driver does not implement it.
 */
TEE_Result clk_get_rates_array(struct clk *clk, size_t start_index,
			       unsigned long *rates, size_t *nb_elts)
{
	if (!clk->ops->get_rates_array)
		return TEE_ERROR_NOT_SUPPORTED;

	return clk->ops->get_rates_array(clk, start_index, rates, nb_elts);
}

/*
 * Report the duty cycle: from the driver if it implements it, else
 * inherited from the parent when CLK_DUTY_CYCLE_PARENT is set, else a
 * default 50% (1/2).
 */
TEE_Result clk_get_duty_cycle(struct clk *clk,
			      struct clk_duty_cycle *duty_cycle)
{
	if (clk->ops->get_duty_cycle)
		return clk->ops->get_duty_cycle(clk, duty_cycle);

	if (clk->parent && (clk->flags & CLK_DUTY_CYCLE_PARENT))
		return clk_get_duty_cycle(clk->parent, duty_cycle);

	/* Default set 50% duty cycle */
	duty_cycle->num = 1;
	duty_cycle->den = 2;

	return TEE_SUCCESS;
}

/* Return updated message buffer position or NULL on failure/truncation */
static __printf(3, 4) char *add_msg(char *cur, char *end, const char *fmt, ...)
{
	va_list ap = { };
	int max_len = end - cur;
	int ret = 0;

	va_start(ap, fmt);
	ret = vsnprintf(cur, max_len, fmt, ap);
	va_end(ap);

	if (ret < 0 || ret >= max_len)
		return NULL;

	return cur + ret;
}

/*
 * Find the next registered clock whose parent is @parent, starting
 * after @sibling (or from the list head when @sibling is NULL).
 * Returns NULL when there is no further child, or always when tree
 * printing is compiled out.
 */
static struct clk *find_next_clk(struct clk *parent __maybe_unused,
				 struct clk *sibling __maybe_unused)
{
	struct clk *clk = NULL;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (sibling)
		clk = SLIST_NEXT(sibling, link);
	else
		clk = SLIST_FIRST(&clock_list);

	while (clk && clk->parent != parent)
		clk = SLIST_NEXT(clk, link);
#endif

	return clk;
}

/* True when @clk is the last listed child of its parent */
static bool clk_is_parent_last_child(struct clk *clk)
{
	return !find_next_clk(clk->parent, clk);
}

/*
 * For indentation column @cur_indent of a node printed at depth
 * @node_indent, tell whether the ancestor at that level was the last
 * child of its parent (no vertical connector needed below it).
 */
static bool indent_last_node_already_found(struct clk *node_clk,
					   int node_indent, int cur_indent)
{
	struct clk *clk = node_clk;
	int n = 0;

	/* Find parent clock at level @node_indent - @cur_indent - 1 */
	for (n = 0; n < node_indent - cur_indent - 1; n++)
		clk = clk->parent;

	return clk_is_parent_last_child(clk);
}

/*
 * Print one clock as an ASCII-art tree row into a local buffer, then
 * emit it with DMSG(). On buffer overflow the message ends with "...".
 */
static void __maybe_unused print_clk(struct clk *clk, int indent)
{
	static const char * const rate_unit[] = { "Hz", "kHz", "MHz", "GHz" };
	int max_unit = ARRAY_SIZE(rate_unit);
	unsigned long rate = 0;
	char msg_buf[128] = { };
	char *msg_end = msg_buf + sizeof(msg_buf);
	char *msg = msg_buf;
	int n = 0;

	/*
	 * Currently prints the clock state based on the clock refcount.
	 * A future change could print the hardware clock state when
	 * related clock driver provides a struct clk_ops::is_enabled handler
	 */

	if (indent) {
		/* Indent for root clock level */
		msg = add_msg(msg, msg_end, "   ");
		if (!msg)
			goto out;

		/* Indent for root parent to clock parent levels */
		for (n = 0; n < indent - 1; n++) {
			if (indent_last_node_already_found(clk, indent, n))
				msg = add_msg(msg, msg_end, "    ");
			else
				msg = add_msg(msg, msg_end, "|   ");

			if (!msg)
				goto out;
		}

		/* Clock indentation */
		if (clk_is_parent_last_child(clk))
			msg = add_msg(msg, msg_end, "`-- ");
		else
			msg = add_msg(msg, msg_end, "|-- ");
	} else {
		/* Root clock indentation */
		msg = add_msg(msg, msg_end, "o- ");
	}
	if (!msg)
		goto out;

	/* Scale the rate to the largest unit that divides it evenly */
	rate = clk_get_rate(clk);
	for (n = 1; rate && !(rate % 1000) && n < max_unit; n++)
		rate /= 1000;

	/*
	 * NOTE(review): @rate is unsigned long but printed with %ld —
	 * consider %lu (harmless for realistic clock rates).
	 */
	msg = add_msg(msg, msg_end, "%s \t(%3s / refcnt %u / %ld %s)",
		      clk_get_name(clk),
		      refcount_val(&clk->enabled_count) ? "on " : "off",
		      refcount_val(&clk->enabled_count),
		      rate, rate_unit[n - 1]);
	if (!msg)
		goto out;

out:
	/* Mark truncated output with a trailing "..." */
	if (!msg)
		snprintf(msg_end - 4, 4, "...");

	DMSG("%s", msg_buf);
}

/*
 * Iterative depth-first walk over the registered clocks, printing each
 * one with its tree indentation. @parent/@clk track the current
 * position: @clk == NULL means "first child of @parent".
 */
static void print_tree(void)
{
	struct clk *clk = NULL;
	struct clk *parent = NULL;
	struct clk *next = NULL;
	int indent = -1;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (SLIST_EMPTY(&clock_list)) {
		DMSG("-- No registered clock");
		return;
	}
#endif

	while (true) {
		next = find_next_clk(parent, clk);
		if (next) {
			print_clk(next, indent + 1);
			/* Enter the subtree of the next clock */
			parent = next;
			indent++;
			clk = NULL;
		} else {
			/*
			 * We've processed all children at this level.
			 * If parent is NULL we're at the top and are done.
			 */
			if (!parent)
				break;
			/*
			 * Move up one level to resume with the next
			 * child clock of the parent.
			 */
			clk = parent;
			parent = clk->parent;
			indent--;
		}
	}
}

/*
 * Dump the whole clock tree at debug trace level; no-op unless
 * CFG_DRIVERS_CLK_PRINT_TREE is enabled and TRACE_LEVEL covers DEBUG.
 */
void clk_print_tree(void)
{
	if (IS_ENABLED(CFG_DRIVERS_CLK_PRINT_TREE) &&
	    TRACE_LEVEL >= TRACE_DEBUG) {
		DMSG("Clock tree summary (informative):");
		print_tree();
	}
}