/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/cci.h>
#include <drivers/arm/gicv2.h>
#include <drivers/ti/uart/uart_16550.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>

#include <mcucfg.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>
#include <wdt.h>

#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a zero entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
	struct system_context *system,
	uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
	uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}
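/*
 * The helpers below save/restore the per-core generic timer state into/from
 * core_context.timer_data, laid out as six 64-bit slots (each stp/ldp pair
 * covers 16 bytes):
 *   [0] cntkctl_el1    [1] cntp_cval_el0
 *   [2] cntp_tval_el0  [3] cntp_ctl_el0
 *   [4] cntv_tval_el0  [5] cntv_ctl_el0
 */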
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs %x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer irq; the upper layer is expected to re-enable it. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}
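/*
 * Setting SCR_EL3.IRQ routes physical IRQs to EL3, so a pending interrupt
 * acts as a wake event for the wfi below without being taken at a lower EL.
 * The original SCR_EL3 value is restored after wakeup.
 */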
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
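/*
 * Warm-boot entry point. It is registered by plat_setup_psci_ops() and
 * programmed into a core's reset vector address (rv_addr) register before
 * that core is powered on or enters suspend.
 */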
static uintptr_t secure_entrypoint;

static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush any write to a global variable, to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush any write to a global variable, to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		wdt_suspend();
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Re-initialize the GIC and enable the cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
		wdt_resume();
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Trigger a watchdog reset */
	INFO("MTK System Reset\n");

	wdt_trigger_reset();

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
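/*
 * Without PSCI_EXTENDED_STATE_ID, the original power_state layout applies:
 * psci_get_pstate_id() returns bits[15:0] (StateID), psci_get_pstate_type()
 * bit[16] (StateType) and psci_get_pstate_pwrlvl() bits[25:24] (PowerLevel).
 */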
#if !PSCI_EXTENDED_STATE_ID
static int plat_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
#else
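/*
 * With PSCI_EXTENDED_STATE_ID, the state-id carries one local power state
 * per level. Assuming MTK_LOCAL_PSTATE_WIDTH is 4 bits, state-id 0x022 from
 * the table above parses as: level 0 -> 0x2 (OFF), level 1 -> 0x2 (OFF),
 * level 2 -> 0x0 (RUN).
 */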
int plat_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 * Currently we are using a linear search for finding the matching
	 * entry in the idle power state array. This can be made a binary
	 * search if the number of entries justifies the additional complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return an error if the entry is not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
#endif

void mtk_system_pwr_domain_resume(void)
{
	console_switch_state(CONSOLE_FLAG_BOOT);

	/* Assert that the system power domain is available on this platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();

	console_switch_state(CONSOLE_FLAG_RUNTIME);
}

static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state, i.e. for two power states X & Y, if
 * X < Y then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}