/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/cci.h>
#include <drivers/arm/gicv2.h>
#include <drivers/ti/uart/uart_16550.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>

#include <mcucfg.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table is terminated by a zero entry.
 * As the state-ids show, each power level's local state occupies one
 * hex digit, least significant digit first: e.g. 0x022 requests core
 * OFF, cluster OFF and system RUN.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Single instance holding the saved dormant-state context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
	struct system_context *system,
	uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
	uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}
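/*
 * Layout of core_context.timer_data as used by the save/restore pair
 * below (one unsigned long slot each, stored with stp in pairs; this is
 * derived from the stp/ldp offsets #0, #16 and #32 in the asm):
 *   [0] cntkctl_el1    [1] cntp_cval_el0
 *   [2] cntp_tval_el0  [3] cntp_ctl_el0
 *   [4] cntv_tval_el0  [5] cntv_ctl_el0
 * Slots [6] and [7] are currently unused spare space.
 */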
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs %x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask its interrupt to prevent
	 * spurious interrupts on this cpu interface. If we don't,
	 * the pending interrupt will bite us when the core comes
	 * back; the timer state is replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer interrupt; the upper layer is expected to re-enable it. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}
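/*
 * A note on the standby sequence below: while in wfi, physical IRQs are
 * routed to EL3 (SCR_EL3.IRQ). Since BL31 runs with PSTATE.I set, the
 * wakeup interrupt is not taken here but merely terminates the wfi;
 * once SCR_EL3 is restored, the interrupt remains pending for the
 * normal world to handle after the PSCI call returns.
 */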
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static uintptr_t secure_entrypoint;
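/*
 * A note on the warm-boot path (an assumption from the register names,
 * not verified against the datasheet): the mp0_rv_addr/mp1_rv_addr
 * entries in MCUCFG presumably latch the low word of each core's reset
 * vector. Both the power-on and suspend paths below write the BL31 warm
 * entry point there so that a core released from reset re-enters the
 * PSCI warm boot code.
 */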
static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
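/*
 * A brief map of the SPM calls used above and below (as this file uses
 * them): the spm_mcdi_* pair programs CPU/cluster idle while the system
 * power domain stays up, whereas spm_system_suspend() and its _finish()
 * counterpart program the SPM for full system suspend when the system
 * domain itself is turned off.
 */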
/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/*
	 * The system resume path above already initialised the GIC fully via
	 * plat_arm_gic_init(), so skip the per-cpu GIC setup in that case.
	 */
	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Re-initialise the GIC after system suspend */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK handlers to shut down/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Arm the watchdog to trigger an immediate software reset */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
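/*******************************************************************************
 * Two flavours of validate_power_state() follow. The legacy format accepts
 * only a zero state-id and derives the per-level target states from the
 * pstate type and power level fields. The extended state-id format looks the
 * request up in mtk_pm_idle_states[] and unpacks one local state per power
 * level from the state-id itself.
 ******************************************************************************/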
#if !PSCI_EXTENDED_STATE_ID
static int plat_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's only possible to enter standby at power level 0;
		 * ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
#else
int plat_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 * Currently we are using a linear search to find the matching
	 * entry in the idle power state array. This can be made a binary
	 * search if the number of entries justifies the additional
	 * complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the state-id and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
#endif

void mtk_system_pwr_domain_resume(void)
{
	console_switch_state(CONSOLE_FLAG_BOOT);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();

	console_switch_state(CONSOLE_FLAG_RUNTIME);
}

static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state, i.e. for two power states X & Y, if
 * X < Y then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}
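/*
 * Illustration of the rule above: the state-id table at the top implies
 * MTK_LOCAL_STATE_RUN == 0 and MTK_LOCAL_STATE_OFF == 2, so a cluster
 * whose two cores request {RUN, OFF} coordinates to the minimum, RUN;
 * the cluster power domain only turns off once every core requests OFF.
 */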