1 /* 2 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * Redistributions of source code must retain the above copyright notice, this 8 * list of conditions and the following disclaimer. 9 * 10 * Redistributions in binary form must reproduce the above copyright notice, 11 * this list of conditions and the following disclaimer in the documentation 12 * and/or other materials provided with the distribution. 13 * 14 * Neither the name of ARM nor the names of its contributors may be used 15 * to endorse or promote products derived from this software without specific 16 * prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <arch_helpers.h> 32 #include <arm_gic.h> 33 #include <assert.h> 34 #include <bakery_lock.h> 35 #include <cci.h> 36 #include <console.h> 37 #include <debug.h> 38 #include <errno.h> 39 #include <mcucfg.h> 40 #include <mmio.h> 41 #include <mt8173_def.h> 42 #include <mt_cpuxgpt.h> /* generic_timer_backup() */ 43 #include <plat_private.h> 44 #include <power_tracer.h> 45 #include <psci.h> 46 #include <rtc.h> 47 #include <scu.h> 48 #include <spm_hotplug.h> 49 #include <spm_mcdi.h> 50 #include <spm_suspend.h> 51 52 #if !ENABLE_PLAT_COMPAT 53 #define MTK_PWR_LVL0 0 54 #define MTK_PWR_LVL1 1 55 #define MTK_PWR_LVL2 2 56 57 /* Macros to read the MTK power domain state */ 58 #define MTK_CORE_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL0] 59 #define MTK_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL1] 60 #define MTK_SYSTEM_PWR_STATE(state) ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\ 61 (state)->pwr_domain_state[MTK_PWR_LVL2] : 0) 62 #endif 63 64 #if PSCI_EXTENDED_STATE_ID 65 /* 66 * The table storing the valid idle power states. Ensure that the 67 * array entries are populated in ascending order of state-id to 68 * enable us to use binary search during power state validation. 69 * The table must be terminated by a NULL entry. 70 */ 71 const unsigned int mtk_pm_idle_states[] = { 72 /* State-id - 0x001 */ 73 mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN, 74 MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY), 75 /* State-id - 0x002 */ 76 mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN, 77 MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN), 78 /* State-id - 0x022 */ 79 mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF, 80 MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN), 81 #if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1 82 /* State-id - 0x222 */ 83 mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF, 84 MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN), 85 #endif 86 0, 87 }; 88 #endif 89 90 struct core_context { 91 unsigned long timer_data[8]; 92 unsigned int count; 93 unsigned int rst; 94 unsigned int abt; 95 unsigned int brk; 96 }; 97 98 struct cluster_context { 99 struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER]; 100 }; 101 102 /* 103 * Top level structure to hold the complete context of a multi cluster system 104 */ 105 struct system_context { 106 struct cluster_context cluster[PLATFORM_CLUSTER_COUNT]; 107 }; 108 109 /* 110 * Top level structure which encapsulates the context of the entire system 111 */ 112 static struct system_context dormant_data[1]; 113 114 static inline struct cluster_context *system_cluster( 115 struct system_context *system, 116 uint32_t clusterid) 117 { 118 return &system->cluster[clusterid]; 119 } 120 121 static inline struct core_context *cluster_core(struct cluster_context *cluster, 122 uint32_t cpuid) 123 { 124 return &cluster->core[cpuid]; 125 } 126 127 static struct cluster_context *get_cluster_data(unsigned long mpidr) 128 { 129 uint32_t clusterid; 130 131 clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS; 132 133 return system_cluster(dormant_data, clusterid); 134 } 135 136 static struct core_context *get_core_data(unsigned long mpidr) 137 { 138 struct cluster_context *cluster; 139 uint32_t cpuid; 140 141 cluster = get_cluster_data(mpidr); 142 cpuid = mpidr & MPIDR_CPU_MASK; 143 144 return cluster_core(cluster, cpuid); 145 } 146 147 static void mt_save_generic_timer(unsigned long *container) 148 { 149 uint64_t ctl; 150 uint64_t val; 151 152 __asm__ volatile("mrs %x0, cntkctl_el1\n\t" 153 "mrs %x1, cntp_cval_el0\n\t" 154 "stp %x0, %x1, [%2, #0]" 155 : "=&r" (ctl), "=&r" (val) 156 : "r" (container) 157 : "memory"); 158 159 __asm__ volatile("mrs %x0, cntp_tval_el0\n\t" 160 "mrs %x1, cntp_ctl_el0\n\t" 161 "stp %x0, %x1, [%2, #16]" 162 : "=&r" (val), "=&r" (ctl) 163 : "r" (container) 164 : "memory"); 165 166 __asm__ volatile("mrs %x0, cntv_tval_el0\n\t" 167 "mrs %x1, cntv_ctl_el0\n\t" 168 "stp %x0, %x1, [%2, #32]" 169 : "=&r" (val), "=&r" (ctl) 170 : "r" (container) 171 : "memory"); 172 } 173 174 static void mt_restore_generic_timer(unsigned long *container) 175 { 176 uint64_t ctl; 177 uint64_t val; 178 179 __asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t" 180 "msr cntkctl_el1, %x0\n\t" 181 "msr cntp_cval_el0, %x1" 182 : "=&r" (ctl), "=&r" (val) 183 : "r" (container) 184 : "memory"); 185 186 __asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t" 187 "msr cntp_tval_el0, %x0\n\t" 188 "msr cntp_ctl_el0, %x1" 189 : "=&r" (val), "=&r" (ctl) 190 : "r" (container) 191 : "memory"); 192 193 __asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t" 194 "msr cntv_tval_el0, %x0\n\t" 195 "msr cntv_ctl_el0, %x1" 196 : "=&r" (val), "=&r" (ctl) 197 : "r" (container) 198 : "memory"); 199 } 200 201 static inline uint64_t read_cntpctl(void) 202 { 203 uint64_t cntpctl; 204 205 __asm__ volatile("mrs %x0, cntp_ctl_el0" 206 : "=r" (cntpctl) : : "memory"); 207 208 return cntpctl; 209 } 210 211 static inline void write_cntpctl(uint64_t cntpctl) 212 { 213 __asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl)); 214 } 215 216 static void stop_generic_timer(void) 217 { 218 /* 219 * Disable the timer and mask the irq to prevent 220 * suprious interrupts on this cpu interface. It 221 * will bite us when we come back if we don't. It 222 * will be replayed on the inbound cluster. 223 */ 224 uint64_t cntpctl = read_cntpctl(); 225 226 write_cntpctl(clr_cntp_ctl_enable(cntpctl)); 227 } 228 229 static void mt_cpu_save(unsigned long mpidr) 230 { 231 struct core_context *core; 232 233 core = get_core_data(mpidr); 234 mt_save_generic_timer(core->timer_data); 235 236 /* disable timer irq, and upper layer should enable it again. */ 237 stop_generic_timer(); 238 } 239 240 static void mt_cpu_restore(unsigned long mpidr) 241 { 242 struct core_context *core; 243 244 core = get_core_data(mpidr); 245 mt_restore_generic_timer(core->timer_data); 246 } 247 248 static void mt_platform_save_context(unsigned long mpidr) 249 { 250 /* mcusys_save_context: */ 251 mt_cpu_save(mpidr); 252 } 253 254 static void mt_platform_restore_context(unsigned long mpidr) 255 { 256 /* mcusys_restore_context: */ 257 mt_cpu_restore(mpidr); 258 } 259 260 #if ENABLE_PLAT_COMPAT 261 /******************************************************************************* 262 * Private function which is used to determine if any platform actions 263 * should be performed for the specified affinity instance given its 264 * state. Nothing needs to be done if the 'state' is not off or if this is not 265 * the highest affinity level which will enter the 'state'. 266 *******************************************************************************/ 267 static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state) 268 { 269 unsigned int max_phys_off_afflvl; 270 271 assert(afflvl <= MPIDR_AFFLVL2); 272 273 if (state != PSCI_STATE_OFF) 274 return -EAGAIN; 275 276 /* 277 * Find the highest affinity level which will be suspended and postpone 278 * all the platform specific actions until that level is hit. 279 */ 280 max_phys_off_afflvl = psci_get_max_phys_off_afflvl(); 281 assert(max_phys_off_afflvl != PSCI_INVALID_DATA); 282 if (afflvl != max_phys_off_afflvl) 283 return -EAGAIN; 284 285 return 0; 286 } 287 288 /******************************************************************************* 289 * MTK_platform handler called when an affinity instance is about to enter 290 * standby. 291 ******************************************************************************/ 292 static void plat_affinst_standby(unsigned int power_state) 293 { 294 unsigned int target_afflvl; 295 296 /* Sanity check the requested state */ 297 target_afflvl = psci_get_pstate_afflvl(power_state); 298 299 /* 300 * It's possible to enter standby only on affinity level 0 i.e. a cpu 301 * on the MTK_platform. Ignore any other affinity level. 302 */ 303 if (target_afflvl == MPIDR_AFFLVL0) { 304 /* 305 * Enter standby state. dsb is good practice before using wfi 306 * to enter low power states. 307 */ 308 dsb(); 309 wfi(); 310 } 311 } 312 #else 313 static void plat_cpu_standby(plat_local_state_t cpu_state) 314 { 315 unsigned int scr; 316 317 scr = read_scr_el3(); 318 write_scr_el3(scr | SCR_IRQ_BIT); 319 isb(); 320 dsb(); 321 wfi(); 322 write_scr_el3(scr); 323 } 324 #endif 325 326 /******************************************************************************* 327 * MTK_platform handler called when an affinity instance is about to be turned 328 * on. The level and mpidr determine the affinity instance. 329 ******************************************************************************/ 330 #if ENABLE_PLAT_COMPAT 331 static int plat_affinst_on(unsigned long mpidr, 332 unsigned long sec_entrypoint, 333 unsigned int afflvl, 334 unsigned int state) 335 { 336 int rc = PSCI_E_SUCCESS; 337 unsigned long cpu_id; 338 unsigned long cluster_id; 339 uintptr_t rv; 340 341 /* 342 * It's possible to turn on only affinity level 0 i.e. a cpu 343 * on the MTK_platform. Ignore any other affinity level. 344 */ 345 if (afflvl != MPIDR_AFFLVL0) 346 return rc; 347 348 cpu_id = mpidr & MPIDR_CPU_MASK; 349 cluster_id = mpidr & MPIDR_CLUSTER_MASK; 350 351 if (cluster_id) 352 rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw; 353 else 354 rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw; 355 356 mmio_write_32(rv, sec_entrypoint); 357 INFO("mt_on[%ld:%ld], entry %x\n", 358 cluster_id, cpu_id, mmio_read_32(rv)); 359 360 spm_hotplug_on(mpidr); 361 362 return rc; 363 } 364 #else 365 static uintptr_t secure_entrypoint; 366 367 static int plat_power_domain_on(unsigned long mpidr) 368 { 369 int rc = PSCI_E_SUCCESS; 370 unsigned long cpu_id; 371 unsigned long cluster_id; 372 uintptr_t rv; 373 374 cpu_id = mpidr & MPIDR_CPU_MASK; 375 cluster_id = mpidr & MPIDR_CLUSTER_MASK; 376 377 if (cluster_id) 378 rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw; 379 else 380 rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw; 381 382 mmio_write_32(rv, secure_entrypoint); 383 INFO("mt_on[%ld:%ld], entry %x\n", 384 cluster_id, cpu_id, mmio_read_32(rv)); 385 386 spm_hotplug_on(mpidr); 387 return rc; 388 } 389 #endif 390 391 /******************************************************************************* 392 * MTK_platform handler called when an affinity instance is about to be turned 393 * off. The level and mpidr determine the affinity instance. The 'state' arg. 394 * allows the platform to decide whether the cluster is being turned off and 395 * take apt actions. 396 * 397 * CAUTION: This function is called with coherent stacks so that caches can be 398 * turned off, flushed and coherency disabled. There is no guarantee that caches 399 * will remain turned on across calls to this function as each affinity level is 400 * dealt with. So do not write & read global variables across calls. It will be 401 * wise to do flush a write to the global to prevent unpredictable results. 402 ******************************************************************************/ 403 #if ENABLE_PLAT_COMPAT 404 static void plat_affinst_off(unsigned int afflvl, unsigned int state) 405 { 406 unsigned long mpidr = read_mpidr_el1(); 407 408 /* Determine if any platform actions need to be executed. */ 409 if (plat_do_plat_actions(afflvl, state) == -EAGAIN) 410 return; 411 412 /* Prevent interrupts from spuriously waking up this cpu */ 413 arm_gic_cpuif_deactivate(); 414 415 spm_hotplug_off(mpidr); 416 417 trace_power_flow(mpidr, CPU_DOWN); 418 419 if (afflvl != MPIDR_AFFLVL0) { 420 /* Disable coherency if this cluster is to be turned off */ 421 plat_cci_disable(); 422 423 trace_power_flow(mpidr, CLUSTER_DOWN); 424 } 425 } 426 #else 427 static void plat_power_domain_off(const psci_power_state_t *state) 428 { 429 unsigned long mpidr = read_mpidr_el1(); 430 431 /* Prevent interrupts from spuriously waking up this cpu */ 432 arm_gic_cpuif_deactivate(); 433 434 spm_hotplug_off(mpidr); 435 436 trace_power_flow(mpidr, CPU_DOWN); 437 438 if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) { 439 /* Disable coherency if this cluster is to be turned off */ 440 plat_cci_disable(); 441 442 trace_power_flow(mpidr, CLUSTER_DOWN); 443 } 444 } 445 #endif 446 447 /******************************************************************************* 448 * MTK_platform handler called when an affinity instance is about to be 449 * suspended. The level and mpidr determine the affinity instance. The 'state' 450 * arg. allows the platform to decide whether the cluster is being turned off 451 * and take apt actions. 452 * 453 * CAUTION: This function is called with coherent stacks so that caches can be 454 * turned off, flushed and coherency disabled. There is no guarantee that caches 455 * will remain turned on across calls to this function as each affinity level is 456 * dealt with. So do not write & read global variables across calls. It will be 457 * wise to do flush a write to the global to prevent unpredictable results. 458 ******************************************************************************/ 459 #if ENABLE_PLAT_COMPAT 460 static void plat_affinst_suspend(unsigned long sec_entrypoint, 461 unsigned int afflvl, 462 unsigned int state) 463 { 464 unsigned long mpidr = read_mpidr_el1(); 465 unsigned long cluster_id; 466 unsigned long cpu_id; 467 uintptr_t rv; 468 469 /* Determine if any platform actions need to be executed. */ 470 if (plat_do_plat_actions(afflvl, state) == -EAGAIN) 471 return; 472 473 cpu_id = mpidr & MPIDR_CPU_MASK; 474 cluster_id = mpidr & MPIDR_CLUSTER_MASK; 475 476 if (cluster_id) 477 rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw; 478 else 479 rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw; 480 481 mmio_write_32(rv, sec_entrypoint); 482 483 if (afflvl < MPIDR_AFFLVL2) 484 spm_mcdi_prepare_for_off_state(mpidr, afflvl); 485 486 if (afflvl >= MPIDR_AFFLVL0) 487 mt_platform_save_context(mpidr); 488 489 /* Perform the common cluster specific operations */ 490 if (afflvl >= MPIDR_AFFLVL1) { 491 /* Disable coherency if this cluster is to be turned off */ 492 plat_cci_disable(); 493 } 494 495 if (afflvl >= MPIDR_AFFLVL2) { 496 disable_scu(mpidr); 497 generic_timer_backup(); 498 spm_system_suspend(); 499 /* Prevent interrupts from spuriously waking up this cpu */ 500 arm_gic_cpuif_deactivate(); 501 } 502 } 503 #else 504 static void plat_power_domain_suspend(const psci_power_state_t *state) 505 { 506 unsigned long mpidr = read_mpidr_el1(); 507 unsigned long cluster_id; 508 unsigned long cpu_id; 509 uintptr_t rv; 510 511 cpu_id = mpidr & MPIDR_CPU_MASK; 512 cluster_id = mpidr & MPIDR_CLUSTER_MASK; 513 514 if (cluster_id) 515 rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw; 516 else 517 rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw; 518 519 mmio_write_32(rv, secure_entrypoint); 520 521 if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) { 522 spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0); 523 if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) 524 spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1); 525 } 526 527 mt_platform_save_context(mpidr); 528 529 /* Perform the common cluster specific operations */ 530 if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) { 531 /* Disable coherency if this cluster is to be turned off */ 532 plat_cci_disable(); 533 } 534 535 if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) { 536 disable_scu(mpidr); 537 generic_timer_backup(); 538 spm_system_suspend(); 539 /* Prevent interrupts from spuriously waking up this cpu */ 540 arm_gic_cpuif_deactivate(); 541 } 542 } 543 #endif 544 545 /******************************************************************************* 546 * MTK_platform handler called when an affinity instance has just been powered 547 * on after being turned off earlier. The level and mpidr determine the affinity 548 * instance. The 'state' arg. allows the platform to decide whether the cluster 549 * was turned off prior to wakeup and do what's necessary to setup it up 550 * correctly. 551 ******************************************************************************/ 552 #if ENABLE_PLAT_COMPAT 553 static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state) 554 { 555 unsigned long mpidr = read_mpidr_el1(); 556 557 /* Determine if any platform actions need to be executed. */ 558 if (plat_do_plat_actions(afflvl, state) == -EAGAIN) 559 return; 560 561 /* Perform the common cluster specific operations */ 562 if (afflvl >= MPIDR_AFFLVL1) { 563 /* Enable coherency if this cluster was off */ 564 plat_cci_enable(); 565 trace_power_flow(mpidr, CLUSTER_UP); 566 } 567 568 /* Enable the gic cpu interface */ 569 arm_gic_cpuif_setup(); 570 arm_gic_pcpu_distif_setup(); 571 trace_power_flow(mpidr, CPU_UP); 572 } 573 #else 574 void mtk_system_pwr_domain_resume(void); 575 576 static void plat_power_domain_on_finish(const psci_power_state_t *state) 577 { 578 unsigned long mpidr = read_mpidr_el1(); 579 580 assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF); 581 582 if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) && 583 (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF)) 584 mtk_system_pwr_domain_resume(); 585 586 if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) { 587 plat_cci_enable(); 588 trace_power_flow(mpidr, CLUSTER_UP); 589 } 590 591 if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) && 592 (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF)) 593 return; 594 595 /* Enable the gic cpu interface */ 596 arm_gic_cpuif_setup(); 597 arm_gic_pcpu_distif_setup(); 598 trace_power_flow(mpidr, CPU_UP); 599 } 600 #endif 601 602 /******************************************************************************* 603 * MTK_platform handler called when an affinity instance has just been powered 604 * on after having been suspended earlier. The level and mpidr determine the 605 * affinity instance. 606 ******************************************************************************/ 607 #if ENABLE_PLAT_COMPAT 608 static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state) 609 { 610 unsigned long mpidr = read_mpidr_el1(); 611 612 /* Determine if any platform actions need to be executed. */ 613 if (plat_do_plat_actions(afflvl, state) == -EAGAIN) 614 return; 615 616 if (afflvl >= MPIDR_AFFLVL2) { 617 /* Enable the gic cpu interface */ 618 arm_gic_setup(); 619 arm_gic_cpuif_setup(); 620 spm_system_suspend_finish(); 621 enable_scu(mpidr); 622 } 623 624 /* Perform the common cluster specific operations */ 625 if (afflvl >= MPIDR_AFFLVL1) { 626 /* Enable coherency if this cluster was off */ 627 plat_cci_enable(); 628 } 629 630 if (afflvl >= MPIDR_AFFLVL0) 631 mt_platform_restore_context(mpidr); 632 633 if (afflvl < MPIDR_AFFLVL2) 634 spm_mcdi_finish_for_on_state(mpidr, afflvl); 635 636 arm_gic_pcpu_distif_setup(); 637 } 638 #else 639 static void plat_power_domain_suspend_finish(const psci_power_state_t *state) 640 { 641 unsigned long mpidr = read_mpidr_el1(); 642 643 if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET) 644 return; 645 646 if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) { 647 /* Enable the gic cpu interface */ 648 arm_gic_setup(); 649 arm_gic_cpuif_setup(); 650 spm_system_suspend_finish(); 651 enable_scu(mpidr); 652 } 653 654 /* Perform the common cluster specific operations */ 655 if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) { 656 /* Enable coherency if this cluster was off */ 657 plat_cci_enable(); 658 } 659 660 mt_platform_restore_context(mpidr); 661 662 if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) { 663 spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0); 664 if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) 665 spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1); 666 } 667 668 arm_gic_pcpu_distif_setup(); 669 } 670 #endif 671 672 #if ENABLE_PLAT_COMPAT 673 static unsigned int plat_get_sys_suspend_power_state(void) 674 { 675 /* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */ 676 return psci_make_powerstate(0, 1, 2); 677 } 678 #else 679 static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state) 680 { 681 assert(PLAT_MAX_PWR_LVL >= 2); 682 683 for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) 684 req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF; 685 } 686 #endif 687 688 /******************************************************************************* 689 * MTK handlers to shutdown/reboot the system 690 ******************************************************************************/ 691 static void __dead2 plat_system_off(void) 692 { 693 INFO("MTK System Off\n"); 694 695 rtc_bbpu_power_down(); 696 697 wfi(); 698 ERROR("MTK System Off: operation not handled.\n"); 699 panic(); 700 } 701 702 static void __dead2 plat_system_reset(void) 703 { 704 /* Write the System Configuration Control Register */ 705 INFO("MTK System Reset\n"); 706 707 mmio_clrsetbits_32(MTK_WDT_BASE, 708 (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ), 709 MTK_WDT_MODE_KEY); 710 mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN)); 711 mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY); 712 713 wfi(); 714 ERROR("MTK System Reset: operation not handled.\n"); 715 panic(); 716 } 717 718 #if !ENABLE_PLAT_COMPAT 719 #if !PSCI_EXTENDED_STATE_ID 720 static int plat_validate_power_state(unsigned int power_state, 721 psci_power_state_t *req_state) 722 { 723 int pstate = psci_get_pstate_type(power_state); 724 int pwr_lvl = psci_get_pstate_pwrlvl(power_state); 725 int i; 726 727 assert(req_state); 728 729 if (pwr_lvl > PLAT_MAX_PWR_LVL) 730 return PSCI_E_INVALID_PARAMS; 731 732 /* Sanity check the requested state */ 733 if (pstate == PSTATE_TYPE_STANDBY) { 734 /* 735 * It's possible to enter standby only on power level 0 736 * Ignore any other power level. 737 */ 738 if (pwr_lvl != 0) 739 return PSCI_E_INVALID_PARAMS; 740 741 req_state->pwr_domain_state[MTK_PWR_LVL0] = 742 MTK_LOCAL_STATE_RET; 743 } else { 744 for (i = 0; i <= pwr_lvl; i++) 745 req_state->pwr_domain_state[i] = 746 MTK_LOCAL_STATE_OFF; 747 } 748 749 /* 750 * We expect the 'state id' to be zero. 751 */ 752 if (psci_get_pstate_id(power_state)) 753 return PSCI_E_INVALID_PARAMS; 754 755 return PSCI_E_SUCCESS; 756 } 757 #else 758 int plat_validate_power_state(unsigned int power_state, 759 psci_power_state_t *req_state) 760 { 761 unsigned int state_id; 762 int i; 763 764 assert(req_state); 765 766 /* 767 * Currently we are using a linear search for finding the matching 768 * entry in the idle power state array. This can be made a binary 769 * search if the number of entries justify the additional complexity. 770 */ 771 for (i = 0; !!mtk_pm_idle_states[i]; i++) { 772 if (power_state == mtk_pm_idle_states[i]) 773 break; 774 } 775 776 /* Return error if entry not found in the idle state array */ 777 if (!mtk_pm_idle_states[i]) 778 return PSCI_E_INVALID_PARAMS; 779 780 i = 0; 781 state_id = psci_get_pstate_id(power_state); 782 783 /* Parse the State ID and populate the state info parameter */ 784 while (state_id) { 785 req_state->pwr_domain_state[i++] = state_id & 786 MTK_LOCAL_PSTATE_MASK; 787 state_id >>= MTK_LOCAL_PSTATE_WIDTH; 788 } 789 790 return PSCI_E_SUCCESS; 791 } 792 #endif 793 794 void mtk_system_pwr_domain_resume(void) 795 { 796 console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE); 797 798 /* Assert system power domain is available on the platform */ 799 assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2); 800 801 arm_gic_cpuif_setup(); 802 arm_gic_pcpu_distif_setup(); 803 } 804 #endif 805 806 #if ENABLE_PLAT_COMPAT 807 /******************************************************************************* 808 * Export the platform handlers to enable psci to invoke them 809 ******************************************************************************/ 810 static const plat_pm_ops_t plat_plat_pm_ops = { 811 .affinst_standby = plat_affinst_standby, 812 .affinst_on = plat_affinst_on, 813 .affinst_off = plat_affinst_off, 814 .affinst_suspend = plat_affinst_suspend, 815 .affinst_on_finish = plat_affinst_on_finish, 816 .affinst_suspend_finish = plat_affinst_suspend_finish, 817 .system_off = plat_system_off, 818 .system_reset = plat_system_reset, 819 .get_sys_suspend_power_state = plat_get_sys_suspend_power_state, 820 }; 821 822 /******************************************************************************* 823 * Export the platform specific power ops & initialize the mtk_platform power 824 * controller 825 ******************************************************************************/ 826 int platform_setup_pm(const plat_pm_ops_t **plat_ops) 827 { 828 *plat_ops = &plat_plat_pm_ops; 829 return 0; 830 } 831 #else 832 static const plat_psci_ops_t plat_plat_pm_ops = { 833 .cpu_standby = plat_cpu_standby, 834 .pwr_domain_on = plat_power_domain_on, 835 .pwr_domain_on_finish = plat_power_domain_on_finish, 836 .pwr_domain_off = plat_power_domain_off, 837 .pwr_domain_suspend = plat_power_domain_suspend, 838 .pwr_domain_suspend_finish = plat_power_domain_suspend_finish, 839 .system_off = plat_system_off, 840 .system_reset = plat_system_reset, 841 .validate_power_state = plat_validate_power_state, 842 .get_sys_suspend_power_state = plat_get_sys_suspend_power_state, 843 }; 844 845 int plat_setup_psci_ops(uintptr_t sec_entrypoint, 846 const plat_psci_ops_t **psci_ops) 847 { 848 *psci_ops = &plat_plat_pm_ops; 849 secure_entrypoint = sec_entrypoint; 850 return 0; 851 } 852 853 /* 854 * The PSCI generic code uses this API to let the platform participate in state 855 * coordination during a power management operation. It compares the platform 856 * specific local power states requested by each cpu for a given power domain 857 * and returns the coordinated target power state that the domain should 858 * enter. A platform assigns a number to a local power state. This default 859 * implementation assumes that the platform assigns these numbers in order of 860 * increasing depth of the power state i.e. for two power states X & Y, if X < Y 861 * then X represents a shallower power state than Y. As a result, the 862 * coordinated target local power state for a power domain will be the minimum 863 * of the requested local power states. 864 */ 865 plat_local_state_t plat_get_target_pwr_state(unsigned int lvl, 866 const plat_local_state_t *states, 867 unsigned int ncpu) 868 { 869 plat_local_state_t target = PLAT_MAX_OFF_STATE, temp; 870 871 assert(ncpu); 872 873 do { 874 temp = *states++; 875 if (temp < target) 876 target = temp; 877 } while (--ncpu); 878 879 return target; 880 } 881 #endif 882