/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <mcucfg.h>
#include <mmio.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

#if !ENABLE_PLAT_COMPAT
#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
#endif
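
/*
 * Note: this file carries two sets of PSCI handlers selected at build time by
 * ENABLE_PLAT_COMPAT: the legacy "affinity instance" API (plat_affinst_*) and
 * the PSCI 1.0 power-domain API (plat_power_domain_*). In the PSCI 1.0 path
 * the macros above index pwr_domain_state[] by power level: 0 = core,
 * 1 = cluster, 2 = system (the latter only when PLAT_MAX_PWR_LVL exposes a
 * system level).
 */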

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
					struct system_context *system,
					uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}
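
/*
 * Layout of core_context.timer_data[] as used by the save/restore routines
 * below (one 64-bit slot per register, stored in pairs by the stp/ldp
 * instructions at offsets 0, 16 and 32):
 *   [0] cntkctl_el1    [1] cntp_cval_el0
 *   [2] cntp_tval_el0  [3] cntp_ctl_el0
 *   [4] cntv_tval_el0  [5] cntv_ctl_el0
 */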

static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs %x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* disable timer irq, and upper layer should enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}
#else
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}
#endif
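
/*
 * Note on plat_cpu_standby() above: setting SCR_EL3.IRQ routes physical IRQs
 * to EL3, so a pending interrupt can wake the core from wfi even though it is
 * idling at EL3; the saved SCR_EL3 value is restored as soon as execution
 * resumes, before control returns to the PSCI framework.
 */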

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static int plat_affinst_on(unsigned long mpidr,
			   unsigned long sec_entrypoint,
			   unsigned int afflvl,
			   unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
	     cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);

	return rc;
}
#else
static uintptr_t secure_entrypoint;

static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
	     cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}
#endif
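
/*
 * CPU_ON flow: the warm-boot entry point is written into the per-cpu reset
 * vector register in MCUCFG (mp0_rv_addr/mp1_rv_addr, low word) so the target
 * core enters BL31 when it comes out of reset, and spm_hotplug_on() then asks
 * the SPM to actually power the core up.
 */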

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	arm_gic_cpuif_deactivate();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
#else
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	arm_gic_cpuif_deactivate();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend(unsigned long sec_entrypoint,
				 unsigned int afflvl,
				 unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_prepare_for_off_state(mpidr, afflvl);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		arm_gic_cpuif_deactivate();
	}
}
#else
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		arm_gic_cpuif_deactivate();
	}
}
#endif
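
/*
 * Ordering in plat_power_domain_suspend() above: program the warm-boot entry
 * point, notify the SPM/MCDI driver of the cpu (and, where applicable, the
 * cluster) going down unless this is a full system suspend, save the per-cpu
 * generic timer context, drop CCI coherency when the whole cluster goes off,
 * and for a system-level suspend additionally disable the SCU, back up the
 * generic timer and arm the SPM before deactivating the GIC CPU interface.
 */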

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
	trace_power_flow(mpidr, CPU_UP);
}
#else
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
	trace_power_flow(mpidr, CPU_UP);
}
#endif
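
/*
 * Note on plat_power_domain_on_finish() above: when the wakeup is a full
 * system resume, mtk_system_pwr_domain_resume() has already re-initialised
 * the console, the GIC CPU interface and the per-cpu distributor state, so
 * the handler returns early instead of setting up the GIC a second time.
 * Cluster coherency is re-enabled before that early return in either case.
 */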

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Enable the gic cpu interface */
		arm_gic_setup();
		arm_gic_cpuif_setup();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_finish_for_on_state(mpidr, afflvl);

	arm_gic_pcpu_distif_setup();
}
#else
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		arm_gic_setup();
		arm_gic_cpuif_setup();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	arm_gic_pcpu_distif_setup();
}
#endif
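
/*
 * Note on plat_power_domain_suspend_finish() above: if level 0 only entered
 * the retention state, nothing was actually powered down, so there is no
 * context to restore and the handler returns immediately.
 */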

#if ENABLE_PLAT_COMPAT
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}
#else
static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}
#endif

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
			   (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
			   MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
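
/*
 * Note on plat_system_reset() above (as the register names suggest): the
 * reboot is driven through the MTK watchdog. The mode register is switched
 * out of dual/IRQ mode so that an expiry resets the SoC rather than raising
 * an interrupt, the external reset output is enabled, and the software-reset
 * register is then kicked with its key value; MTK_WDT_MODE_KEY must accompany
 * every write to the mode register.
 */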

#if !ENABLE_PLAT_COMPAT
static int plat_validate_power_state(unsigned int power_state,
				     psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

void mtk_system_pwr_domain_resume(void)
{
	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
}
#endif
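
/*
 * Example of plat_validate_power_state() above: a CPU_SUSPEND power_state
 * requesting a power-down at the cluster level (type = powerdown, power
 * level = 1, state id = 0) is accepted and maps pwr_domain_state[0..1] to
 * MTK_LOCAL_STATE_OFF; a standby request at any level other than 0, or any
 * non-zero state id, is rejected with PSCI_E_INVALID_PARAMS.
 */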

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
#else
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state i.e. for two power states X & Y, if X < Y
 * then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}
#endif
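
/*
 * Example of the coordination above: with local states numbered run (0) <
 * retention < off, a cluster whose two cpus request "off" and "retention"
 * coordinates to "retention"; if either cpu is still running, the cluster
 * stays in the run state.
 */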