/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <mcucfg.h>
#include <mmio.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

#if !ENABLE_PLAT_COMPAT
#define MTK_PWR_LVL0 0
#define MTK_PWR_LVL1 1
#define MTK_PWR_LVL2 2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state) ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
                (state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
#endif

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
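/*
 * Each nibble of a state-id encodes the local state requested at one power
 * level, with the core level in the lowest nibble: e.g. 0x022 asks for core
 * and cluster off while the system level keeps running. This reading follows
 * the arguments passed to mtk_make_pwrstate_lvl2() below and the way
 * plat_validate_power_state() unpacks the id nibble by nibble.
 */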
const unsigned int mtk_pm_idle_states[] = {
        /* State-id - 0x001 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
                MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
        /* State-id - 0x002 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
                MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
        /* State-id - 0x022 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
                MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
        /* State-id - 0x222 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
                MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
        0,
};
#endif

struct core_context {
        unsigned long timer_data[8];
        unsigned int count;
        unsigned int rst;
        unsigned int abt;
        unsigned int brk;
};

struct cluster_context {
        struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
        struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
        struct system_context *system,
        uint32_t clusterid)
{
        return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
        uint32_t cpuid)
{
        return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
        uint32_t clusterid;

        clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

        return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
        struct cluster_context *cluster;
        uint32_t cpuid;

        cluster = get_cluster_data(mpidr);
        cpuid = mpidr & MPIDR_CPU_MASK;

        return cluster_core(cluster, cpuid);
}

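/*
 * Save this core's generic timer context (CNTKCTL_EL1 plus the physical and
 * virtual timer value/control registers) into the given container, laid out
 * in the same order that mt_restore_generic_timer() reads it back.
 */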
static void mt_save_generic_timer(unsigned long *container)
{
        uint64_t ctl;
        uint64_t val;

        __asm__ volatile("mrs %x0, cntkctl_el1\n\t"
                         "mrs %x1, cntp_cval_el0\n\t"
                         "stp %x0, %x1, [%2, #0]"
                         : "=&r" (ctl), "=&r" (val)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
                         "mrs %x1, cntp_ctl_el0\n\t"
                         "stp %x0, %x1, [%2, #16]"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
                         "mrs %x1, cntv_ctl_el0\n\t"
                         "stp %x0, %x1, [%2, #32]"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
        uint64_t ctl;
        uint64_t val;

        __asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
                         "msr cntkctl_el1, %x0\n\t"
                         "msr cntp_cval_el0, %x1"
                         : "=&r" (ctl), "=&r" (val)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
                         "msr cntp_tval_el0, %x0\n\t"
                         "msr cntp_ctl_el0, %x1"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
                         "msr cntv_tval_el0, %x0\n\t"
                         "msr cntv_ctl_el0, %x1"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");
}

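/*
 * Accessors for CNTP_CTL_EL0, used below to disable the physical timer
 * before this cpu is powered down.
 */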
static inline uint64_t read_cntpctl(void)
{
        uint64_t cntpctl;

        __asm__ volatile("mrs %x0, cntp_ctl_el0"
                         : "=r" (cntpctl) : : "memory");

        return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
        __asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
        /*
         * Disable the timer and mask the irq to prevent
         * spurious interrupts on this cpu interface. It
         * will bite us when we come back if we don't. It
         * will be replayed on the inbound cluster.
         */
        uint64_t cntpctl = read_cntpctl();

        write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
        struct core_context *core;

        core = get_core_data(mpidr);
        mt_save_generic_timer(core->timer_data);

        /* Disable the timer irq; the upper layer should enable it again. */
        stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
        struct core_context *core;

        core = get_core_data(mpidr);
        mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
        /* mcusys_save_context: */
        mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
        /* mcusys_restore_context: */
        mt_cpu_restore(mpidr);
}

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
        unsigned int max_phys_off_afflvl;

        assert(afflvl <= MPIDR_AFFLVL2);

        if (state != PSCI_STATE_OFF)
                return -EAGAIN;

        /*
         * Find the highest affinity level which will be suspended and postpone
         * all the platform specific actions until that level is hit.
         */
        max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
        assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
        if (afflvl != max_phys_off_afflvl)
                return -EAGAIN;

        return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
        unsigned int target_afflvl;

        /* Sanity check the requested state */
        target_afflvl = psci_get_pstate_afflvl(power_state);

        /*
         * It's possible to enter standby only on affinity level 0, i.e. a cpu
         * on the MTK_platform. Ignore any other affinity level.
         */
        if (target_afflvl == MPIDR_AFFLVL0) {
                /*
                 * Enter standby state. dsb is good practice before using wfi
                 * to enter low power states.
                 */
                dsb();
                wfi();
        }
}
#else
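/*
 * Standby (retention) entry for a single cpu: route physical IRQs to EL3 via
 * SCR_EL3.IRQ so a pending interrupt wakes the core from wfi here, then
 * restore the original routing on wakeup.
 */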
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
        unsigned int scr;

        scr = read_scr_el3();
        write_scr_el3(scr | SCR_IRQ_BIT);
        isb();
        dsb();
        wfi();
        write_scr_el3(scr);
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static int plat_affinst_on(unsigned long mpidr,
                           unsigned long sec_entrypoint,
                           unsigned int afflvl,
                           unsigned int state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned long cpu_id;
        unsigned long cluster_id;
        uintptr_t rv;

        /*
         * It's possible to turn on only affinity level 0 i.e. a cpu
         * on the MTK_platform. Ignore any other affinity level.
         */
        if (afflvl != MPIDR_AFFLVL0)
                return rc;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, sec_entrypoint);
        INFO("mt_on[%ld:%ld], entry %x\n",
             cluster_id, cpu_id, mmio_read_32(rv));

        spm_hotplug_on(mpidr);

        return rc;
}
#else
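/*
 * Warm-boot entry point recorded by plat_setup_psci_ops() and programmed into
 * the per-cpu boot address (rv_addr) registers by the on/suspend handlers.
 */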
static uintptr_t secure_entrypoint;

static int plat_power_domain_on(unsigned long mpidr)
{
        int rc = PSCI_E_SUCCESS;
        unsigned long cpu_id;
        unsigned long cluster_id;
        uintptr_t rv;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, secure_entrypoint);
        INFO("mt_on[%ld:%ld], entry %x\n",
             cluster_id, cpu_id, mmio_read_32(rv));

        spm_hotplug_on(mpidr);
        return rc;
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not read & write global variables across calls. It would be
 * wise to flush any write to a global variable to avoid unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        /* Prevent interrupts from spuriously waking up this cpu */
        arm_gic_cpuif_deactivate();

        spm_hotplug_off(mpidr);

        trace_power_flow(mpidr, CPU_DOWN);

        if (afflvl != MPIDR_AFFLVL0) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();

                trace_power_flow(mpidr, CLUSTER_DOWN);
        }
}
#else
static void plat_power_domain_off(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Prevent interrupts from spuriously waking up this cpu */
        arm_gic_cpuif_deactivate();

        spm_hotplug_off(mpidr);

        trace_power_flow(mpidr, CPU_DOWN);

        if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();

                trace_power_flow(mpidr, CLUSTER_DOWN);
        }
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not read & write global variables across calls. It would be
 * wise to flush any write to a global variable to avoid unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend(unsigned long sec_entrypoint,
                                 unsigned int afflvl,
                                 unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();
        unsigned long cluster_id;
        unsigned long cpu_id;
        uintptr_t rv;

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, sec_entrypoint);

        if (afflvl < MPIDR_AFFLVL2)
                spm_mcdi_prepare_for_off_state(mpidr, afflvl);

        if (afflvl >= MPIDR_AFFLVL0)
                mt_platform_save_context(mpidr);

        /* Perform the common cluster specific operations */
        if (afflvl >= MPIDR_AFFLVL1) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();
        }

        if (afflvl >= MPIDR_AFFLVL2) {
                disable_scu(mpidr);
                generic_timer_backup();
                spm_system_suspend();
                /* Prevent interrupts from spuriously waking up this cpu */
                arm_gic_cpuif_deactivate();
        }
}
#else
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();
        unsigned long cluster_id;
        unsigned long cpu_id;
        uintptr_t rv;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, secure_entrypoint);

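        /*
         * If the system power domain stays on, this request is a cpu/cluster
         * idle entry handled by the SPM MCDI driver rather than a full
         * system suspend.
         */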
        if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
                spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
                if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
                        spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
        }

        mt_platform_save_context(mpidr);

        /* Perform the common cluster specific operations */
        if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();
        }

        if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                disable_scu(mpidr);
                generic_timer_backup();
                spm_system_suspend();
                /* Prevent interrupts from spuriously waking up this cpu */
                arm_gic_cpuif_deactivate();
        }
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        /* Perform the common cluster specific operations */
        if (afflvl >= MPIDR_AFFLVL1) {
                /* Enable coherency if this cluster was off */
                plat_cci_enable();
                trace_power_flow(mpidr, CLUSTER_UP);
        }

        /* Enable the gic cpu interface */
        arm_gic_cpuif_setup();
        arm_gic_pcpu_distif_setup();
        trace_power_flow(mpidr, CPU_UP);
}
#else
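/* Defined below: brings the console and GIC back up after the system power domain has been off. */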
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();

        assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

        if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
            (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
                mtk_system_pwr_domain_resume();

        if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
                plat_cci_enable();
                trace_power_flow(mpidr, CLUSTER_UP);
        }

        if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
            (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
                return;

        /* Enable the gic cpu interface */
        arm_gic_cpuif_setup();
        arm_gic_pcpu_distif_setup();
        trace_power_flow(mpidr, CPU_UP);
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        if (afflvl >= MPIDR_AFFLVL2) {
                /* Enable the gic cpu interface */
                arm_gic_setup();
                arm_gic_cpuif_setup();
                spm_system_suspend_finish();
                enable_scu(mpidr);
        }

        /* Perform the common cluster specific operations */
        if (afflvl >= MPIDR_AFFLVL1) {
                /* Enable coherency if this cluster was off */
                plat_cci_enable();
        }

        if (afflvl >= MPIDR_AFFLVL0)
                mt_platform_restore_context(mpidr);

        if (afflvl < MPIDR_AFFLVL2)
                spm_mcdi_finish_for_on_state(mpidr, afflvl);

        arm_gic_pcpu_distif_setup();
}
#else
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();

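        /* Nothing to restore if this core only entered the retention state. */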
        if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
                return;

        if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Enable the gic cpu interface */
                arm_gic_setup();
                arm_gic_cpuif_setup();
                spm_system_suspend_finish();
                enable_scu(mpidr);
        }

        /* Perform the common cluster specific operations */
        if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Enable coherency if this cluster was off */
                plat_cci_enable();
        }

        mt_platform_restore_context(mpidr);

        if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
                spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
                if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
                        spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
        }

        arm_gic_pcpu_distif_setup();
}
#endif

#if ENABLE_PLAT_COMPAT
static unsigned int plat_get_sys_suspend_power_state(void)
{
        /* StateID: 0, StateType: 1 (power down), PowerLevel: 2 (system) */
        return psci_make_powerstate(0, 1, 2);
}
#else
static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
        assert(PLAT_MAX_PWR_LVL >= 2);

        for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
                req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}
#endif

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
        INFO("MTK System Off\n");

        rtc_bbpu_power_down();

        wfi();
        ERROR("MTK System Off: operation not handled.\n");
        panic();
}

static void __dead2 plat_system_reset(void)
{
        /* Write the System Configuration Control Register */
        INFO("MTK System Reset\n");

        mmio_clrsetbits_32(MTK_WDT_BASE,
                           (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
                           MTK_WDT_MODE_KEY);
        mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
        mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

        wfi();
        ERROR("MTK System Reset: operation not handled.\n");
        panic();
}

#if !ENABLE_PLAT_COMPAT
#if !PSCI_EXTENDED_STATE_ID
static int plat_validate_power_state(unsigned int power_state,
                                     psci_power_state_t *req_state)
{
        int pstate = psci_get_pstate_type(power_state);
        int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
        int i;

        assert(req_state);

        if (pwr_lvl > PLAT_MAX_PWR_LVL)
                return PSCI_E_INVALID_PARAMS;

        /* Sanity check the requested state */
        if (pstate == PSTATE_TYPE_STANDBY) {
                /*
                 * It's possible to enter standby only on power level 0.
                 * Ignore any other power level.
                 */
                if (pwr_lvl != 0)
                        return PSCI_E_INVALID_PARAMS;

                req_state->pwr_domain_state[MTK_PWR_LVL0] =
                                        MTK_LOCAL_STATE_RET;
        } else {
                for (i = 0; i <= pwr_lvl; i++)
                        req_state->pwr_domain_state[i] =
                                        MTK_LOCAL_STATE_OFF;
        }

        /* We expect the 'state id' to be zero. */
        if (psci_get_pstate_id(power_state))
                return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
}
#else
int plat_validate_power_state(unsigned int power_state,
                              psci_power_state_t *req_state)
{
        unsigned int state_id;
        int i;

        assert(req_state);

        /*
         * Currently we are using a linear search for finding the matching
         * entry in the idle power state array. This can be made a binary
         * search if the number of entries justifies the additional complexity.
         */
        for (i = 0; !!mtk_pm_idle_states[i]; i++) {
                if (power_state == mtk_pm_idle_states[i])
                        break;
        }

        /* Return an error if the entry is not found in the idle state array */
        if (!mtk_pm_idle_states[i])
                return PSCI_E_INVALID_PARAMS;

        i = 0;
        state_id = psci_get_pstate_id(power_state);

        /* Parse the State ID and populate the state info parameter */
        while (state_id) {
                req_state->pwr_domain_state[i++] = state_id &
                                                MTK_LOCAL_PSTATE_MASK;
                state_id >>= MTK_LOCAL_PSTATE_WIDTH;
        }

        return PSCI_E_SUCCESS;
}
#endif

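/*
 * Called from plat_power_domain_on_finish() when the whole system power
 * domain had been powered off: the console and this cpu's GIC interface and
 * per-cpu distributor state must be re-initialized before use.
 */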
void mtk_system_pwr_domain_resume(void)
{
        console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);

        /* Assert system power domain is available on the platform */
        assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

        arm_gic_cpuif_setup();
        arm_gic_pcpu_distif_setup();
}
#endif

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
        .affinst_standby = plat_affinst_standby,
        .affinst_on = plat_affinst_on,
        .affinst_off = plat_affinst_off,
        .affinst_suspend = plat_affinst_suspend,
        .affinst_on_finish = plat_affinst_on_finish,
        .affinst_suspend_finish = plat_affinst_suspend_finish,
        .system_off = plat_system_off,
        .system_reset = plat_system_reset,
        .get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
        *plat_ops = &plat_plat_pm_ops;
        return 0;
}
#else
static const plat_psci_ops_t plat_plat_pm_ops = {
        .cpu_standby = plat_cpu_standby,
        .pwr_domain_on = plat_power_domain_on,
        .pwr_domain_on_finish = plat_power_domain_on_finish,
        .pwr_domain_off = plat_power_domain_off,
        .pwr_domain_suspend = plat_power_domain_suspend,
        .pwr_domain_suspend_finish = plat_power_domain_suspend_finish,
        .system_off = plat_system_off,
        .system_reset = plat_system_reset,
        .validate_power_state = plat_validate_power_state,
        .get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};

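/*
 * Export the platform-specific PSCI handlers and record the warm-boot entry
 * point used by the on/suspend handlers above.
 */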
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                        const plat_psci_ops_t **psci_ops)
{
        *psci_ops = &plat_plat_pm_ops;
        secure_entrypoint = sec_entrypoint;
        return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state, i.e. for two power states X & Y, if
 * X < Y then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
                                             const plat_local_state_t *states,
                                             unsigned int ncpu)
{
        plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

        assert(ncpu);

        do {
                temp = *states++;
                if (temp < target)
                        target = temp;
        } while (--ncpu);

        return target;
}
#endif