/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/cci.h>
#include <drivers/arm/gicv2.h>
#include <drivers/ti/uart/uart_16550.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>

#include <mcucfg.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
					(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
	struct system_context *system,
	uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
	uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}
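
/*
 * Layout of core_context.timer_data as written by mt_save_generic_timer()
 * below using three 16-byte stp pairs (and read back symmetrically by
 * mt_restore_generic_timer()):
 *   timer_data[0]: cntkctl_el1    timer_data[1]: cntp_cval_el0
 *   timer_data[2]: cntp_tval_el0  timer_data[3]: cntp_ctl_el0
 *   timer_data[4]: cntv_tval_el0  timer_data[5]: cntv_ctl_el0
 */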
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}
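
/*
 * Per-core context save/restore helpers: mt_platform_save_context() is
 * invoked from plat_power_domain_suspend() and mt_platform_restore_context()
 * from plat_power_domain_suspend_finish() to preserve the generic timer
 * state of the suspending core across a power-down.
 */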
static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer irq; the upper layer should enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
/* Warm-boot entry point programmed into the per-CPU reset vector registers */
static uintptr_t secure_entrypoint;

static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled.
 * There is no guarantee that caches will remain turned on across calls to this
 * function as each affinity level is dealt with. So do not write and read
 * global variables across calls. It would be wise to flush a write to the
 * global to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write and read global variables across calls. It would
 * be wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Trigger a system reset through the MTK watchdog */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
			   (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
			   MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
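
/*
 * Two variants of plat_validate_power_state() follow. Without
 * PSCI_EXTENDED_STATE_ID, the power_state parameter only encodes the state
 * type and power level and its state-id field must be zero. With
 * PSCI_EXTENDED_STATE_ID, the state-id itself carries one local state per
 * power level (level 0 in the least significant MTK_LOCAL_PSTATE_WIDTH-bit
 * field) and power_state must match an entry of mtk_pm_idle_states[].
 */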
#if !PSCI_EXTENDED_STATE_ID
static int plat_validate_power_state(unsigned int power_state,
				     psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only at power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/* We expect the 'state id' to be zero. */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
#else
int plat_validate_power_state(unsigned int power_state,
			      psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 * Currently we are using a linear search for finding the matching
	 * entry in the idle power state array. This can be made a binary
	 * search if the number of entries justifies the additional complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
#endif

void mtk_system_pwr_domain_resume(void)
{
	console_switch_state(CONSOLE_FLAG_BOOT);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();

	console_switch_state(CONSOLE_FLAG_RUNTIME);
}

static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state i.e.
 * for two power states X & Y, if X < Y
 * then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}