/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/cci.h>
#include <drivers/arm/gicv2.h>
#include <drivers/ti/uart/uart_16550.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>

#include <mcucfg.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>
#include <wdt.h>

#define MTK_PWR_LVL0    0
#define MTK_PWR_LVL1    1
#define MTK_PWR_LVL2    2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)    (state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)  ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
                (state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
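/*
 * Note: mtk_make_pwrstate_lvl2() is provided by the platform headers
 * (presumably plat_private.h, not shown here). Judging by the state-id
 * values in the comments below, it packs one 4-bit local state per power
 * level (level 0 in the lowest nibble) together with the target power
 * level and the PSTATE type expected by the PSCI framework.
 */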
const unsigned int mtk_pm_idle_states[] = {
        /* State-id - 0x001 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
                MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
        /* State-id - 0x002 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
                MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
        /* State-id - 0x022 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
                MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
        /* State-id - 0x222 */
        mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
                MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
        0,
};
#endif

struct core_context {
        unsigned long timer_data[8];
        unsigned int count;
        unsigned int rst;
        unsigned int abt;
        unsigned int brk;
};

struct cluster_context {
        struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
        struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

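/*
 * Helpers that index dormant_data by the cluster and CPU fields of an MPIDR
 * value, giving each core its own core_context slot for saved state.
 */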
static inline struct cluster_context *system_cluster(
        struct system_context *system,
        uint32_t clusterid)
{
        return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
        uint32_t cpuid)
{
        return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
        uint32_t clusterid;

        clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

        return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
        struct cluster_context *cluster;
        uint32_t cpuid;

        cluster = get_cluster_data(mpidr);
        cpuid = mpidr & MPIDR_CPU_MASK;

        return cluster_core(cluster, cpuid);
}

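/*
 * Layout of core_context.timer_data[] as used by the save/restore code
 * below (each stp/ldp transfers a pair of 64-bit values):
 *   [0] cntkctl_el1    [1] cntp_cval_el0
 *   [2] cntp_tval_el0  [3] cntp_ctl_el0
 *   [4] cntv_tval_el0  [5] cntv_ctl_el0
 */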
static void mt_save_generic_timer(unsigned long *container)
{
        uint64_t ctl;
        uint64_t val;

        __asm__ volatile("mrs %x0, cntkctl_el1\n\t"
                         "mrs %x1, cntp_cval_el0\n\t"
                         "stp %x0, %x1, [%2, #0]"
                         : "=&r" (ctl), "=&r" (val)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
                         "mrs %x1, cntp_ctl_el0\n\t"
                         "stp %x0, %x1, [%2, #16]"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
                         "mrs %x1, cntv_ctl_el0\n\t"
                         "stp %x0, %x1, [%2, #32]"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
        uint64_t ctl;
        uint64_t val;

        __asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
                         "msr cntkctl_el1, %x0\n\t"
                         "msr cntp_cval_el0, %x1"
                         : "=&r" (ctl), "=&r" (val)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
                         "msr cntp_tval_el0, %x0\n\t"
                         "msr cntp_ctl_el0, %x1"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
                         "msr cntv_tval_el0, %x0\n\t"
                         "msr cntv_ctl_el0, %x1"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");
}

static inline uint64_t read_cntpctl(void)
{
        uint64_t cntpctl;

        __asm__ volatile("mrs %x0, cntp_ctl_el0"
                         : "=r" (cntpctl) : : "memory");

        return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
        __asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
        /*
         * Disable the timer and mask the irq to prevent
         * spurious interrupts on this cpu interface. It
         * will bite us when we come back if we don't. It
         * will be replayed on the inbound cluster.
         */
        uint64_t cntpctl = read_cntpctl();

        write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

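/*
 * Save/restore the per-core generic timer context around a power-down:
 * mt_cpu_save() stashes the timer registers in this core's core_context and
 * stops the timer, mt_cpu_restore() puts them back on the way up.
 */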
static void mt_cpu_save(unsigned long mpidr)
{
        struct core_context *core;

        core = get_core_data(mpidr);
        mt_save_generic_timer(core->timer_data);

        /* disable timer irq, and upper layer should enable it again. */
        stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
        struct core_context *core;

        core = get_core_data(mpidr);
        mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
        /* mcusys_save_context: */
        mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
        /* mcusys_restore_context: */
        mt_cpu_restore(mpidr);
}

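/*
 * cpu_standby: temporarily route physical IRQs to EL3 (SCR_EL3.IRQ) so that
 * a pending interrupt can wake the core from wfi, then restore SCR_EL3.
 */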
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
        u_register_t scr;

        scr = read_scr_el3();
        write_scr_el3(scr | SCR_IRQ_BIT);
        isb();
        dsb();
        wfi();
        write_scr_el3(scr);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static uintptr_t secure_entrypoint;

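/*
 * Bring a core online: program the core's reset vector registers ("rv_addr")
 * in MCUCFG with the secure (warm boot) entry point, then ask the SPM to
 * power the core up via spm_hotplug_on().
 */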
static int plat_power_domain_on(unsigned long mpidr)
{
        int rc = PSCI_E_SUCCESS;
        unsigned long cpu_id;
        unsigned long cluster_id;
        uintptr_t rv;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, secure_entrypoint);
        INFO("mt_on[%ld:%ld], entry %x\n",
             cluster_id, cpu_id, mmio_read_32(rv));

        spm_hotplug_on(mpidr);
        return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_off(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Prevent interrupts from spuriously waking up this cpu */
        gicv2_cpuif_disable();

        spm_hotplug_off(mpidr);

        trace_power_flow(mpidr, CPU_DOWN);

        if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();

                trace_power_flow(mpidr, CLUSTER_DOWN);
        }
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
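/*
 * Suspend ordering: if the system power domain stays on, only the SPM MCDI
 * driver is prepared for the CPU (and possibly cluster) off state; if the
 * whole system is going down, the watchdog is suspended, the SCU disabled,
 * the generic timer backed up and SPM system suspend entered instead. The
 * per-core timer context is saved in both cases.
 */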
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();
        unsigned long cluster_id;
        unsigned long cpu_id;
        uintptr_t rv;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, secure_entrypoint);

        if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
                spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
                if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
                        spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
        }

        mt_platform_save_context(mpidr);

        /* Perform the common cluster specific operations */
        if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();
        }

        if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                wdt_suspend();
                disable_scu(mpidr);
                generic_timer_backup();
                spm_system_suspend();
                /* Prevent interrupts from spuriously waking up this cpu */
                gicv2_cpuif_disable();
        }
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();

        assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

        if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
            (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
                mtk_system_pwr_domain_resume();

        if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
                plat_cci_enable();
                trace_power_flow(mpidr, CLUSTER_UP);
        }

        if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
            (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
                return;

        /* Enable the gic cpu interface */
        gicv2_cpuif_enable();
        gicv2_pcpu_distif_init();
        trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
        unsigned long mpidr = read_mpidr_el1();

        if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
                return;

        if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Enable the gic cpu interface */
                plat_arm_gic_init();
                spm_system_suspend_finish();
                enable_scu(mpidr);
                wdt_resume();
        }

        /* Perform the common cluster specific operations */
        if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
                /* Enable coherency if this cluster was off */
                plat_cci_enable();
        }

        mt_platform_restore_context(mpidr);

        if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
                spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
                if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
                        spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
        }

        gicv2_pcpu_distif_init();
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
        assert(PLAT_MAX_PWR_LVL >= 2);

        for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
                req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
        INFO("MTK System Off\n");

        rtc_bbpu_power_down();

        wfi();
        ERROR("MTK System Off: operation not handled.\n");
        panic();
}

static void __dead2 plat_system_reset(void)
{
        /* Trigger a watchdog reset to reboot the system */
        INFO("MTK System Reset\n");

        wdt_trigger_reset();

        wfi();
        ERROR("MTK System Reset: operation not handled.\n");
        panic();
}

#if !PSCI_EXTENDED_STATE_ID
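/*
 * Legacy (non-extended) power_state layout: the requested power level and
 * state type are carried in dedicated fields (psci_get_pstate_pwrlvl()/
 * psci_get_pstate_type()) and the state-id field is expected to be zero.
 */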
static int plat_validate_power_state(unsigned int power_state,
                                     psci_power_state_t *req_state)
{
        int pstate = psci_get_pstate_type(power_state);
        int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
        int i;

        assert(req_state);

        if (pwr_lvl > PLAT_MAX_PWR_LVL)
                return PSCI_E_INVALID_PARAMS;

        /* Sanity check the requested state */
        if (pstate == PSTATE_TYPE_STANDBY) {
                /*
                 * It's possible to enter standby only on power level 0
                 * Ignore any other power level.
                 */
                if (pwr_lvl != 0)
                        return PSCI_E_INVALID_PARAMS;

                req_state->pwr_domain_state[MTK_PWR_LVL0] =
                                        MTK_LOCAL_STATE_RET;
        } else {
                for (i = 0; i <= pwr_lvl; i++)
                        req_state->pwr_domain_state[i] =
                                        MTK_LOCAL_STATE_OFF;
        }

        /*
         * We expect the 'state id' to be zero.
         */
        if (psci_get_pstate_id(power_state))
                return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
}
#else
int plat_validate_power_state(unsigned int power_state,
                              psci_power_state_t *req_state)
{
        unsigned int state_id;
        int i;

        assert(req_state);

        /*
         * Currently we are using a linear search for finding the matching
         * entry in the idle power state array. This can be made a binary
         * search if the number of entries justify the additional complexity.
         */
        for (i = 0; !!mtk_pm_idle_states[i]; i++) {
                if (power_state == mtk_pm_idle_states[i])
                        break;
        }

        /* Return error if entry not found in the idle state array */
        if (!mtk_pm_idle_states[i])
                return PSCI_E_INVALID_PARAMS;

        i = 0;
        state_id = psci_get_pstate_id(power_state);

        /* Parse the State ID and populate the state info parameter */
        while (state_id) {
                req_state->pwr_domain_state[i++] = state_id &
                                                MTK_LOCAL_PSTATE_MASK;
                state_id >>= MTK_LOCAL_PSTATE_WIDTH;
        }

        return PSCI_E_SUCCESS;
}
#endif

void mtk_system_pwr_domain_resume(void)
{
        console_switch_state(CONSOLE_FLAG_BOOT);

        /* Assert system power domain is available on the platform */
        assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

        plat_arm_gic_init();

        console_switch_state(CONSOLE_FLAG_RUNTIME);
}

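/* PSCI operations handed back to the generic framework by plat_setup_psci_ops() */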
static const plat_psci_ops_t plat_plat_pm_ops = {
        .cpu_standby = plat_cpu_standby,
        .pwr_domain_on = plat_power_domain_on,
        .pwr_domain_on_finish = plat_power_domain_on_finish,
        .pwr_domain_off = plat_power_domain_off,
        .pwr_domain_suspend = plat_power_domain_suspend,
        .pwr_domain_suspend_finish = plat_power_domain_suspend_finish,
        .system_off = plat_system_off,
        .system_reset = plat_system_reset,
        .validate_power_state = plat_validate_power_state,
        .get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                        const plat_psci_ops_t **psci_ops)
{
        *psci_ops = &plat_plat_pm_ops;
        secure_entrypoint = sec_entrypoint;
        return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state i.e. for two power states X & Y, if X < Y
 * then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
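/*
 * Example: with the numbering implied by the state-id comments above
 * (MTK_LOCAL_STATE_RUN < RET < OFF), if one CPU requests retention and
 * another requests off for their shared cluster, the coordinated cluster
 * state is retention, the shallower of the two requests.
 */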
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
                                             const plat_local_state_t *states,
                                             unsigned int ncpu)
{
        plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

        assert(ncpu);

        do {
                temp = *states++;
                if (temp < target)
                        target = temp;
        } while (--ncpu);

        return target;
}