xref: /rk3399_ARM-atf/plat/mediatek/mt8173/plat_pm.c (revision 8bc20038afc4ac87a35b507bfdf1e5d2301bcfcf)
/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <gicv2.h>
#include <mcucfg.h>
#include <mmio.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_arm.h>
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

#if !ENABLE_PLAT_COMPAT
#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
#endif

#if PSCI_EXTENDED_STATE_ID
/*
 *  The table storing the valid idle power states. Ensure that the
 *  array entries are populated in ascending order of state-id to
 *  enable us to use binary search during power state validation.
 *  The table must be terminated by a NULL entry.
 */
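/*
 * State-id layout (a sketch inferred from the entries below): each
 * power level contributes MTK_LOCAL_PSTATE_WIDTH bits, with the core
 * state in the lowest field, then the cluster state, then the system
 * state, and RUN/RET/OFF encoded as 0/1/2. For example 0x022 requests
 * core and cluster OFF with the system level still running, while
 * 0x222 requests a full system power down.
 */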
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif

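/*
 * Per-core context preserved across a power-down cycle; timer_data
 * holds the generic timer registers saved and restored by
 * mt_save_generic_timer()/mt_restore_generic_timer() below.
 */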
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top-level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top-level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
						struct system_context *system,
						uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}

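/*
 * Save the per-core generic timer state into 'container':
 * CNTKCTL_EL1 and CNTP_CVAL_EL0 at offset 0, CNTP_TVAL_EL0 and
 * CNTP_CTL_EL0 at offset 16, CNTV_TVAL_EL0 and CNTV_CTL_EL0 at
 * offset 32.
 */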
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

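/*
 * Restore the registers saved by mt_save_generic_timer(), reading the
 * same three register pairs back from the same offsets in 'container'.
 */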
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* disable timer irq, and upper layer should enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
* Private function which is used to determine if any platform actions
* should be performed for the specified affinity instance given its
* state. Nothing needs to be done if the 'state' is not off or if this is not
* the highest affinity level which will enter the 'state'.
*******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}
#else
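/*
 * cpu_standby handler: execute wfi with physical IRQs routed to EL3
 * (SCR_EL3.IRQ set) so that an interrupt can wake the core from the
 * retention state; the original SCR_EL3 value is restored on wakeup.
 */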
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static int plat_affinst_on(unsigned long mpidr,
		    unsigned long sec_entrypoint,
		    unsigned int afflvl,
		    unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);

	return rc;
}
#else
static uintptr_t secure_entrypoint;

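/*
 * Power a core on: program the warm-boot entry point into the core's
 * reset-vector register (rv_addr) in MCUCFG, then request the SPM to
 * power the core up via spm_hotplug_on().
 */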
static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It will be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
#else
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It will be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend(unsigned long sec_entrypoint,
			  unsigned int afflvl,
			  unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_prepare_for_off_state(mpidr, afflvl);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
#else
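/*
 * Suspend path: program the resume entry point into the reset vector,
 * let the SPM/MCDI driver prepare the core (and, if necessary, the
 * cluster) for the off state unless the whole system is going down,
 * save the per-core timer context, drop CCI coherency when the
 * cluster is being turned off, and for a system-level suspend disable
 * the SCU, back up the global timer and enter the SPM suspend flow
 * with the GIC CPU interface masked.
 */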
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
#else
void mtk_system_pwr_domain_resume(void);

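/*
 * on_finish path: run the system resume hook if the system power
 * domain was off, re-enable CCI coherency if the cluster was off and,
 * unless the resume hook has already reinitialized the GIC, bring the
 * GIC CPU interface and per-cpu distributor state back up.
 */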
static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_finish_for_on_state(mpidr, afflvl);

	gicv2_pcpu_distif_init();
}
#else
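/*
 * suspend_finish path: nothing to undo if the core only entered the
 * retention state. Otherwise reverse the suspend sequence: for a
 * system-level wakeup reinitialize the GIC, finish the SPM suspend
 * flow and re-enable the SCU; then restore coherency and the per-core
 * timer context, notify the MCDI driver when this was not a full
 * system suspend, and refresh the per-cpu GIC distributor state.
 */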
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}
#endif

#if ENABLE_PLAT_COMPAT
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}
#else
static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}
#endif

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Trigger a software reset through the MTK watchdog */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

#if !ENABLE_PLAT_COMPAT
#if !PSCI_EXTENDED_STATE_ID
static int plat_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
#else
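/*
 * Extended state-id version: look the requested power_state up in
 * mtk_pm_idle_states[] and unpack the state-id in
 * MTK_LOCAL_PSTATE_WIDTH-bit fields, lowest power level first. For
 * example, the deepest entry above (state-id 0x222) expands to
 * MTK_LOCAL_STATE_OFF at power levels 0, 1 and 2.
 */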
int plat_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 *  Currently we are using a linear search for finding the matching
	 *  entry in the idle power state array. This can be made a binary
	 *  search if the number of entries justifies the additional complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
#endif

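/*
 * Called when the system power domain is coming back up: bring the
 * console back online and reinitialize the GIC before the rest of the
 * resume path runs.
 */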
void mtk_system_pwr_domain_resume(void)
{
	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();
}
#endif

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
#else
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

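/*
 * Stash the warm-boot entry point for the on/suspend handlers above
 * and hand the ops table back to the PSCI framework.
 */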
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state i.e. for two power states X & Y, if X < Y
 * then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}
#endif