xref: /rk3399_ARM-atf/plat/mediatek/mt8173/plat_pm.c (revision 2bab3d527375eee8eb060e8b1ab2c10141e88c06)
17d116dccSCC Ma /*
27d116dccSCC Ma  * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
37d116dccSCC Ma  *
47d116dccSCC Ma  * Redistribution and use in source and binary forms, with or without
57d116dccSCC Ma  * modification, are permitted provided that the following conditions are met:
67d116dccSCC Ma  *
77d116dccSCC Ma  * Redistributions of source code must retain the above copyright notice, this
87d116dccSCC Ma  * list of conditions and the following disclaimer.
97d116dccSCC Ma  *
107d116dccSCC Ma  * Redistributions in binary form must reproduce the above copyright notice,
117d116dccSCC Ma  * this list of conditions and the following disclaimer in the documentation
127d116dccSCC Ma  * and/or other materials provided with the distribution.
137d116dccSCC Ma  *
147d116dccSCC Ma  * Neither the name of ARM nor the names of its contributors may be used
157d116dccSCC Ma  * to endorse or promote products derived from this software without specific
167d116dccSCC Ma  * prior written permission.
177d116dccSCC Ma  *
187d116dccSCC Ma  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
197d116dccSCC Ma  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
207d116dccSCC Ma  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
217d116dccSCC Ma  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
227d116dccSCC Ma  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
237d116dccSCC Ma  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
247d116dccSCC Ma  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
257d116dccSCC Ma  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
267d116dccSCC Ma  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
277d116dccSCC Ma  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
287d116dccSCC Ma  * POSSIBILITY OF SUCH DAMAGE.
297d116dccSCC Ma  */
307d116dccSCC Ma 
317d116dccSCC Ma #include <arch_helpers.h>
327d116dccSCC Ma #include <arm_gic.h>
337d116dccSCC Ma #include <assert.h>
347d116dccSCC Ma #include <bakery_lock.h>
357d116dccSCC Ma #include <cci.h>
367d116dccSCC Ma #include <console.h>
377d116dccSCC Ma #include <debug.h>
387d116dccSCC Ma #include <errno.h>
397d116dccSCC Ma #include <gpio.h>
407d116dccSCC Ma #include <mcucfg.h>
417d116dccSCC Ma #include <mmio.h>
427d116dccSCC Ma #include <mt8173_def.h>
437d116dccSCC Ma #include <mt_cpuxgpt.h> /* generic_timer_backup() */
447d116dccSCC Ma #include <plat_private.h>
457d116dccSCC Ma #include <power_tracer.h>
467d116dccSCC Ma #include <psci.h>
477d116dccSCC Ma #include <rtc.h>
487d116dccSCC Ma #include <scu.h>
497d116dccSCC Ma #include <spm_hotplug.h>
507d116dccSCC Ma #include <spm_mcdi.h>
517d116dccSCC Ma #include <spm_suspend.h>
527d116dccSCC Ma 
/*
 * Per-cpu context preserved across a power-down.
 * timer_data holds the generic timer registers in the layout written by
 * mt_save_generic_timer(): {cntkctl_el1, cntp_cval_el0} at indices 0-1,
 * {cntp_tval_el0, cntp_ctl_el0} at 2-3, {cntv_tval_el0, cntv_ctl_el0} at 4-5
 * (stp stores at byte offsets 0/16/32; indices 6-7 are unused padding).
 * count/rst/abt/brk are not referenced anywhere in this file — presumably
 * bookkeeping for other PM code; confirm before removing.
 */
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};
607d116dccSCC Ma 
/* Saved context for every cpu of one cluster. */
struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};
647d116dccSCC Ma 
/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system.
 * Single-element array so it can be passed around as a pointer naturally.
 */
static struct system_context dormant_data[1];
767d116dccSCC Ma 
777d116dccSCC Ma static inline struct cluster_context *system_cluster(
787d116dccSCC Ma 						struct system_context *system,
797d116dccSCC Ma 						uint32_t clusterid)
807d116dccSCC Ma {
817d116dccSCC Ma 	return &system->cluster[clusterid];
827d116dccSCC Ma }
837d116dccSCC Ma 
847d116dccSCC Ma static inline struct core_context *cluster_core(struct cluster_context *cluster,
857d116dccSCC Ma 						uint32_t cpuid)
867d116dccSCC Ma {
877d116dccSCC Ma 	return &cluster->core[cpuid];
887d116dccSCC Ma }
897d116dccSCC Ma 
907d116dccSCC Ma static struct cluster_context *get_cluster_data(unsigned long mpidr)
917d116dccSCC Ma {
927d116dccSCC Ma 	uint32_t clusterid;
937d116dccSCC Ma 
947d116dccSCC Ma 	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
957d116dccSCC Ma 
967d116dccSCC Ma 	return system_cluster(dormant_data, clusterid);
977d116dccSCC Ma }
987d116dccSCC Ma 
997d116dccSCC Ma static struct core_context *get_core_data(unsigned long mpidr)
1007d116dccSCC Ma {
1017d116dccSCC Ma 	struct cluster_context *cluster;
1027d116dccSCC Ma 	uint32_t cpuid;
1037d116dccSCC Ma 
1047d116dccSCC Ma 	cluster = get_cluster_data(mpidr);
1057d116dccSCC Ma 	cpuid = mpidr & MPIDR_CPU_MASK;
1067d116dccSCC Ma 
1077d116dccSCC Ma 	return cluster_core(cluster, cpuid);
1087d116dccSCC Ma }
1097d116dccSCC Ma 
/*
 * Save this cpu's generic timer registers into 'container'
 * (core_context.timer_data). Each asm block reads two system registers
 * and stores them as a 16-byte pair at byte offsets 0, 16 and 32, so
 * 'container' must provide at least 48 bytes (timer_data[8] does).
 * Restored by mt_restore_generic_timer() with the matching layout.
 */
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* {cntkctl_el1, cntp_cval_el0} -> container[0..1] */
	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* {cntp_tval_el0, cntp_ctl_el0} -> container[2..3] */
	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* {cntv_tval_el0, cntv_ctl_el0} -> container[4..5] */
	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
1367d116dccSCC Ma 
/*
 * Restore this cpu's generic timer registers from 'container', using the
 * exact layout written by mt_save_generic_timer() (pairs at byte offsets
 * 0, 16 and 32). Must run on the cpu whose state was saved.
 */
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* container[0..1] -> {cntkctl_el1, cntp_cval_el0} */
	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* container[2..3] -> {cntp_tval_el0, cntp_ctl_el0} */
	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* container[4..5] -> {cntv_tval_el0, cntv_ctl_el0} */
	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
1637d116dccSCC Ma 
/* Read the EL0 physical timer control register (cntp_ctl_el0). */
static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}
1737d116dccSCC Ma 
/* Write the EL0 physical timer control register (cntp_ctl_el0). */
static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
}
1787d116dccSCC Ma 
static void stop_generic_timer(void)
{
	/*
	 * Clear the enable bit in cntp_ctl_el0 so the physical timer cannot
	 * raise spurious interrupts on this cpu interface while the cpu is
	 * down; it would bite us on the way back in otherwise. The timer
	 * event will be replayed on the inbound cluster.
	 */
	write_cntpctl(clr_cntp_ctl_enable(read_cntpctl()));
}
1917d116dccSCC Ma 
1927d116dccSCC Ma static void mt_cpu_save(unsigned long mpidr)
1937d116dccSCC Ma {
1947d116dccSCC Ma 	struct core_context *core;
1957d116dccSCC Ma 
1967d116dccSCC Ma 	core = get_core_data(mpidr);
1977d116dccSCC Ma 	mt_save_generic_timer(core->timer_data);
1987d116dccSCC Ma 
1997d116dccSCC Ma 	/* disable timer irq, and upper layer should enable it again. */
2007d116dccSCC Ma 	stop_generic_timer();
2017d116dccSCC Ma }
2027d116dccSCC Ma 
2037d116dccSCC Ma static void mt_cpu_restore(unsigned long mpidr)
2047d116dccSCC Ma {
2057d116dccSCC Ma 	struct core_context *core;
2067d116dccSCC Ma 
2077d116dccSCC Ma 	core = get_core_data(mpidr);
2087d116dccSCC Ma 	mt_restore_generic_timer(core->timer_data);
2097d116dccSCC Ma }
2107d116dccSCC Ma 
/* Save all platform context for this cpu before power-down (currently the
 * cpu-local generic timer only). */
static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}
2167d116dccSCC Ma 
/* Restore all platform context for this cpu after power-up (currently the
 * cpu-local generic timer only). */
static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}
2227d116dccSCC Ma 
2237d116dccSCC Ma /*******************************************************************************
2247d116dccSCC Ma * Private function which is used to determine if any platform actions
2257d116dccSCC Ma * should be performed for the specified affinity instance given its
2267d116dccSCC Ma * state. Nothing needs to be done if the 'state' is not off or if this is not
2277d116dccSCC Ma * the highest affinity level which will enter the 'state'.
2287d116dccSCC Ma *******************************************************************************/
2297d116dccSCC Ma static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
2307d116dccSCC Ma {
2317d116dccSCC Ma 	unsigned int max_phys_off_afflvl;
2327d116dccSCC Ma 
2337d116dccSCC Ma 	assert(afflvl <= MPIDR_AFFLVL2);
2347d116dccSCC Ma 
2357d116dccSCC Ma 	if (state != PSCI_STATE_OFF)
2367d116dccSCC Ma 		return -EAGAIN;
2377d116dccSCC Ma 
2387d116dccSCC Ma 	/*
2397d116dccSCC Ma 	 * Find the highest affinity level which will be suspended and postpone
2407d116dccSCC Ma 	 * all the platform specific actions until that level is hit.
2417d116dccSCC Ma 	 */
2427d116dccSCC Ma 	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
2437d116dccSCC Ma 	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
2447d116dccSCC Ma 	if (afflvl != max_phys_off_afflvl)
2457d116dccSCC Ma 		return -EAGAIN;
2467d116dccSCC Ma 
2477d116dccSCC Ma 	return 0;
2487d116dccSCC Ma }
2497d116dccSCC Ma 
2507d116dccSCC Ma /*******************************************************************************
2517d116dccSCC Ma  * MTK_platform handler called when an affinity instance is about to enter
2527d116dccSCC Ma  * standby.
2537d116dccSCC Ma  ******************************************************************************/
2547d116dccSCC Ma static void plat_affinst_standby(unsigned int power_state)
2557d116dccSCC Ma {
2567d116dccSCC Ma 	unsigned int target_afflvl;
2577d116dccSCC Ma 
2587d116dccSCC Ma 	/* Sanity check the requested state */
2597d116dccSCC Ma 	target_afflvl = psci_get_pstate_afflvl(power_state);
2607d116dccSCC Ma 
2617d116dccSCC Ma 	/*
2627d116dccSCC Ma 	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
2637d116dccSCC Ma 	 * on the MTK_platform. Ignore any other affinity level.
2647d116dccSCC Ma 	 */
2657d116dccSCC Ma 	if (target_afflvl == MPIDR_AFFLVL0) {
2667d116dccSCC Ma 		/*
2677d116dccSCC Ma 		 * Enter standby state. dsb is good practice before using wfi
2687d116dccSCC Ma 		 * to enter low power states.
2697d116dccSCC Ma 		 */
2707d116dccSCC Ma 		dsb();
2717d116dccSCC Ma 		wfi();
2727d116dccSCC Ma 	}
2737d116dccSCC Ma }
2747d116dccSCC Ma 
2757d116dccSCC Ma /*******************************************************************************
2767d116dccSCC Ma  * MTK_platform handler called when an affinity instance is about to be turned
2777d116dccSCC Ma  * on. The level and mpidr determine the affinity instance.
2787d116dccSCC Ma  ******************************************************************************/
2797d116dccSCC Ma static int plat_affinst_on(unsigned long mpidr,
2807d116dccSCC Ma 		    unsigned long sec_entrypoint,
2817d116dccSCC Ma 		    unsigned int afflvl,
2827d116dccSCC Ma 		    unsigned int state)
2837d116dccSCC Ma {
2847d116dccSCC Ma 	int rc = PSCI_E_SUCCESS;
2857d116dccSCC Ma 	unsigned long cpu_id;
2867d116dccSCC Ma 	unsigned long cluster_id;
2877d116dccSCC Ma 	uintptr_t rv;
2887d116dccSCC Ma 
2897d116dccSCC Ma 	/*
2907d116dccSCC Ma 	 * It's possible to turn on only affinity level 0 i.e. a cpu
2917d116dccSCC Ma 	 * on the MTK_platform. Ignore any other affinity level.
2927d116dccSCC Ma 	 */
2937d116dccSCC Ma 	if (afflvl != MPIDR_AFFLVL0)
2947d116dccSCC Ma 		return rc;
2957d116dccSCC Ma 
2967d116dccSCC Ma 	cpu_id = mpidr & MPIDR_CPU_MASK;
2977d116dccSCC Ma 	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
2987d116dccSCC Ma 
2997d116dccSCC Ma 	if (cluster_id)
3007d116dccSCC Ma 		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
3017d116dccSCC Ma 	else
3027d116dccSCC Ma 		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
3037d116dccSCC Ma 
3047d116dccSCC Ma 	mmio_write_32(rv, sec_entrypoint);
3057d116dccSCC Ma 	INFO("mt_on[%ld:%ld], entry %x\n",
3067d116dccSCC Ma 		cluster_id, cpu_id, mmio_read_32(rv));
3077d116dccSCC Ma 
3087d116dccSCC Ma 	spm_hotplug_on(mpidr);
3097d116dccSCC Ma 
3107d116dccSCC Ma 	return rc;
3117d116dccSCC Ma }
3127d116dccSCC Ma 
/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is
 * wise to flush any write to a global immediately, to prevent unpredictable
 * results.
 ******************************************************************************/
3257d116dccSCC Ma static void plat_affinst_off(unsigned int afflvl, unsigned int state)
3267d116dccSCC Ma {
3277d116dccSCC Ma 	unsigned long mpidr = read_mpidr_el1();
3287d116dccSCC Ma 
3297d116dccSCC Ma 	/* Determine if any platform actions need to be executed. */
3307d116dccSCC Ma 	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
3317d116dccSCC Ma 		return;
3327d116dccSCC Ma 
3337d116dccSCC Ma 	/* Prevent interrupts from spuriously waking up this cpu */
3347d116dccSCC Ma 	arm_gic_cpuif_deactivate();
3357d116dccSCC Ma 
3367d116dccSCC Ma 	spm_hotplug_off(mpidr);
3377d116dccSCC Ma 
3387d116dccSCC Ma 	trace_power_flow(mpidr, CPU_DOWN);
3397d116dccSCC Ma 
3407d116dccSCC Ma 	if (afflvl != MPIDR_AFFLVL0) {
3417d116dccSCC Ma 		/* Disable coherency if this cluster is to be turned off */
3427d116dccSCC Ma 		plat_cci_disable();
3437d116dccSCC Ma 
3447d116dccSCC Ma 		trace_power_flow(mpidr, CLUSTER_DOWN);
3457d116dccSCC Ma 	}
3467d116dccSCC Ma }
3477d116dccSCC Ma 
/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is
 * wise to flush any write to a global immediately, to prevent unpredictable
 * results.
 ******************************************************************************/
3607d116dccSCC Ma static void plat_affinst_suspend(unsigned long sec_entrypoint,
3617d116dccSCC Ma 			  unsigned int afflvl,
3627d116dccSCC Ma 			  unsigned int state)
3637d116dccSCC Ma {
3647d116dccSCC Ma 	unsigned long mpidr = read_mpidr_el1();
3657d116dccSCC Ma 	unsigned long cluster_id;
3667d116dccSCC Ma 	unsigned long cpu_id;
3677d116dccSCC Ma 	uintptr_t rv;
3687d116dccSCC Ma 
3697d116dccSCC Ma 	/* Determine if any platform actions need to be executed. */
3707d116dccSCC Ma 	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
3717d116dccSCC Ma 		return;
3727d116dccSCC Ma 
3737d116dccSCC Ma 	cpu_id = mpidr & MPIDR_CPU_MASK;
3747d116dccSCC Ma 	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
3757d116dccSCC Ma 
3767d116dccSCC Ma 	if (cluster_id)
3777d116dccSCC Ma 		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
3787d116dccSCC Ma 	else
3797d116dccSCC Ma 		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
3807d116dccSCC Ma 
3817d116dccSCC Ma 	mmio_write_32(rv, sec_entrypoint);
3827d116dccSCC Ma 
3837d116dccSCC Ma 	if (afflvl == MPIDR_AFFLVL0)
3847d116dccSCC Ma 		spm_mcdi_prepare(mpidr);
3857d116dccSCC Ma 
3867d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL0)
3877d116dccSCC Ma 		mt_platform_save_context(mpidr);
3887d116dccSCC Ma 
3897d116dccSCC Ma 	/* Perform the common cluster specific operations */
3907d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL1) {
3917d116dccSCC Ma 		/* Disable coherency if this cluster is to be turned off */
3927d116dccSCC Ma 		plat_cci_disable();
3937d116dccSCC Ma 		disable_scu(mpidr);
3947d116dccSCC Ma 
3957d116dccSCC Ma 		trace_power_flow(mpidr, CLUSTER_SUSPEND);
3967d116dccSCC Ma 	}
3977d116dccSCC Ma 
3987d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL2) {
3997d116dccSCC Ma 		generic_timer_backup();
4007d116dccSCC Ma 		spm_system_suspend();
4017d116dccSCC Ma 		/* Prevent interrupts from spuriously waking up this cpu */
4027d116dccSCC Ma 		arm_gic_cpuif_deactivate();
4037d116dccSCC Ma 	}
4047d116dccSCC Ma }
4057d116dccSCC Ma 
4067d116dccSCC Ma /*******************************************************************************
4077d116dccSCC Ma  * MTK_platform handler called when an affinity instance has just been powered
4087d116dccSCC Ma  * on after being turned off earlier. The level and mpidr determine the affinity
4097d116dccSCC Ma  * instance. The 'state' arg. allows the platform to decide whether the cluster
4107d116dccSCC Ma  * was turned off prior to wakeup and do what's necessary to setup it up
4117d116dccSCC Ma  * correctly.
4127d116dccSCC Ma  ******************************************************************************/
4137d116dccSCC Ma static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
4147d116dccSCC Ma {
4157d116dccSCC Ma 	unsigned long mpidr = read_mpidr_el1();
4167d116dccSCC Ma 
4177d116dccSCC Ma 	/* Determine if any platform actions need to be executed. */
4187d116dccSCC Ma 	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
4197d116dccSCC Ma 		return;
4207d116dccSCC Ma 
4217d116dccSCC Ma 	/* Perform the common cluster specific operations */
4227d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL1) {
4237d116dccSCC Ma 		enable_scu(mpidr);
4247d116dccSCC Ma 
4257d116dccSCC Ma 		/* Enable coherency if this cluster was off */
4267d116dccSCC Ma 		plat_cci_enable();
4277d116dccSCC Ma 		trace_power_flow(mpidr, CLUSTER_UP);
4287d116dccSCC Ma 	}
4297d116dccSCC Ma 
4307d116dccSCC Ma 	/* Enable the gic cpu interface */
4317d116dccSCC Ma 	arm_gic_cpuif_setup();
4327d116dccSCC Ma 	arm_gic_pcpu_distif_setup();
4337d116dccSCC Ma 	trace_power_flow(mpidr, CPU_UP);
4347d116dccSCC Ma }
4357d116dccSCC Ma 
4367d116dccSCC Ma /*******************************************************************************
4377d116dccSCC Ma  * MTK_platform handler called when an affinity instance has just been powered
4387d116dccSCC Ma  * on after having been suspended earlier. The level and mpidr determine the
4397d116dccSCC Ma  * affinity instance.
4407d116dccSCC Ma  ******************************************************************************/
4417d116dccSCC Ma static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
4427d116dccSCC Ma {
4437d116dccSCC Ma 	unsigned long mpidr = read_mpidr_el1();
4447d116dccSCC Ma 
4457d116dccSCC Ma 	/* Determine if any platform actions need to be executed. */
4467d116dccSCC Ma 	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
4477d116dccSCC Ma 		return;
4487d116dccSCC Ma 
4497d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL2) {
4507d116dccSCC Ma 		/* Enable the gic cpu interface */
4517d116dccSCC Ma 		arm_gic_setup();
4527d116dccSCC Ma 		arm_gic_cpuif_setup();
4537d116dccSCC Ma 		spm_system_suspend_finish();
4547d116dccSCC Ma 	}
4557d116dccSCC Ma 
4567d116dccSCC Ma 	/* Perform the common cluster specific operations */
4577d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL1) {
4587d116dccSCC Ma 		enable_scu(mpidr);
4597d116dccSCC Ma 
4607d116dccSCC Ma 		/* Enable coherency if this cluster was off */
4617d116dccSCC Ma 		plat_cci_enable();
4627d116dccSCC Ma 		trace_power_flow(mpidr, CLUSTER_UP);
4637d116dccSCC Ma 	}
4647d116dccSCC Ma 
4657d116dccSCC Ma 	if (afflvl >= MPIDR_AFFLVL0)
4667d116dccSCC Ma 		mt_platform_restore_context(mpidr);
4677d116dccSCC Ma 
4687d116dccSCC Ma 	if (afflvl == MPIDR_AFFLVL0)
4697d116dccSCC Ma 		spm_mcdi_finish(mpidr);
4707d116dccSCC Ma 
4717d116dccSCC Ma 	arm_gic_pcpu_distif_setup();
4727d116dccSCC Ma }
4737d116dccSCC Ma 
/* Power state used by PSCI SYSTEM_SUSPEND for this platform. */
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}
4797d116dccSCC Ma 
4807d116dccSCC Ma /*******************************************************************************
4817d116dccSCC Ma  * MTK handlers to shutdown/reboot the system
4827d116dccSCC Ma  ******************************************************************************/
/*
 * Shut the system down: drive GPIO120 low and ask the RTC BBPU block to
 * cut power. The wfi should never return; reaching the code after it
 * means the power-off request was not honoured, so panic.
 */
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	gpio_set(GPIO120, GPIO_OUT_ZERO);
	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}
4947d116dccSCC Ma 
/*
 * Reboot the system via the MTK watchdog: clear dual-mode/irq behaviour,
 * enable the external reset signal, then trigger a software reset. Each
 * mode-register write must carry MTK_WDT_MODE_KEY to be accepted. The wfi
 * should never return; falling through to panic() means the watchdog did
 * not reset the SoC.
 */
static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
5107d116dccSCC Ma 
5117d116dccSCC Ma /*******************************************************************************
5127d116dccSCC Ma  * Export the platform handlers to enable psci to invoke them
5137d116dccSCC Ma  ******************************************************************************/
/* Platform power-management ops table handed to the PSCI layer. */
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};
5257d116dccSCC Ma 
5267d116dccSCC Ma /*******************************************************************************
5277d116dccSCC Ma  * Export the platform specific power ops & initialize the mtk_platform power
5287d116dccSCC Ma  * controller
5297d116dccSCC Ma  ******************************************************************************/
/*
 * Export the platform's power ops table to the PSCI framework.
 * Always succeeds (returns 0).
 */
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
535