/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <mcucfg.h>
#include <mmio.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

#if !ENABLE_PLAT_COMPAT
#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
#endif
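
/*
 * Note: pwr_domain_state[] is indexed by power level: entry 0 is the CPU
 * (core) state, entry 1 the cluster state and entry 2 the system state.
 * MTK_SYSTEM_PWR_STATE() folds to 0 (run) when the platform only implements
 * two power levels, so callers need no #ifdefs. For example, a full system
 * suspend, as requested by plat_get_sys_suspend_power_state() below, arrives
 * here as {OFF, OFF, OFF}.
 */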

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
						struct system_context *system,
						uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}
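
/*
 * On MT8173, MPIDR Aff0 (bits[7:0]) is the CPU number within a cluster and
 * Aff1 (bits[15:8]) the cluster number, so e.g. an mpidr of 0x101 resolves
 * to cluster 1 / cpu 1 in dormant_data above.
 */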

static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

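	/*
	 * Layout written by the stp pairs below: container[0] = CNTKCTL_EL1,
	 * [1] = CNTP_CVAL_EL0, [2] = CNTP_TVAL_EL0, [3] = CNTP_CTL_EL0,
	 * [4] = CNTV_TVAL_EL0, [5] = CNTV_CTL_EL0 (byte offsets 0/16/32).
	 * mt_restore_generic_timer() reads the same slots back.
	 */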
	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the IRQ to prevent
	 * spurious interrupts on this CPU interface. If we
	 * don't, it will bite us when we come back. The IRQ
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer IRQ; the upper layer should enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}
#else
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

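	/*
	 * Route physical IRQs to EL3 while in standby: with interrupts
	 * masked at EL3, a pending IRQ then wakes the core from wfi
	 * without being taken. Restoring SCR_EL3 afterwards lets the
	 * interrupt be handled normally at the lower exception level.
	 */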
	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static int plat_affinst_on(unsigned long mpidr,
		    unsigned long sec_entrypoint,
		    unsigned int afflvl,
		    unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

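	/*
	 * Program the per-core "reset vector" (rv_addr) register in mcucfg
	 * with the warm-boot entrypoint; the core fetches this address when
	 * the SPM powers it up. The same pattern recurs in the handlers below.
	 */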
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);

	return rc;
}
#else
static uintptr_t secure_entrypoint;

static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	arm_gic_cpuif_deactivate();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
#else
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	arm_gic_cpuif_deactivate();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend(unsigned long sec_entrypoint,
			  unsigned int afflvl,
			  unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_prepare_for_off_state(mpidr, afflvl);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		arm_gic_cpuif_deactivate();
	}
}
#else
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

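	/*
	 * For core/cluster-level suspend (i.e. not a full system off), hand
	 * the affected levels to the SPM MCDI driver; spm_system_suspend()
	 * below covers the whole-system case instead.
	 */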
	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		arm_gic_cpuif_deactivate();
	}
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
	trace_power_flow(mpidr, CPU_UP);
}
#else
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

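	/*
	 * When resuming from a full system suspend, the GIC CPU interface
	 * was already re-initialized in mtk_system_pwr_domain_resume()
	 * above, so skip the per-cpu GIC setup below.
	 */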
	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
	trace_power_flow(mpidr, CPU_UP);
}
#endif

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
#if ENABLE_PLAT_COMPAT
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Enable the gic cpu interface */
		arm_gic_setup();
		arm_gic_cpuif_setup();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_finish_for_on_state(mpidr, afflvl);

	arm_gic_pcpu_distif_setup();
}
#else
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

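	/*
	 * A retention (standby) state powers nothing down, so there is
	 * no context to restore.
	 */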
	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		arm_gic_setup();
		arm_gic_cpuif_setup();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	arm_gic_pcpu_distif_setup();
}
#endif

#if ENABLE_PLAT_COMPAT
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1 (power down), PowerLevel: 2 (system) */
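	/*
	 * In the original (non-extended) PSCI power_state layout this is
	 * (2 << 24) | (1 << 16) | 0 = 0x02010000.
	 */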
	return psci_make_powerstate(0, 1, 2);
}
#else
static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}
#endif

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Trigger a full-chip reset through the MTK watchdog */
	INFO("MTK System Reset\n");

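	/*
	 * Sequence: take the WDT out of dual/IRQ mode so a timeout resets
	 * rather than interrupts, enable the external reset output (EXTEN),
	 * then kick the software-reset register. The *_KEY values appear to
	 * act as write-enable magics for the protected WDT registers.
	 */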
	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

#if !ENABLE_PLAT_COMPAT
static int plat_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

void mtk_system_pwr_domain_resume(void)
{
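	/*
	 * The console UART loses its state across a system suspend, so
	 * reinitialize it first; the parameters presumably match the
	 * boot-time console setup.
	 */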
	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
}
#endif

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
#else
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state, i.e. for two power states X & Y, if
 * X < Y then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
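/*
 * Example, assuming the usual encoding (MTK_LOCAL_STATE_RET = 1 <
 * MTK_LOCAL_STATE_OFF = 2): requests of {RET, OFF} from two cpus coordinate
 * to RET, keeping the shared domain in the shallowest state any cpu needs.
 */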
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}
#endif
819