/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <mcucfg.h>
#include <mmio.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

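/*
 * Per-core context preserved across a power down. timer_data[] holds the
 * generic timer state saved by mt_save_generic_timer(); the remaining
 * fields are not referenced in this file.
 */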
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top-level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Single instance which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
						struct system_context *system,
						uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}

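/*
 * Save the per-CPU generic timer registers into the container:
 * CNTKCTL_EL1 and CNTP_CVAL_EL0 at offset 0, CNTP_TVAL_EL0 and
 * CNTP_CTL_EL0 at offset 16, CNTV_TVAL_EL0 and CNTV_CTL_EL0 at
 * offset 32 (six 64-bit slots in total).
 */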
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

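/*
 * Restore the registers saved by mt_save_generic_timer(), reading them
 * back from the same container layout.
 */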
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer irq; the upper layer is expected to enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static int plat_affinst_on(unsigned long mpidr,
		    unsigned long sec_entrypoint,
		    unsigned int afflvl,
		    unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

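	/*
	 * Program the low word of this CPU's reset vector with the warm-boot
	 * entry point: cluster 1 cores are handled by the MP1 registers,
	 * cluster 0 cores by the MP0 registers.
	 */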
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%lu:%lu], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);

	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	arm_gic_cpuif_deactivate();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_suspend(unsigned long sec_entrypoint,
			  unsigned int afflvl,
			  unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

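	/*
	 * Program the warm-boot entry point so this CPU resumes execution at
	 * sec_entrypoint after wakeup.
	 */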
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_prepare_for_off_state(mpidr, afflvl);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		arm_gic_cpuif_deactivate();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
	trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Initialize the gic and enable its cpu interface */
		arm_gic_setup();
		arm_gic_cpuif_setup();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_finish_for_on_state(mpidr, afflvl);

	arm_gic_pcpu_distif_setup();
}

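/*
 * Report the power state to be used for PSCI SYSTEM_SUSPEND: a power-down
 * state at the system power level.
 */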
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1 (power down), PowerLevel: 2 (system) */
	return psci_make_powerstate(0, 1, 2);
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Configure the watchdog and trigger a software reset */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
527