xref: /rk3399_ARM-atf/plat/mediatek/mt8173/plat_pm.c (revision c3cf06f1a3a9b9ee8ac7a0ae505f95c45f7dca84)
1 /*
2  * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch_helpers.h>
8 #include <assert.h>
9 #include <bakery_lock.h>
10 #include <cci.h>
11 #include <console.h>
12 #include <debug.h>
13 #include <errno.h>
14 #include <gicv2.h>
15 #include <mcucfg.h>
16 #include <mmio.h>
17 #include <mt8173_def.h>
18 #include <mt_cpuxgpt.h> /* generic_timer_backup() */
19 #include <plat_arm.h>
20 #include <plat_private.h>
21 #include <power_tracer.h>
22 #include <psci.h>
23 #include <rtc.h>
24 #include <scu.h>
25 #include <spm_hotplug.h>
26 #include <spm_mcdi.h>
27 #include <spm_suspend.h>
28 
29 #define MTK_PWR_LVL0	0
30 #define MTK_PWR_LVL1	1
31 #define MTK_PWR_LVL2	2
32 
33 /* Macros to read the MTK power domain state */
34 #define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
35 #define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
36 #define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
37 			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
38 
39 #if PSCI_EXTENDED_STATE_ID
40 /*
41  *  The table storing the valid idle power states. Ensure that the
42  *  array entries are populated in ascending order of state-id to
43  *  enable us to use binary search during power state validation.
44  *  The table must be terminated by a zero entry.
45  */
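/*
 * Encoding sketch (an assumption inferred from the state-id values below and
 * from the MTK_LOCAL_PSTATE_WIDTH-wide fields parsed back out in
 * plat_validate_power_state): each power level appears to take one 4-bit
 * field of the state-id, level 0 in the least significant field, with
 * retention = 0x1 and power-down = 0x2. For example, state-id 0x022 requests
 * power-down for the core (level 0) and the cluster (level 1) while the
 * system (level 2) keeps running.
 */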
46 const unsigned int mtk_pm_idle_states[] = {
47 	/* State-id - 0x001 */
48 	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
49 		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
50 	/* State-id - 0x002 */
51 	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
52 		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
53 	/* State-id - 0x022 */
54 	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
55 		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
56 #if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
57 	/* State-id - 0x222 */
58 	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
59 		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
60 #endif
61 	0,
62 };
63 #endif
64 
65 struct core_context {
66 	unsigned long timer_data[8];
67 	unsigned int count;
68 	unsigned int rst;
69 	unsigned int abt;
70 	unsigned int brk;
71 };
72 
73 struct cluster_context {
74 	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
75 };
76 
77 /*
78  * Top level structure to hold the complete context of a multi-cluster system
79  */
80 struct system_context {
81 	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
82 };
83 
84 /*
85  * Top level structure which encapsulates the context of the entire system
86  */
87 static struct system_context dormant_data[1];
88 
89 static inline struct cluster_context *system_cluster(
90 						struct system_context *system,
91 						uint32_t clusterid)
92 {
93 	return &system->cluster[clusterid];
94 }
95 
96 static inline struct core_context *cluster_core(struct cluster_context *cluster,
97 						uint32_t cpuid)
98 {
99 	return &cluster->core[cpuid];
100 }
101 
102 static struct cluster_context *get_cluster_data(unsigned long mpidr)
103 {
104 	uint32_t clusterid;
105 
106 	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
107 
108 	return system_cluster(dormant_data, clusterid);
109 }
110 
111 static struct core_context *get_core_data(unsigned long mpidr)
112 {
113 	struct cluster_context *cluster;
114 	uint32_t cpuid;
115 
116 	cluster = get_cluster_data(mpidr);
117 	cpuid = mpidr & MPIDR_CPU_MASK;
118 
119 	return cluster_core(cluster, cpuid);
120 }
121 
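/*
 * Save this core's generic timer state into 'container' (the core's
 * timer_data[] area): CNTKCTL_EL1 and CNTP_CVAL_EL0 at byte offset 0,
 * CNTP_TVAL_EL0 and CNTP_CTL_EL0 at offset 16, CNTV_TVAL_EL0 and
 * CNTV_CTL_EL0 at offset 32.
 */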
122 static void mt_save_generic_timer(unsigned long *container)
123 {
124 	uint64_t ctl;
125 	uint64_t val;
126 
127 	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
128 			 "mrs	%x1, cntp_cval_el0\n\t"
129 			 "stp	%x0, %x1, [%2, #0]"
130 			 : "=&r" (ctl), "=&r" (val)
131 			 : "r" (container)
132 			 : "memory");
133 
134 	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
135 			 "mrs	%x1, cntp_ctl_el0\n\t"
136 			 "stp	%x0, %x1, [%2, #16]"
137 			 : "=&r" (val), "=&r" (ctl)
138 			 : "r" (container)
139 			 : "memory");
140 
141 	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
142 			 "mrs	%x1, cntv_ctl_el0\n\t"
143 			 "stp	%x0, %x1, [%2, #32]"
144 			 : "=&r" (val), "=&r" (ctl)
145 			 : "r" (container)
146 			 : "memory");
147 }
148 
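/*
 * Restore the generic timer state saved by mt_save_generic_timer(), reading
 * the register pairs back from the same offsets in 'container'.
 */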
149 static void mt_restore_generic_timer(unsigned long *container)
150 {
151 	uint64_t ctl;
152 	uint64_t val;
153 
154 	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
155 			 "msr	cntkctl_el1, %x0\n\t"
156 			 "msr	cntp_cval_el0, %x1"
157 			 : "=&r" (ctl), "=&r" (val)
158 			 : "r" (container)
159 			 : "memory");
160 
161 	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
162 			 "msr	cntp_tval_el0, %x0\n\t"
163 			 "msr	cntp_ctl_el0, %x1"
164 			 : "=&r" (val), "=&r" (ctl)
165 			 : "r" (container)
166 			 : "memory");
167 
168 	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
169 			 "msr	cntv_tval_el0, %x0\n\t"
170 			 "msr	cntv_ctl_el0, %x1"
171 			 : "=&r" (val), "=&r" (ctl)
172 			 : "r" (container)
173 			 : "memory");
174 }
175 
176 static inline uint64_t read_cntpctl(void)
177 {
178 	uint64_t cntpctl;
179 
180 	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
181 			 : "=r" (cntpctl) : : "memory");
182 
183 	return cntpctl;
184 }
185 
186 static inline void write_cntpctl(uint64_t cntpctl)
187 {
188 	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
189 }
190 
191 static void stop_generic_timer(void)
192 {
193 	/*
194 	 * Disable the timer and mask the irq to prevent
195 	 * spurious interrupts on this cpu interface. If we don't,
196 	 * it will bite us when we come back. The interrupt will be
197 	 * replayed on the inbound cluster.
198 	 */
199 	uint64_t cntpctl = read_cntpctl();
200 
201 	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
202 }
203 
204 static void mt_cpu_save(unsigned long mpidr)
205 {
206 	struct core_context *core;
207 
208 	core = get_core_data(mpidr);
209 	mt_save_generic_timer(core->timer_data);
210 
211 	/* Disable the timer irq; the upper layer should enable it again. */
212 	stop_generic_timer();
213 }
214 
215 static void mt_cpu_restore(unsigned long mpidr)
216 {
217 	struct core_context *core;
218 
219 	core = get_core_data(mpidr);
220 	mt_restore_generic_timer(core->timer_data);
221 }
222 
223 static void mt_platform_save_context(unsigned long mpidr)
224 {
225 	/* mcusys_save_context: */
226 	mt_cpu_save(mpidr);
227 }
228 
229 static void mt_platform_restore_context(unsigned long mpidr)
230 {
231 	/* mcusys_restore_context: */
232 	mt_cpu_restore(mpidr);
233 }
234 
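/*
 * cpu_standby handler: temporarily route physical IRQs to EL3 (SCR_EL3.IRQ)
 * so that a pending interrupt wakes the core from wfi, then restore the
 * original SCR_EL3 value so the interrupt is handled normally after the
 * standby exit.
 */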
235 static void plat_cpu_standby(plat_local_state_t cpu_state)
236 {
237 	uint64_t scr;
238 
239 	scr = read_scr_el3();
240 	write_scr_el3(scr | SCR_IRQ_BIT);
241 	isb();
242 	dsb();
243 	wfi();
244 	write_scr_el3(scr);
245 }
246 
247 /*******************************************************************************
248  * MTK_platform handler called when an affinity instance is about to be turned
249  * on. The level and mpidr determine the affinity instance.
250  ******************************************************************************/
251 static uintptr_t secure_entrypoint;
252 
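/*
 * Write the BL31 warm entry point into the target core's reset vector
 * (rv_addr) register in mcucfg and ask the SPM to power the core on.
 */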
253 static int plat_power_domain_on(unsigned long mpidr)
254 {
255 	int rc = PSCI_E_SUCCESS;
256 	unsigned long cpu_id;
257 	unsigned long cluster_id;
258 	uintptr_t rv;
259 
260 	cpu_id = mpidr & MPIDR_CPU_MASK;
261 	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
262 
263 	if (cluster_id)
264 		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
265 	else
266 		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
267 
268 	mmio_write_32(rv, secure_entrypoint);
269 	INFO("mt_on[%ld:%ld], entry %x\n",
270 		cluster_id, cpu_id, mmio_read_32(rv));
271 
272 	spm_hotplug_on(mpidr);
273 	return rc;
274 }
275 
276 /*******************************************************************************
277  * MTK_platform handler called when an affinity instance is about to be turned
278  * off. The level and mpidr determine the affinity instance. The 'state' arg.
279  * allows the platform to decide whether the cluster is being turned off and
280  * take apt actions.
281  *
282  * CAUTION: This function is called with coherent stacks so that caches can be
283  * turned off, flushed and coherency disabled. There is no guarantee that caches
284  * will remain turned on across calls to this function as each affinity level is
285  * dealt with. So do not write & read global variables across calls. It would be
286  * wise to flush any write to a global variable to prevent unpredictable results.
287  ******************************************************************************/
288 static void plat_power_domain_off(const psci_power_state_t *state)
289 {
290 	unsigned long mpidr = read_mpidr_el1();
291 
292 	/* Prevent interrupts from spuriously waking up this cpu */
293 	gicv2_cpuif_disable();
294 
295 	spm_hotplug_off(mpidr);
296 
297 	trace_power_flow(mpidr, CPU_DOWN);
298 
299 	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
300 		/* Disable coherency if this cluster is to be turned off */
301 		plat_cci_disable();
302 
303 		trace_power_flow(mpidr, CLUSTER_DOWN);
304 	}
305 }
306 
307 /*******************************************************************************
308  * MTK_platform handler called when an affinity instance is about to be
309  * suspended. The level and mpidr determine the affinity instance. The 'state'
310  * arg. allows the platform to decide whether the cluster is being turned off
311  * and take apt actions.
312  *
313  * CAUTION: This function is called with coherent stacks so that caches can be
314  * turned off, flushed and coherency disabled. There is no guarantee that caches
315  * will remain turned on across calls to this function as each affinity level is
316  * dealt with. So do not write & read global variables across calls. It would be
317  * wise to flush any write to a global variable to prevent unpredictable results.
318  ******************************************************************************/
319 static void plat_power_domain_suspend(const psci_power_state_t *state)
320 {
321 	unsigned long mpidr = read_mpidr_el1();
322 	unsigned long cluster_id;
323 	unsigned long cpu_id;
324 	uintptr_t rv;
325 
326 	cpu_id = mpidr & MPIDR_CPU_MASK;
327 	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
328 
329 	if (cluster_id)
330 		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
331 	else
332 		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
333 
334 	mmio_write_32(rv, secure_entrypoint);
335 
336 	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
337 		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
338 		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
339 			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
340 	}
341 
342 	mt_platform_save_context(mpidr);
343 
344 	/* Perform the common cluster specific operations */
345 	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
346 		/* Disable coherency if this cluster is to be turned off */
347 		plat_cci_disable();
348 	}
349 
350 	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
351 		disable_scu(mpidr);
352 		generic_timer_backup();
353 		spm_system_suspend();
354 		/* Prevent interrupts from spuriously waking up this cpu */
355 		gicv2_cpuif_disable();
356 	}
357 }
358 
359 /*******************************************************************************
360  * MTK_platform handler called when an affinity instance has just been powered
361  * on after being turned off earlier. The level and mpidr determine the affinity
362  * instance. The 'state' arg. allows the platform to decide whether the cluster
363  * was turned off prior to wakeup and do what's necessary to set it up
364  * correctly.
365  ******************************************************************************/
366 void mtk_system_pwr_domain_resume(void);
367 
368 static void plat_power_domain_on_finish(const psci_power_state_t *state)
369 {
370 	unsigned long mpidr = read_mpidr_el1();
371 
372 	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);
373 
374 	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
375 		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
376 		mtk_system_pwr_domain_resume();
377 
378 	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
379 		plat_cci_enable();
380 		trace_power_flow(mpidr, CLUSTER_UP);
381 	}
382 
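	/*
	 * If the system power domain was off, mtk_system_pwr_domain_resume()
	 * above has already re-initialised the GIC, so skip the per-cpu GIC
	 * setup below.
	 */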
383 	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
384 		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
385 		return;
386 
387 	/* Enable the gic cpu interface */
388 	gicv2_cpuif_enable();
389 	gicv2_pcpu_distif_init();
390 	trace_power_flow(mpidr, CPU_UP);
391 }
392 
393 /*******************************************************************************
394  * MTK_platform handler called when an affinity instance has just been powered
395  * on after having been suspended earlier. The level and mpidr determine the
396  * affinity instance.
397  ******************************************************************************/
398 static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
399 {
400 	unsigned long mpidr = read_mpidr_el1();
401 
402 	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
403 		return;
404 
405 	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
406 		/* Enable the gic cpu interface */
407 		plat_arm_gic_init();
408 		spm_system_suspend_finish();
409 		enable_scu(mpidr);
410 	}
411 
412 	/* Perform the common cluster specific operations */
413 	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
414 		/* Enable coherency if this cluster was off */
415 		plat_cci_enable();
416 	}
417 
418 	mt_platform_restore_context(mpidr);
419 
420 	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
421 		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
422 		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
423 			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
424 	}
425 
426 	gicv2_pcpu_distif_init();
427 }
428 
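/*
 * For PSCI SYSTEM_SUSPEND, request the deepest (OFF) state at every power
 * level from the CPU up to the system level.
 */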
429 static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
430 {
431 	assert(PLAT_MAX_PWR_LVL >= 2);
432 
433 	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
434 		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
435 }
436 
437 /*******************************************************************************
438  * MTK handlers to shutdown/reboot the system
439  ******************************************************************************/
440 static void __dead2 plat_system_off(void)
441 {
442 	INFO("MTK System Off\n");
443 
444 	rtc_bbpu_power_down();
445 
446 	wfi();
447 	ERROR("MTK System Off: operation not handled.\n");
448 	panic();
449 }
450 
451 static void __dead2 plat_system_reset(void)
452 {
453 	/* Configure the MTK watchdog and trigger a software reset */
454 	INFO("MTK System Reset\n");
455 
456 	mmio_clrsetbits_32(MTK_WDT_BASE,
457 		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
458 		MTK_WDT_MODE_KEY);
459 	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
460 	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);
461 
462 	wfi();
463 	ERROR("MTK System Reset: operation not handled.\n");
464 	panic();
465 }
466 
467 #if !PSCI_EXTENDED_STATE_ID
468 static int plat_validate_power_state(unsigned int power_state,
469 					psci_power_state_t *req_state)
470 {
471 	int pstate = psci_get_pstate_type(power_state);
472 	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
473 	int i;
474 
475 	assert(req_state);
476 
477 	if (pwr_lvl > PLAT_MAX_PWR_LVL)
478 		return PSCI_E_INVALID_PARAMS;
479 
480 	/* Sanity check the requested state */
481 	if (pstate == PSTATE_TYPE_STANDBY) {
482 		/*
483 		 * It's possible to enter standby only on power level 0.
484 		 * Ignore any other power level.
485 		 */
486 		if (pwr_lvl != 0)
487 			return PSCI_E_INVALID_PARAMS;
488 
489 		req_state->pwr_domain_state[MTK_PWR_LVL0] =
490 					MTK_LOCAL_STATE_RET;
491 	} else {
492 		for (i = 0; i <= pwr_lvl; i++)
493 			req_state->pwr_domain_state[i] =
494 					MTK_LOCAL_STATE_OFF;
495 	}
496 
497 	/*
498 	 * We expect the 'state id' to be zero.
499 	 */
500 	if (psci_get_pstate_id(power_state))
501 		return PSCI_E_INVALID_PARAMS;
502 
503 	return PSCI_E_SUCCESS;
504 }
505 #else
506 int plat_validate_power_state(unsigned int power_state,
507 				psci_power_state_t *req_state)
508 {
509 	unsigned int state_id;
510 	int i;
511 
512 	assert(req_state);
513 
514 	/*
515 	 *  Currently we are using a linear search for finding the matching
516 	 *  entry in the idle power state array. This can be made a binary
517 	 *  search if the number of entries justifies the additional complexity.
518 	 */
519 	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
520 		if (power_state == mtk_pm_idle_states[i])
521 			break;
522 	}
523 
524 	/* Return error if entry not found in the idle state array */
525 	if (!mtk_pm_idle_states[i])
526 		return PSCI_E_INVALID_PARAMS;
527 
528 	i = 0;
529 	state_id = psci_get_pstate_id(power_state);
530 
531 	/* Parse the State ID and populate the state info parameter */
532 	while (state_id) {
533 		req_state->pwr_domain_state[i++] = state_id &
534 						MTK_LOCAL_PSTATE_MASK;
535 		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
536 	}
537 
538 	return PSCI_E_SUCCESS;
539 }
540 #endif
541 
542 void mtk_system_pwr_domain_resume(void)
543 {
544 	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);
545 
546 	/* Assert system power domain is available on the platform */
547 	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);
548 
549 	plat_arm_gic_init();
550 }
551 
552 static const plat_psci_ops_t plat_plat_pm_ops = {
553 	.cpu_standby			= plat_cpu_standby,
554 	.pwr_domain_on			= plat_power_domain_on,
555 	.pwr_domain_on_finish		= plat_power_domain_on_finish,
556 	.pwr_domain_off			= plat_power_domain_off,
557 	.pwr_domain_suspend		= plat_power_domain_suspend,
558 	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
559 	.system_off			= plat_system_off,
560 	.system_reset			= plat_system_reset,
561 	.validate_power_state		= plat_validate_power_state,
562 	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
563 };
564 
565 int plat_setup_psci_ops(uintptr_t sec_entrypoint,
566 			const plat_psci_ops_t **psci_ops)
567 {
568 	*psci_ops = &plat_plat_pm_ops;
569 	secure_entrypoint = sec_entrypoint;
570 	return 0;
571 }
572 
573 /*
574  * The PSCI generic code uses this API to let the platform participate in state
575  * coordination during a power management operation. It compares the platform
576  * specific local power states requested by each cpu for a given power domain
577  * and returns the coordinated target power state that the domain should
578  * enter. A platform assigns a number to a local power state. This default
579  * implementation assumes that the platform assigns these numbers in order of
580  * increasing depth of the power state i.e. for two power states X & Y, if X < Y
581  * then X represents a shallower power state than Y. As a result, the
582  * coordinated target local power state for a power domain will be the minimum
583  * of the requested local power states.
584  */
585 plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
586 					     const plat_local_state_t *states,
587 					     unsigned int ncpu)
588 {
589 	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
590 
591 	assert(ncpu);
592 
593 	do {
594 		temp = *states++;
595 		if (temp < target)
596 			target = temp;
597 	} while (--ncpu);
598 
599 	return target;
600 }
601