xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision b495791ba28ae36078e09d32877fca8e97088410)
1 /*
2  * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <common/bl_common.h>
13 #include <common/debug.h>
14 #include <context.h>
15 #include <cortex_a57.h>
16 #include <denver.h>
17 #include <lib/el3_runtime/context_mgmt.h>
18 #include <lib/psci/psci.h>
19 #include <plat/common/platform.h>
20 
21 #include <mce.h>
22 #include <smmu.h>
23 #include <t18x_ari.h>
24 #include <tegra_private.h>
25 
/* 16-byte-granule copy helper (assembly); used to relocate BL31 to TZDRAM */
extern void memcpy16(void *dest, const void *src, unsigned int length);

extern void prepare_cpu_pwr_dwn(void);
extern void tegra186_cpu_reset_handler(void);
/*
 * Linker/assembly-defined symbols inside the relocatable CPU reset handler
 * image; offsets from tegra186_cpu_reset_handler are used to locate the
 * corresponding addresses after the image is copied to TZDRAM.
 */
extern uint32_t __tegra186_cpu_reset_handler_end,
		__tegra186_smmu_context;

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xF
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0
#define TEGRA186_WAKE_TIME_SHIFT	4
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180c
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3

/* SE/RNG/PKA mutex watchdog registers preserved across system suspend */
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
/*
 * Per-core wake time in TSC ticks. Aligned to the cache writeback granule
 * so each core can clean its own entry to DRAM without false sharing.
 */
static struct t18x_psci_percpu_data {
	unsigned int wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];

/* System power down state */
uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
50 
/*
 * Validate a CPU_SUSPEND power state requested by the non-secure world
 * and record the per-core wake time it encodes.
 *
 * power_state: composite PSCI power-state value from the caller.
 * req_state:   output; core/cluster local states are filled in here.
 *
 * Returns PSCI_E_SUCCESS for a supported state id, PSCI_E_INVALID_PARAMS
 * otherwise. Note the wake time is stored (and cleaned to DRAM) before the
 * state id is validated.
 */
int32_t tegra_soc_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	int cpu = plat_my_core_pos();

	/* save the core wake time (in TSC ticks)*/
	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&percpu_data[cpu],
			sizeof(percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}
89 
/*
 * Enter the power state previously validated for this core. Runs with
 * caches disabled (see tegra_soc_validate_power_state(), which cleans
 * percpu_data to DRAM for exactly this reason).
 *
 * Core-level idle/powerdown requests are forwarded to the MCE as C6/C7.
 * A system-level powerdown (SC7) additionally saves SE and SMMU context
 * to TZDRAM before handing control to the MCE.
 */
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	unsigned int stateid_afflvl0, stateid_afflvl2;
	int cpu = plat_my_core_pos();
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle (C6) or powerdown (C7) */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
				percpu_data[cpu].wake_time, 0);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/*
		 * Save SMMU context to TZDRAM. The destination is the offset
		 * of __tegra186_smmu_context within the reset handler image,
		 * applied to the image's relocated copy at tzdram_base.
		 */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Loop until system suspend is allowed */
		do {
			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0);
		} while (val == 0);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
	}

	return PSCI_E_SUCCESS;
}
158 
159 /*******************************************************************************
160  * Platform handler to calculate the proper target power level at the
161  * specified affinity level
162  ******************************************************************************/
163 plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
164 					     const plat_local_state_t *states,
165 					     unsigned int ncpu)
166 {
167 	plat_local_state_t target = *states;
168 	int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1;
169 	int core_pos = read_mpidr() & MPIDR_CPU_MASK;
170 	mce_cstate_info_t cstate_info = { 0 };
171 
172 	/* get the power state at this level */
173 	if (lvl == MPIDR_AFFLVL1)
174 		target = *(states + core_pos);
175 	if (lvl == MPIDR_AFFLVL2)
176 		target = *(states + cpu);
177 
178 	/* CPU suspend */
179 	if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {
180 
181 		/* Program default wake mask */
182 		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
183 		cstate_info.update_wake_mask = 1;
184 		mce_update_cstate_info(&cstate_info);
185 
186 		/* Check if CCx state is allowed. */
187 		ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
188 				TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time,
189 				0);
190 		if (ret)
191 			return PSTATE_ID_CORE_POWERDN;
192 	}
193 
194 	/* CPU off */
195 	if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {
196 
197 		/* find out the number of ON cpus in the cluster */
198 		do {
199 			target = *states++;
200 			if (target != PLAT_MAX_OFF_STATE)
201 				cluster_powerdn = 0;
202 		} while (--ncpu);
203 
204 		/* Enable cluster powerdn from last CPU in the cluster */
205 		if (cluster_powerdn) {
206 
207 			/* Enable CC7 state and turn off wake mask */
208 			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
209 			cstate_info.update_wake_mask = 1;
210 			mce_update_cstate_info(&cstate_info);
211 
212 			/* Check if CCx state is allowed. */
213 			ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
214 						  TEGRA_ARI_CORE_C7,
215 						  MCE_CORE_SLEEP_TIME_INFINITE,
216 						  0);
217 			if (ret)
218 				return PSTATE_ID_CORE_POWERDN;
219 
220 		} else {
221 
222 			/* Turn off wake_mask */
223 			cstate_info.update_wake_mask = 1;
224 			mce_update_cstate_info(&cstate_info);
225 		}
226 	}
227 
228 	/* System Suspend */
229 	if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
230 	    (target == PSTATE_ID_SOC_POWERDN))
231 		return PSTATE_ID_SOC_POWERDN;
232 
233 	/* default state */
234 	return PSCI_LOCAL_STATE_RUN;
235 }
236 
/*
 * Last step before the final wfi on the way into a power-down state.
 * On the SC7 (system suspend) path, copies the whole BL31 image into
 * TZDRAM, immediately after the relocated CPU reset handler, because
 * TZRAM loses power in SC7 and BL31 must be restored on exit.
 */
int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		/* destination = tzdram_base + size of the reset handler image */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)tegra186_cpu_reset_handler);
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}
261 
262 int tegra_soc_pwr_domain_on(u_register_t mpidr)
263 {
264 	uint32_t target_cpu = mpidr & MPIDR_CPU_MASK;
265 	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
266 			MPIDR_AFFINITY_BITS;
267 
268 	if (target_cluster > MPIDR_AFFLVL1) {
269 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
270 		return PSCI_E_NOT_PRESENT;
271 	}
272 
273 	/* construct the target CPU # */
274 	target_cpu |= (target_cluster << 2);
275 
276 	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
277 
278 	return PSCI_E_SUCCESS;
279 }
280 
/*
 * Runs on a core after it has been powered on (cold boot, hotplug-in or
 * suspend exit): re-enables A57 L2 ECC/parity where applicable, resets the
 * MCE power-state hints, and on SC7 exit restores SE context and the SMMU.
 */
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond). Skipped on Denver cores and when BL2 disabled it.
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
	    (impl != (uint64_t)DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* restore the SE mutex watchdog registers saved at suspend */
		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
		 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}
347 
348 int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
349 {
350 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
351 
352 	/* Disable Denver's DCO operations */
353 	if (impl == DENVER_IMPL)
354 		denver_disable_dco();
355 
356 	/* Turn off CPU */
357 	(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
358 			MCE_CORE_SLEEP_TIME_INFINITE, 0);
359 
360 	return PSCI_E_SUCCESS;
361 }
362 
/*
 * Power the system off; never returns. The path taken depends on the
 * globally-configured tegra186_system_powerdn_state: a full power-off is
 * delegated entirely to the MCE, while SC8 quasi power-down requires this
 * core to tear itself down (GIC, caches) before the final wfi.
 */
__dead2 void tegra_soc_prepare_system_off(void)
{
	mce_cstate_info_t cstate_info = { 0 };
	uint32_t val;

	if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {

		/* power off the entire system */
		mce_enter_ccplex_state(tegra186_system_powerdn_state);

	} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {

		/* Prepare for quasi power down */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC8;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* loop until other CPUs power down */
		do {
			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0);
		} while (val == 0);

		/* Enter quasi power down state */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);

		/* disable GICC */
		tegra_gic_cpuif_deactivate();

		/* power down core */
		prepare_cpu_pwr_dwn();

		/* flush L1/L2 data caches */
		dcsw_op_all(DCCISW);

	} else {
		ERROR("%s: unsupported power down state (%d)\n", __func__,
			tegra186_system_powerdn_state);
	}

	/* halt this core; power removal is expected to happen underneath us */
	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}
415 
416 int tegra_soc_prepare_system_reset(void)
417 {
418 	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
419 
420 	return PSCI_E_SUCCESS;
421 }
422