xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision 8336c94dc4c7b25d34bb6f3c5008720746407dad)
/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <bpmp_ipc.h>
#include <mce.h>
#include <memctrl_v2.h>
#include <security_engine.h>
#include <smmu.h>
#include <t18x_ari.h>
#include <tegra186_private.h>
#include <tegra_private.h>

extern void memcpy16(void *dest, const void *src, unsigned int length);

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3U

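/*
 * SE, RNG and PKA mutex watchdog settings saved across system suspend and
 * restored on exit from SC7.
 */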
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
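/*
 * Per-CPU PSCI data. Each entry is aligned to the cache writeback granule
 * so it can be cleaned to memory without touching its neighbours.
 */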
static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];

int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	(void)cpu_state;
	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t mc_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
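		/*
		 * Request the MCE to enter the core C-state; the wake time
		 * (in TSC ticks) was saved by tegra_soc_validate_power_state().
		 */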
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context to TZDRAM */
		mc_ctx_base = params_from_bl2->tzdram_base +
				tegra186_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Loop until system suspend is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
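		/* a core that is not fully OFF keeps the cluster powered on */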
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {
		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
				0U);
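		/* if the MCE denies the CCx state, keep the cluster running */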
		if (ret == 0) {
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {
		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_cpu_in_cluster(states, ncpu)) {
			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
						  (uint64_t)TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0U);
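			/* if CC7 is not allowed, keep the cluster running */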
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
			}

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;
	uint64_t src_len_in_bytes = (uint64_t)(((uintptr_t)(&__BL31_END__) -
					(uintptr_t)BL31_BASE));
	int32_t ret;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/*
		 * Initialise communication channel with BPMP. Keep the call
		 * outside assert() so it is not compiled out when assertions
		 * are disabled.
		 */
		ret = tegra_bpmp_ipc_init();
		assert(ret == 0);

		/* Enable SE clock */
		ret = tegra_bpmp_ipc_enable_clock(TEGRA186_CLK_SE);
		if (ret != 0) {
			ERROR("Failed to enable clock\n");
			return ret;
		}

		/*
		 * Generate/save SHA256 of ATF during SC7 entry
		 */
		if (tegra_se_save_sha256_hash(BL31_BASE,
					(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
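		/* the copy destination is right after the CPU reset handler in TZDRAM */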
		val = params_from_bl2->tzdram_base +
			tegra186_get_cpu_reset_handler_size();
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)BL31_END - (uintptr_t)BL31_BASE);

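		/* Disable SE clock now that the hash and copy are done */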
		ret = tegra_bpmp_ipc_disable_clock(TEGRA186_CLK_SE);
		if (ret != 0) {
			ERROR("Failed to disable clock\n");
			return ret;
		}
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	int32_t ret = PSCI_E_SUCCESS;
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;

	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);

		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}

	return ret;
}

int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

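	/* read the implementer field from MIDR to tell Denver and Cortex-A57 cores apart */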
	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset the power state info for CPUs when onlining: we set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * This info is re-initialised from non-secure software when the
	 * core comes online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit: we set the deepest power states (CC7 and
		 * SC7) for SC7 entry, which may not be what the non-secure
		 * SW controlling idle states requested.
		 */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* power off the entire system */
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);

	return PSCI_E_SUCCESS;
}