xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision 889c07c7b1a9739bcc907ad1f988fa484d22f84c)
1 /*
2  * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <common/bl_common.h>
13 #include <common/debug.h>
14 #include <context.h>
15 #include <cortex_a57.h>
16 #include <denver.h>
17 #include <lib/el3_runtime/context_mgmt.h>
18 #include <lib/psci/psci.h>
19 #include <plat/common/platform.h>
20 
21 #include <mce.h>
22 #include <smmu.h>
23 #include <stdbool.h>
24 #include <t18x_ari.h>
25 #include <tegra186_private.h>
26 #include <tegra_private.h>
27 
28 extern void memcpy16(void *dest, const void *src, unsigned int length);
29 
30 /* state id mask */
31 #define TEGRA186_STATE_ID_MASK		0xFU
32 /* constants to get power state's wake time */
33 #define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
34 #define TEGRA186_WAKE_TIME_SHIFT	4U
35 /* default core wake mask for CPU_SUSPEND */
36 #define TEGRA186_CORE_WAKE_MASK		0x180cU
37 /* context size to save during system suspend */
38 #define TEGRA186_SE_CONTEXT_SIZE	3U
39 
/* SE/RNG/PKA mutex watchdog register values saved across system suspend */
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
/*
 * Per-CPU PSCI data. Aligned to the cache writeback granule so that each
 * core can clean its own entry to DRAM without touching a neighbour's data
 * (required because the suspend path reads it with caches disabled).
 */
static struct tegra_psci_percpu_data {
	uint32_t wake_time;	/* core wake time in TSC ticks */
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];
44 
45 int32_t tegra_soc_validate_power_state(uint32_t power_state,
46 					psci_power_state_t *req_state)
47 {
48 	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
49 	uint32_t cpu = plat_my_core_pos();
50 	int32_t ret = PSCI_E_SUCCESS;
51 
52 	/* save the core wake time (in TSC ticks)*/
53 	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
54 			<< TEGRA186_WAKE_TIME_SHIFT;
55 
56 	/*
57 	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
58 	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
59 	 * is called with caches disabled. It is possible to read a stale value
60 	 * from DRAM in that function, because the L2 cache is not flushed
61 	 * unless the cluster is entering CC6/CC7.
62 	 */
63 	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
64 			sizeof(tegra_percpu_data[cpu]));
65 
66 	/* Sanity check the requested state id */
67 	switch (state_id) {
68 	case PSTATE_ID_CORE_IDLE:
69 	case PSTATE_ID_CORE_POWERDN:
70 
71 		/* Core powerdown request */
72 		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
73 		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
74 
75 		break;
76 
77 	default:
78 		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
79 		ret = PSCI_E_INVALID_PARAMS;
80 		break;
81 	}
82 
83 	return ret;
84 }
85 
/*******************************************************************************
 * Program the core (and, for system suspend, the SoC) to enter the power
 * state encoded in target_state. Per the note in
 * tegra_soc_validate_power_state(), this runs with caches disabled.
 ******************************************************************************/
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state IDs for the core (afflvl0) and system (afflvl2) */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/*
		 * Enter CPU idle (C6) or powerdown (C7). The wake time was
		 * saved and cleaned to DRAM by
		 * tegra_soc_validate_power_state().
		 */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE, RNG and PKA mutex watchdog registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context to TZDRAM */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra186_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend: cluster CC7 + system SC7 */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
		/* Loop until the MCE reports SC7 entry is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	} else {
		; /* do nothing for other state combinations */
	}

	return PSCI_E_SUCCESS;
}
155 
156 /*******************************************************************************
157  * Helper function to check if this is the last ON CPU in the cluster
158  ******************************************************************************/
159 static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
160 			uint32_t ncpu)
161 {
162 	plat_local_state_t target;
163 	bool last_on_cpu = true;
164 	uint32_t num_cpus = ncpu, pos = 0;
165 
166 	do {
167 		target = states[pos];
168 		if (target != PLAT_MAX_OFF_STATE) {
169 			last_on_cpu = false;
170 		}
171 		--num_cpus;
172 		pos++;
173 	} while (num_cpus != 0U);
174 
175 	return last_on_cpu;
176 }
177 
178 /*******************************************************************************
179  * Helper function to get target power state for the cluster
180  ******************************************************************************/
/*******************************************************************************
 * Helper function to get target power state for the cluster. Demotes the
 * requested state to RUN when the MCE does not allow the CCx transition.
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	/*
	 * core_pos indexes this core's slot in 'states'.
	 * NOTE(review): presumably 'states' is ordered by Aff0 within the
	 * cluster — confirm against the PSCI coordination code. 'cpu' is the
	 * global core position used to fetch the saved per-cpu wake time.
	 */
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend: this core requested powerdown */
	if (target == PSTATE_ID_CORE_POWERDN) {
		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed; demote to RUN if not. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret == 0) {
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {
		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_cpu_in_cluster(states, ncpu)) {
			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed; demote to RUN if not. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
						  (uint64_t)TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0U);
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
			}

		} else {

			/* Not the last CPU: turn off wake_mask, keep cluster up */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* coordinated cluster-level state */
	return target;
}
236 
237 /*******************************************************************************
238  * Platform handler to calculate the proper target power level at the
239  * specified affinity level
240  ******************************************************************************/
241 plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
242 					     const plat_local_state_t *states,
243 					     uint32_t ncpu)
244 {
245 	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
246 	uint32_t cpu = plat_my_core_pos();
247 
248 	/* System Suspend */
249 	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
250 	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
251 		target = PSTATE_ID_SOC_POWERDN;
252 	}
253 
254 	/* CPU off, CPU suspend */
255 	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
256 		target = tegra_get_afflvl1_pwr_state(states, ncpu);
257 	}
258 
259 	/* target cluster/system state */
260 	return target;
261 }
262 
/*******************************************************************************
 * Last platform step before the power-down wfi. For system suspend (SC7),
 * copy the BL31 image into TZDRAM so it survives the loss of TZRAM power
 * and can be restored on resume.
 ******************************************************************************/
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		/* destination: TZDRAM, just past the CPU reset handler */
		val = params_from_bl2->tzdram_base +
			tegra186_get_cpu_reset_handler_size();
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}
286 
287 int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
288 {
289 	int32_t ret = PSCI_E_SUCCESS;
290 	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
291 	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
292 			MPIDR_AFFINITY_BITS;
293 
294 	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
295 
296 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
297 		ret = PSCI_E_NOT_PRESENT;
298 
299 	} else {
300 		/* construct the target CPU # */
301 		target_cpu |= (target_cluster << 2);
302 
303 		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
304 	}
305 
306 	return ret;
307 }
308 
/*******************************************************************************
 * Handler called after a CPU has been powered on (boot, hotplug-in or
 * system-suspend exit). Re-enables per-core protections, resets the MCE
 * power-state hints and, on SC7 exit, restores SE and SMMU state.
 ******************************************************************************/
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	/* CPU implementer field: distinguishes Denver from Cortex-A57 cores */
	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* restore the mutex watchdog registers saved on suspend entry */
		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
		 */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}
374 
375 int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
376 {
377 	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
378 
379 	(void)target_state;
380 
381 	/* Disable Denver's DCO operations */
382 	if (impl == DENVER_IMPL) {
383 		denver_disable_dco();
384 	}
385 
386 	/* Turn off CPU */
387 	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
388 			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
389 
390 	return PSCI_E_SUCCESS;
391 }
392 
393 __dead2 void tegra_soc_prepare_system_off(void)
394 {
395 	/* power off the entire system */
396 	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
397 
398 	wfi();
399 
400 	/* wait for the system to power down */
401 	for (;;) {
402 		;
403 	}
404 }
405 
406 int32_t tegra_soc_prepare_system_reset(void)
407 {
408 	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
409 
410 	return PSCI_E_SUCCESS;
411 }
412