xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision b6d1757b82a3d8e120e7f8079a23a9e1f5290415)
1 /*
2  * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <common/bl_common.h>
13 #include <common/debug.h>
14 #include <context.h>
15 #include <cortex_a57.h>
16 #include <denver.h>
17 #include <lib/el3_runtime/context_mgmt.h>
18 #include <lib/psci/psci.h>
19 #include <plat/common/platform.h>
20 
21 #include <mce.h>
22 #include <smmu.h>
23 #include <stdbool.h>
24 #include <t18x_ari.h>
25 #include <tegra_private.h>
26 
27 extern void memcpy16(void *dest, const void *src, unsigned int length);
28 extern void tegra186_cpu_reset_handler(void);
29 extern uint64_t __tegra186_cpu_reset_handler_end,
30 		__tegra186_smmu_context;
31 
32 /* state id mask */
33 #define TEGRA186_STATE_ID_MASK		0xFU
34 /* constants to get power state's wake time */
35 #define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
36 #define TEGRA186_WAKE_TIME_SHIFT	4U
37 /* default core wake mask for CPU_SUSPEND */
38 #define TEGRA186_CORE_WAKE_MASK		0x180cU
39 /* context size to save during system suspend */
40 #define TEGRA186_SE_CONTEXT_SIZE	3U
41 
42 static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
43 static struct tegra_psci_percpu_data {
44 	uint32_t wake_time;
45 } __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];
46 
47 int32_t tegra_soc_validate_power_state(uint32_t power_state,
48 					psci_power_state_t *req_state)
49 {
50 	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
51 	uint32_t cpu = plat_my_core_pos();
52 	int32_t ret = PSCI_E_SUCCESS;
53 
54 	/* save the core wake time (in TSC ticks)*/
55 	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
56 			<< TEGRA186_WAKE_TIME_SHIFT;
57 
58 	/*
59 	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
60 	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
61 	 * is called with caches disabled. It is possible to read a stale value
62 	 * from DRAM in that function, because the L2 cache is not flushed
63 	 * unless the cluster is entering CC6/CC7.
64 	 */
65 	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
66 			sizeof(tegra_percpu_data[cpu]));
67 
68 	/* Sanity check the requested state id */
69 	switch (state_id) {
70 	case PSTATE_ID_CORE_IDLE:
71 	case PSTATE_ID_CORE_POWERDN:
72 
73 		/* Core powerdown request */
74 		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
75 		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
76 
77 		break;
78 
79 	default:
80 		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
81 		ret = PSCI_E_INVALID_PARAMS;
82 		break;
83 	}
84 
85 	return ret;
86 }
87 
/*******************************************************************************
 * Platform handler for CPU_SUSPEND entry. Two paths:
 *   - core idle/powerdown (affinity level 0): ask the MCE to enter the
 *     matching core C-state (C6 or C7), bounded by the wake time recorded
 *     in tegra_soc_validate_power_state().
 *   - system suspend (SC7, affinity level 2): save SE/PKA/RNG watchdog
 *     registers and the SMMU context, then negotiate SC7 entry with the MCE.
 * NOTE(review): this runs with caches disabled on the suspend path, which is
 * why tegra_percpu_data was cleaned to DRAM at validate time.
 ******************************************************************************/
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle (C6) or CPU powerdown (C7) */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers (mutex watchdog limits) for restore on exit */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/*
		 * save SMMU context to TZDRAM; the destination is located at
		 * the same offset from tzdram_base as __tegra186_smmu_context
		 * is from the reset handler, matching the copied handler image.
		 */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)&tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend: allow CC7 + SC7 and force it */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
		/* Loop until system suspend is allowed (MCE returns non-zero) */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	} else {
		; /* do nothing - no supported state requested */
	}

	return PSCI_E_SUCCESS;
}
158 
159 /*******************************************************************************
160  * Helper function to check if this is the last ON CPU in the cluster
161  ******************************************************************************/
162 static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
163 			uint32_t ncpu)
164 {
165 	plat_local_state_t target;
166 	bool last_on_cpu = true;
167 	uint32_t num_cpus = ncpu, pos = 0;
168 
169 	do {
170 		target = states[pos];
171 		if (target != PLAT_MAX_OFF_STATE) {
172 			last_on_cpu = false;
173 		}
174 		--num_cpus;
175 		pos++;
176 	} while (num_cpus != 0U);
177 
178 	return last_on_cpu;
179 }
180 
/*******************************************************************************
 * Helper function to get target power state for the cluster (affinity
 * level 1). Downgrades the request to PSCI_LOCAL_STATE_RUN whenever the
 * MCE rejects the corresponding cluster C-state.
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	/*
	 * NOTE(review): 'states' is indexed by the in-cluster core number from
	 * MPIDR here, while tegra_percpu_data is indexed by the platform core
	 * position - presumably both resolve to this core's entries; confirm
	 * against the PSCI framework's ordering of 'states'.
	 */
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend: core powerdown requested by this core */
	if (target == PSTATE_ID_CORE_POWERDN) {
		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed; wake time bounds the sleep */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret == 0) {
			/* MCE rejected the C-state; keep the cluster running */
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {
		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_cpu_in_cluster(states, ncpu)) {
			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
						  (uint64_t)TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0U);
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
			}

		} else {

			/* Not the last CPU: turn off wake_mask, keep cluster on */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}
239 
240 /*******************************************************************************
241  * Platform handler to calculate the proper target power level at the
242  * specified affinity level
243  ******************************************************************************/
244 plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
245 					     const plat_local_state_t *states,
246 					     uint32_t ncpu)
247 {
248 	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
249 	uint32_t cpu = plat_my_core_pos();
250 
251 	/* System Suspend */
252 	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
253 	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
254 		target = PSTATE_ID_SOC_POWERDN;
255 	}
256 
257 	/* CPU off, CPU suspend */
258 	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
259 		target = tegra_get_afflvl1_pwr_state(states, ncpu);
260 	}
261 
262 	/* target cluster/system state */
263 	return target;
264 }
265 
266 int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
267 {
268 	const plat_local_state_t *pwr_domain_state =
269 		target_state->pwr_domain_state;
270 	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
271 	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
272 		TEGRA186_STATE_ID_MASK;
273 	uint64_t val;
274 
275 	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
276 		/*
277 		 * The TZRAM loses power when we enter system suspend. To
278 		 * allow graceful exit from system suspend, we need to copy
279 		 * BL3-1 over to TZDRAM.
280 		 */
281 		val = params_from_bl2->tzdram_base +
282 			((uintptr_t)&__tegra186_cpu_reset_handler_end -
283 			 (uintptr_t)&tegra186_cpu_reset_handler);
284 		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
285 			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
286 	}
287 
288 	return PSCI_E_SUCCESS;
289 }
290 
291 int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
292 {
293 	int32_t ret = PSCI_E_SUCCESS;
294 	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
295 	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
296 			MPIDR_AFFINITY_BITS;
297 
298 	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
299 
300 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
301 		ret = PSCI_E_NOT_PRESENT;
302 
303 	} else {
304 		/* construct the target CPU # */
305 		target_cpu |= (target_cluster << 2);
306 
307 		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
308 	}
309 
310 	return ret;
311 }
312 
/*******************************************************************************
 * Platform handler run on the newly powered-on CPU. Re-enables L2 ECC/parity
 * on Cortex-A57 cores, resets the per-core cstate info, and - when returning
 * from system suspend (SC7) - restores the SE registers and re-initializes
 * the SMMU.
 ******************************************************************************/
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	/* CPU implementer field distinguishes Denver from Cortex-A57 cores */
	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond). Skipped for Denver cores and when BL2 disabled
	 * it via l2_ecc_parity_prot_dis.
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* restore the mutex watchdog limits saved at suspend time */
		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU (its context was saved to TZDRAM on suspend) */
		tegra_smmu_init();

		/*
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
		 */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}
378 
379 int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
380 {
381 	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
382 
383 	(void)target_state;
384 
385 	/* Disable Denver's DCO operations */
386 	if (impl == DENVER_IMPL) {
387 		denver_disable_dco();
388 	}
389 
390 	/* Turn off CPU */
391 	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
392 			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
393 
394 	return PSCI_E_SUCCESS;
395 }
396 
397 __dead2 void tegra_soc_prepare_system_off(void)
398 {
399 	/* power off the entire system */
400 	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
401 
402 	wfi();
403 
404 	/* wait for the system to power down */
405 	for (;;) {
406 		;
407 	}
408 }
409 
410 int32_t tegra_soc_prepare_system_reset(void)
411 {
412 	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
413 
414 	return PSCI_E_SUCCESS;
415 }
416