xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision 4e1830a99207b9c7dd6909f989fa01a4789ed05b)
/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <mce.h>
#include <smmu.h>
#include <stdbool.h>
#include <t18x_ari.h>
#include <tegra_private.h>

extern void memcpy16(void *dest, const void *src, unsigned int length);

extern void prepare_cpu_pwr_dwn(void);
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_end,
		__tegra186_smmu_context;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET	16UL

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3U

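/* SE register context saved across system suspend */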
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
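/* per-CPU wake time; cache-line aligned so each core can clean its own entry */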
static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];

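/*******************************************************************************
 * Handler to validate the CPU_SUSPEND power state, save the requested wake
 * time and populate the CPU/cluster power domain states
 ******************************************************************************/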
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

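/*******************************************************************************
 * Handler to program the MCE for core idle/powerdown entry; for system
 * suspend, the SE, PFCFG and SMMU contexts are saved before requesting SC7
 ******************************************************************************/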
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/* save SMMU context to TZDRAM */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
		/* Loop until system suspend is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret == 0) {
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_cpu_in_cluster(states, ncpu)) {

			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
						  (uint64_t)TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0U);
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
			}

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	int cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

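/*******************************************************************************
 * Handler invoked before the final WFI; copies BL31 to TZDRAM when the SoC
 * is entering system suspend, since TZRAM loses power in that state
 ******************************************************************************/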
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)&tegra186_cpu_reset_handler);
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}

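/*******************************************************************************
 * Handler to power on a CPU via the MCE ONLINE_CORE command
 ******************************************************************************/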
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			(uint64_t)MPIDR_AFFINITY_BITS;
	int32_t ret = PSCI_E_SUCCESS;

	if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;

	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);

		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}

	return ret;
}

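/*******************************************************************************
 * Handler to finish the power-on sequence: enable L2 ECC/parity on A57
 * cores, reset the MCE cstate info and restore SE/SMMU state after SC7 exit
 ******************************************************************************/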
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
	    (impl != (uint64_t)DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * This info is re-initialised by non-secure software once the
	 * core comes back online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset the power state info for the last core exiting SC7.
		 * During SC7 entry we programmed the deepest power states
		 * (CC7 and SC7), which may not be what the non-secure SW
		 * controlling idle states requests.
		 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}

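/*******************************************************************************
 * Handler to power down the calling CPU; disables the DCO on Denver cores
 * before requesting core power-off from the MCE
 ******************************************************************************/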
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
			MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	return PSCI_E_SUCCESS;
}

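/*******************************************************************************
 * Handler to power off the entire system via the MCE
 ******************************************************************************/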
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* power off the entire system */
	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

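/*******************************************************************************
 * Handler to reboot the system via the MCE
 ******************************************************************************/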
int32_t tegra_soc_prepare_system_reset(void)
{
	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);

	return PSCI_E_SUCCESS;
}
425