xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision c948f77136c42a92d0bb660543a3600c36dcf7f1)
/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <mce.h>
#include <smmu.h>
#include <t18x_ari.h>
#include <tegra_private.h>

extern void memcpy16(void *dest, const void *src, unsigned int length);

extern void prepare_cpu_pwr_dwn(void);
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_end,
		__tegra186_smmu_context;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET	16UL

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3U

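/*
 * SE watchdog register values saved across system suspend, plus per-CPU
 * data holding the wake time requested through CPU_SUSPEND. The per-CPU
 * entries are aligned to the cache writeback granule so that each core's
 * entry can be cleaned to memory independently.
 */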
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];

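/*******************************************************************************
 * Platform handler to validate the requested power state for CPU_SUSPEND.
 * The wake time encoded in the power_state argument is saved per-CPU and
 * cleaned to memory, and only the core idle/powerdown state ids are
 * accepted.
 ******************************************************************************/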
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

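/*******************************************************************************
 * Platform handler invoked on entry to a low power state. Core idle and
 * powerdown requests are passed to the MCE; for system suspend (SC7), the
 * SE watchdog registers, the 'Secure Boot' PFCFG register and the SMMU
 * context are saved before the MCE is asked to enter the deepest state.
 ******************************************************************************/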
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/* save SMMU context to TZDRAM */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
		/* Loop until system suspend is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = *states;
	uint32_t pos = 0;
	plat_local_state_t result = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu;
	int32_t ret, cluster_powerdn = 1;
	uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
	mce_cstate_info_t cstate_info = { 0 };

	/* get the power state at this level */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = states[core_pos];
	}
	if (lvl == (uint32_t)MPIDR_AFFLVL2) {
		target = states[cpu];
	}

	/* CPU suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret != 0) {
			result = PSTATE_ID_CORE_POWERDN;
		}
	}

	/* CPU off */
	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {

		/* check if all the CPUs in the cluster are being powered down */
		do {
			target = states[pos];
			if (target != PLAT_MAX_OFF_STATE) {
				cluster_powerdn = 0;
			}
			--num_cpu;
			pos++;
		} while (num_cpu != 0U);

		/* Enable cluster powerdn from last CPU in the cluster */
		if (cluster_powerdn != 0) {

			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
						  TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0U);
			if (ret != 0) {
				result = PSTATE_ID_CORE_POWERDN;
			}

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
		}
	}

	/* System Suspend */
	if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) &&
	    (target == PSTATE_ID_SOC_POWERDN)) {
		result = PSTATE_ID_SOC_POWERDN;
	}

	/* default state */
	return result;
}

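/*******************************************************************************
 * Platform handler called before executing the power-down WFI. When the
 * system is entering SC7, BL31 is copied to TZDRAM because the TZRAM
 * contents are lost across system suspend.
 ******************************************************************************/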
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow a graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)&tegra186_cpu_reset_handler);
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}

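/*******************************************************************************
 * Platform handler to power on a CPU. The cluster and CPU fields of the
 * MPIDR are combined into the core number passed to the MCE ONLINE_CORE
 * command.
 ******************************************************************************/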
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			(uint64_t)MPIDR_AFFINITY_BITS;
	int32_t ret = PSCI_E_SUCCESS;

	if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;

	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);

		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}

	return ret;
}

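/*******************************************************************************
 * Platform handler run when a core finishes powering on or resumes. It
 * enables L2 ECC/parity protection on Cortex-A57 cores, resets the cstate
 * info that was programmed while the core was offline, and restores the SE
 * registers and SMMU state when exiting system suspend.
 ******************************************************************************/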
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
	    (impl != (uint64_t)DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * The non-secure software re-initializes this info once the core
	 * comes online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit. We set the deepest power state (CC7 and
		 * SC7) for SC7 entry, which may not be what the non-secure
		 * software controlling idle states requested.
		 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}

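/*******************************************************************************
 * Platform handler to power down the calling CPU. Denver cores have their
 * DCO operations disabled first, then the MCE is asked to put the core
 * into the C7 state.
 ******************************************************************************/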
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
			MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	return PSCI_E_SUCCESS;
}

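/*******************************************************************************
 * Platform handler to power off the entire system. The CCPLEX is asked to
 * shut down and the calling core waits in WFI and an infinite loop for the
 * power to be removed.
 ******************************************************************************/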
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* power off the entire system */
	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

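/*******************************************************************************
 * Platform handler to prepare for a system reset; the CCPLEX is asked to
 * enter its reboot state.
 ******************************************************************************/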
int32_t tegra_soc_prepare_system_reset(void)
{
	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);

	return PSCI_E_SUCCESS;
}
403