xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t194/plat_psci_handlers.c (revision 1c62509e89333bbb1b4c0b933d4b906e77206066)
1 /*
2  * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch.h>
8 #include <assert.h>
9 #include <stdbool.h>
10 #include <string.h>
11 
12 #include <arch_helpers.h>
13 #include <common/bl_common.h>
14 #include <common/debug.h>
15 #include <context.h>
16 #include <denver.h>
17 #include <lib/el3_runtime/context_mgmt.h>
18 #include <lib/psci/psci.h>
19 #include <mce.h>
20 #include <plat/common/platform.h>
21 #include <se.h>
22 #include <smmu.h>
23 #include <t194_nvg.h>
24 #include <tegra_platform.h>
25 #include <tegra_private.h>
26 
27 extern void tegra194_cpu_reset_handler(void);
28 extern uint32_t __tegra194_cpu_reset_handler_data,
29 		__tegra194_cpu_reset_handler_end;
30 
31 /* TZDRAM offset for saving SMMU context */
32 #define TEGRA194_SMMU_CTX_OFFSET	16U
33 
34 /* state id mask */
35 #define TEGRA194_STATE_ID_MASK		0xFU
36 /* constants to get power state's wake time */
37 #define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
38 #define TEGRA194_WAKE_TIME_SHIFT	4U
39 /* default core wake mask for CPU_SUSPEND */
40 #define TEGRA194_CORE_WAKE_MASK		0x180cU
41 
/*
 * Per-CPU data shared between tegra_soc_validate_power_state() (caches on)
 * and tegra_soc_pwr_domain_suspend() (caches off). Each entry is aligned to
 * the cache writeback granule so it can be cleaned to DRAM independently,
 * without dragging neighbouring cores' data along.
 */
static struct t19x_psci_percpu_data {
	uint32_t wake_time;	/* core wake time in TSC ticks, from the CPU_SUSPEND power_state */
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
45 
46 /*
47  * tegra_fake_system_suspend acts as a boolean var controlling whether
48  * we are going to take fake system suspend code or normal system suspend code
49  * path. This variable is set inside the sip call handlers, when the kernel
50  * requests an SIP call to set the suspend debug flags.
51  */
52 bool tegra_fake_system_suspend;
53 
54 int32_t tegra_soc_validate_power_state(uint32_t power_state,
55 					psci_power_state_t *req_state)
56 {
57 	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
58 			   TEGRA194_STATE_ID_MASK;
59 	uint32_t cpu = plat_my_core_pos();
60 	int32_t ret = PSCI_E_SUCCESS;
61 
62 	/* save the core wake time (in TSC ticks)*/
63 	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
64 			<< TEGRA194_WAKE_TIME_SHIFT;
65 
66 	/*
67 	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
68 	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
69 	 * is called with caches disabled. It is possible to read a stale value
70 	 * from DRAM in that function, because the L2 cache is not flushed
71 	 * unless the cluster is entering CC6/CC7.
72 	 */
73 	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
74 			sizeof(t19x_percpu_data[cpu]));
75 
76 	/* Sanity check the requested state id */
77 	switch (state_id) {
78 	case PSTATE_ID_CORE_IDLE:
79 	case PSTATE_ID_CORE_POWERDN:
80 
81 		/* Core powerdown request */
82 		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
83 		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
84 
85 		break;
86 
87 	default:
88 		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
89 		ret = PSCI_E_INVALID_PARAMS;
90 		break;
91 	}
92 
93 	return ret;
94 }
95 
96 int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
97 {
98 	const plat_local_state_t *pwr_domain_state;
99 	uint8_t stateid_afflvl0, stateid_afflvl2;
100 	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
101 	uint64_t smmu_ctx_base;
102 	uint32_t val;
103 	mce_cstate_info_t sc7_cstate_info = {
104 		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
105 		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
106 		.system_state_force = 1U,
107 		.update_wake_mask = 1U,
108 	};
109 	uint32_t cpu = plat_my_core_pos();
110 	int32_t ret = 0;
111 
112 	/* get the state ID */
113 	pwr_domain_state = target_state->pwr_domain_state;
114 	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
115 		TEGRA194_STATE_ID_MASK;
116 	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
117 		TEGRA194_STATE_ID_MASK;
118 
119 	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
120 	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {
121 
122 		/* Enter CPU idle/powerdown */
123 		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
124 			(uint32_t)TEGRA_NVG_CORE_C6 : (uint32_t)TEGRA_NVG_CORE_C7;
125 		ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
126 				percpu_data[cpu].wake_time, 0);
127 		assert(ret == 0);
128 
129 	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
130 
131 		/* save 'Secure Boot' Processor Feature Config Register */
132 		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
133 		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);
134 
135 		/* save SMMU context */
136 		smmu_ctx_base = params_from_bl2->tzdram_base +
137 			((uintptr_t)&__tegra194_cpu_reset_handler_data -
138 			 (uintptr_t)&tegra194_cpu_reset_handler) +
139 			TEGRA194_SMMU_CTX_OFFSET;
140 		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
141 
142 		/*
143 		 * Suspend SE, RNG1 and PKA1 only on silcon and fpga,
144 		 * since VDK does not support atomic se ctx save
145 		 */
146 		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
147 			ret = tegra_se_suspend();
148 			assert(ret == 0);
149 		}
150 
151 		if (!tegra_fake_system_suspend) {
152 
153 			/* Prepare for system suspend */
154 			mce_update_cstate_info(&sc7_cstate_info);
155 
156 			do {
157 				val = (uint32_t)mce_command_handler(
158 						(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
159 						(uint32_t)TEGRA_NVG_CORE_C7,
160 						MCE_CORE_SLEEP_TIME_INFINITE,
161 						0U);
162 			} while (val == 0U);
163 
164 			/* Instruct the MCE to enter system suspend state */
165 			ret = mce_command_handler(
166 					(uint64_t)MCE_CMD_ENTER_CSTATE,
167 					(uint64_t)TEGRA_NVG_CORE_C7,
168 					MCE_CORE_SLEEP_TIME_INFINITE,
169 					0U);
170 			assert(ret == 0);
171 		}
172 	} else {
173 		; /* do nothing */
174 	}
175 
176 	return PSCI_E_SUCCESS;
177 }
178 
179 /*******************************************************************************
180  * Platform handler to calculate the proper target power level at the
181  * specified affinity level
182  ******************************************************************************/
183 plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
184 					     const plat_local_state_t *states,
185 					     uint32_t ncpu)
186 {
187 	plat_local_state_t target = *states;
188 	int32_t cluster_powerdn = 1;
189 	uint32_t core_pos = (uint32_t)read_mpidr() & MPIDR_CPU_MASK;
190 	uint32_t num_cpus = ncpu, pos = 0;
191 	mce_cstate_info_t cstate_info = { 0 };
192 
193 	/* get the current core's power state */
194 	target = states[core_pos];
195 
196 	/* CPU suspend */
197 	if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {
198 
199 		/* Program default wake mask */
200 		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
201 		cstate_info.update_wake_mask = 1;
202 		mce_update_cstate_info(&cstate_info);
203 	}
204 
205 	/* CPU off */
206 	if ((lvl == MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {
207 
208 		/* find out the number of ON cpus in the cluster */
209 		do {
210 			target = states[pos];
211 			if (target != PLAT_MAX_OFF_STATE) {
212 				cluster_powerdn = 0;
213 			}
214 			--num_cpus;
215 			pos++;
216 		} while (num_cpus != 0U);
217 
218 		/* Enable cluster powerdn from last CPU in the cluster */
219 		if (cluster_powerdn != 0) {
220 
221 			/* Enable CC6 */
222 			/* todo */
223 
224 			/* If cluster group needs to be railgated, request CG7 */
225 			/* todo */
226 
227 			/* Turn off wake mask */
228 			cstate_info.update_wake_mask = 1U;
229 			mce_update_cstate_info(&cstate_info);
230 
231 		} else {
232 			/* Turn off wake_mask */
233 			cstate_info.update_wake_mask = 1U;
234 			mce_update_cstate_info(&cstate_info);
235 		}
236 	}
237 
238 	/* System Suspend */
239 	if ((lvl == MPIDR_AFFLVL2) || (target == PSTATE_ID_SOC_POWERDN)) {
240 		return PSTATE_ID_SOC_POWERDN;
241 	}
242 
243 	/* default state */
244 	return PSCI_LOCAL_STATE_RUN;
245 }
246 
/*
 * Last-gasp handler run just before the final WFI on the suspend path.
 *
 * On a real SC7 (system powerdown) entry, copies the BL31 image into TZDRAM
 * immediately after the relocated CPU reset handler, so it survives the loss
 * of TZRAM power and can be restored on resume.
 *
 * In fake system suspend mode this function does not return: it disables the
 * EL1 and EL3 MMUs and jumps to tegra_secure_entrypoint() to emulate the
 * resume path without actually powering down.
 */
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t val;
	u_register_t ns_sctlr_el1;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		/* destination: TZDRAM, just past the relocated reset handler */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra194_cpu_reset_handler_end -
			 (uintptr_t)&tegra194_cpu_reset_handler);
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);

		/*
		 * In fake suspend mode, ensure that the loopback procedure
		 * towards system suspend exit is started, instead of calling
		 * WFI. This is done by disabling both MMU's of EL1 & El3
		 * and calling tegra_secure_entrypoint().
		 */
		if (tegra_fake_system_suspend) {

			/*
			 * Disable EL1's MMU.
			 */
			ns_sctlr_el1 = read_sctlr_el1();
			ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT));
			write_sctlr_el1(ns_sctlr_el1);

			/*
			 * Disable MMU to power up the CPU in a "clean"
			 * state
			 */
			disable_mmu_el3();
			/* does not return; panic() guards against fall-through */
			tegra_secure_entrypoint();
			panic();
		}
	}

	return PSCI_E_SUCCESS;
}
296 
297 int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
298 {
299 	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
300 	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
301 			MPIDR_AFFINITY_BITS;
302 	int32_t ret = 0;
303 
304 	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
305 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr);
306 		return PSCI_E_NOT_PRESENT;
307 	}
308 
309 	/* construct the target CPU # */
310 	target_cpu += (target_cluster << 1U);
311 
312 	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
313 	if (ret < 0) {
314 		return PSCI_E_DENIED;
315 	}
316 
317 	return PSCI_E_SUCCESS;
318 }
319 
320 int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
321 {
322 	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
323 
324 	/*
325 	 * Reset power state info for CPUs when onlining, we set
326 	 * deepest power when offlining a core but that may not be
327 	 * requested by non-secure sw which controls idle states. It
328 	 * will re-init this info from non-secure software when the
329 	 * core come online.
330 	 */
331 
332 	/*
333 	 * Check if we are exiting from deep sleep and restore SE
334 	 * context if we are.
335 	 */
336 	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
337 		/* Init SMMU */
338 		tegra_smmu_init();
339 
340 		/* Resume SE, RNG1 and PKA1 */
341 		tegra_se_resume();
342 
343 		/*
344 		 * Reset power state info for the last core doing SC7
345 		 * entry and exit, we set deepest power state as CC7
346 		 * and SC7 for SC7 entry which may not be requested by
347 		 * non-secure SW which controls idle states.
348 		 */
349 	}
350 
351 	return PSCI_E_SUCCESS;
352 }
353 
354 int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
355 {
356 	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
357 	int32_t ret = 0;
358 
359 	(void)target_state;
360 
361 	/* Disable Denver's DCO operations */
362 	if (impl == DENVER_IMPL) {
363 		denver_disable_dco();
364 	}
365 
366 	/* Turn off CPU */
367 	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
368 			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
369 	assert(ret == 0);
370 
371 	return PSCI_E_SUCCESS;
372 }
373 
374 __dead2 void tegra_soc_prepare_system_off(void)
375 {
376 	/* System power off */
377 
378 	/* SC8 */
379 
380 	wfi();
381 
382 	/* wait for the system to power down */
383 	for (;;) {
384 		;
385 	}
386 }
387 
388 int32_t tegra_soc_prepare_system_reset(void)
389 {
390 	return PSCI_E_SUCCESS;
391 }
392