xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t194/plat_psci_handlers.c (revision 040529e9e67f23dc85f4ff5aec94debf8cecb3cc)
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern void tegra194_cpu_reset_handler(void);
extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU
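
/*
 * Note: from the masks above, the PSCI power_state parameter appears to
 * carry the state id in bits [3:0] and the requested core wake time in
 * bits [27:4]. This layout is inferred from the macros themselves, not
 * from a separate specification.
 */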

static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
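
/*
 * Each per-CPU entry is aligned (and therefore padded) to
 * CACHE_WRITEBACK_GRANULE so a core can clean its own entry to memory
 * with clean_dcache_range() without touching cache lines that hold
 * another core's entry.
 */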

/*
 * tegra_fake_system_suspend acts as a boolean flag controlling whether the
 * fake system suspend code path or the normal system suspend code path is
 * taken. It is set from the SiP call handler when the kernel issues an SiP
 * call to set the suspend debug flags.
 */
bool tegra_fake_system_suspend;

int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_NVG_CORE_C6 : (uint32_t)TEGRA_NVG_CORE_C7;
		ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				t19x_percpu_data[cpu].wake_time, 0);
		assert(ret == 0);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save.
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		if (!tegra_fake_system_suspend) {

			/* Prepare for system suspend */
			mce_update_cstate_info(&sc7_cstate_info);

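			/*
			 * Poll the MCE until it reports that SC7 entry is
			 * allowed for this core.
			 */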
			do {
				val = (uint32_t)mce_command_handler(
						(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
						(uint32_t)TEGRA_NVG_CORE_C7,
						MCE_CORE_SLEEP_TIME_INFINITE,
						0U);
			} while (val == 0U);

			/* Instruct the MCE to enter system suspend state */
			ret = mce_command_handler(
					(uint64_t)MCE_CMD_ENTER_CSTATE,
					(uint64_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			assert(ret == 0);

			/* set system suspend state for house-keeping */
			tegra194_set_system_suspend_entry();
		}
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = *states;
	int32_t cluster_powerdn = 1;
	uint32_t core_pos = (uint32_t)read_mpidr() & MPIDR_CPU_MASK;
	uint32_t num_cpus = ncpu, pos = 0;
	mce_cstate_info_t cstate_info = { 0 };

	/* get the current core's power state */
	target = states[core_pos];

	/* CPU suspend */
	if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if ((lvl == MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {

		/* check if all CPUs in the cluster are powering down */
		do {
			target = states[pos];
			if (target != PLAT_MAX_OFF_STATE) {
				cluster_powerdn = 0;
			}
			--num_cpus;
			pos++;
		} while (num_cpus != 0U);

		/* Enable cluster powerdn from last CPU in the cluster */
		if (cluster_powerdn != 0) {

			/* Enable CC6 */
			/* todo */

			/* If cluster group needs to be railgated, request CG7 */
			/* todo */

			/* Turn off wake mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {
			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
		}
	}

	/* System Suspend */
	if ((lvl == MPIDR_AFFLVL2) || (target == PSTATE_ID_SOC_POWERDN)) {
		return PSTATE_ID_SOC_POWERDN;
	}

	/* default state */
	return PSCI_LOCAL_STATE_RUN;
}

int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t val;
	u_register_t ns_sctlr_el1;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
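		/*
		 * Note: the destination below is tzdram_base plus the CPU
		 * reset handler size, i.e. the BL31 image is preserved in
		 * TZDRAM immediately after the reset handler image.
		 */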
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);

		/*
		 * In fake suspend mode, ensure that the loopback procedure
		 * towards system suspend exit is started, instead of calling
		 * WFI. This is done by disabling the EL1 and EL3 MMUs and
		 * calling tegra_secure_entrypoint().
		 */
		if (tegra_fake_system_suspend) {

			/*
			 * Disable EL1's MMU.
			 */
			ns_sctlr_el1 = read_sctlr_el1();
			ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT));
			write_sctlr_el1(ns_sctlr_el1);

			/*
			 * Disable the EL3 MMU to power up the CPU in a
			 * "clean" state.
			 */
			disable_mmu_el3();
			tegra_secure_entrypoint();
			panic();
		}
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);
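	/*
	 * Note: the shift by one above assumes two cores per cluster, so the
	 * linear core number passed to the MCE is (cluster id * 2) + core id.
	 */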

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info for CPUs when onlining: the deepest
	 * power state is set when a core is offlined, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * Non-secure software re-initializes this info when the core comes
	 * back online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit: the deepest power states (CC7 and SC7)
		 * are set for SC7 entry, which may not be what the
		 * non-secure software controlling idle states requested.
		 */
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */

	/* SC8 */

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	return PSCI_E_SUCCESS;
}