/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
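/*
 * Note: bits [27:4] of the PSCI power_state parameter carry the requested
 * core wake time. tegra_soc_validate_power_state() extracts this field with
 * the mask/shift values above and stores it, in TSC ticks, in the per-CPU
 * data below.
 */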
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

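/*
 * Per-CPU wake time storage. The array is aligned to the cache writeback
 * granule so that one core's entry can be cleaned to DRAM without touching
 * the entries of the other cores.
 */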
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

/*
 * tegra_fake_system_suspend is a boolean flag that selects between the fake
 * and the normal system suspend code paths. It is set from the SiP call
 * handlers when the kernel issues a SiP call to set the suspend debug flags.
 */
bool tegra_fake_system_suspend;

int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
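	/*
	 * Deepest state combination used for system suspend: cluster in CC6,
	 * CCPLEX clock gated (CG7) and system in SC7, with the system state
	 * forced and the wake mask updated.
	 */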
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Enter CPU powerdown */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		if (!tegra_fake_system_suspend) {

			/* Prepare for system suspend */
			mce_update_cstate_info(&sc7_cstate_info);

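			/*
			 * Poll the MCE until it reports that SC7 entry is
			 * allowed.
			 */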
			do {
				val = (uint32_t)mce_command_handler(
						(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
						(uint32_t)TEGRA_NVG_CORE_C7,
						MCE_CORE_SLEEP_TIME_INFINITE,
						0U);
			} while (val == 0U);

			/* Instruct the MCE to enter system suspend state */
			ret = mce_command_handler(
					(uint64_t)MCE_CMD_ENTER_CSTATE,
					(uint64_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			assert(ret == 0);

			/* set system suspend state for house-keeping */
			tegra194_set_system_suspend_entry();
		}
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
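	/*
	 * 'states' holds the requested local power state of each core in
	 * this cluster; index it with this core's position in the cluster.
	 */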
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t val;
	u_register_t ns_sctlr_el1;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
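		/*
		 * The copy destination is offset by the reset handler size,
		 * placing BL31 immediately after the CPU reset handler in
		 * TZDRAM.
		 */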
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);

		/*
		 * In fake suspend mode, ensure that the loopback procedure
		 * towards system suspend exit is started, instead of calling
		 * WFI. This is done by disabling the MMUs of both EL1 and
		 * EL3 and calling tegra_secure_entrypoint().
		 */
		if (tegra_fake_system_suspend) {

			/*
			 * Disable EL1's MMU.
			 */
			ns_sctlr_el1 = read_sctlr_el1();
			ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT));
			write_sctlr_el1(ns_sctlr_el1);

			/*
			 * Disable MMU to power up the CPU in a "clean"
			 * state
			 */
			disable_mmu_el3();
			tegra_secure_entrypoint();
			panic();
		}
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the linear core ID; each T194 cluster has two cores */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info when a CPU comes online: we set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * Non-secure software re-initializes this info when the core comes
	 * back online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB supports virtualization, with one physical
		 * function (PF) and four virtual functions (VFs).
		 *
		 * Until T186 there were two SIDs for XUSB:
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV     0x1cU
		 *
		 * Four new SIDs have been added for the VFs:
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled, the SID override must be
		 * disabled and the SIDs above must be programmed into the
		 * following SID registers in the XUSB PADCTL MMIO space.
		 * These registers are TZ protected, so this must be done
		 * from ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * The writes below disable the SID override and program the
		 * XUSB SIDs into these registers, supporting both
		 * virtualization and non-virtualization platforms.
		 */
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit: for SC7 entry we set the deepest power
		 * states (CC7 and SC7), which may not be what the non-secure
		 * software controlling idle states requested.
		 */
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}