/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <bpmp_ipc.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <drivers/delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

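/*
 * Per-CPU data saved by tegra_soc_validate_power_state() and consumed by the
 * standby/suspend handlers. Each entry is aligned to the cache writeback
 * granule so it can be cleaned to memory independently of other cores.
 */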
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

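/*******************************************************************************
 * Validate the requested PSCI power state, save the core wake time and
 * populate the per-affinity-level entries in 'req_state'.
 ******************************************************************************/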
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

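/*******************************************************************************
 * Handler for CPU_SUSPEND to the core retention state: program the default
 * wake mask and ask the MCE to enter the core idle state (C6).
 ******************************************************************************/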
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

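/*******************************************************************************
 * Handler for CPU_SUSPEND to a power-down state. Enters core powerdown (C7)
 * for core-level requests; for system-level requests (SC7) it saves the SMMU
 * and SE context, waits until the MCE allows SC7 entry and then requests it.
 ******************************************************************************/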
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Enter CPU powerdown */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save.
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);

		/* Loop until the MCE confirms that SC7 entry is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

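/*******************************************************************************
 * Handler called on the power down path, just before the final WFI. On SC7
 * entry it brings up the BPMP IPC channel, gates the SE clock around a SHA256
 * measurement of the BL31 image and copies BL31 into TZDRAM, since TZRAM
 * loses power during system suspend.
 ******************************************************************************/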
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
	uint64_t val;
	int32_t ret = PSCI_E_SUCCESS;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/* initialise communication channel with BPMP */
		ret = tegra_bpmp_ipc_init();
		assert(ret == 0);

		/* Enable SE clock before SE context save */
		ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);

		/*
		 * It is very unlikely that the BL31 image would be
		 * bigger than 2^32 bytes
		 */
		assert(src_len_in_bytes < UINT32_MAX);

		if (tegra_se_calculate_save_sha256(BL31_BASE,
					(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       src_len_in_bytes);

		/* Disable SE clock after SE context save */
		ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);
	}

	return ret;
}

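/*******************************************************************************
 * The early suspend-powerdown hook is not supported on this platform.
 ******************************************************************************/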
int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

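/*******************************************************************************
 * Handler for CPU_ON: validate the target cluster and ask the MCE to bring
 * the requested core online.
 ******************************************************************************/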
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

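/*******************************************************************************
 * Handler called after a core has been powered on. When exiting from system
 * suspend (SC7) it re-enables strict checking (if built with
 * ENABLE_STRICT_CHECKING_MODE), reinitialises the SMMU, resumes the SE
 * engines and reprograms the XUSB stream IDs.
 ******************************************************************************/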
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info for CPUs when onlining. The deepest
	 * power state is set when a core is offlined, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * The info is re-initialised from non-secure software once the
	 * core comes back online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB supports XUSB virtualization, with one physical
		 * function (PF) and four virtual functions (VFs).
		 *
		 * Until T186, XUSB used the following two SIDs:
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV    0x1cU
		 *
		 * The following four SIDs have been added for the VFs:
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled, the SID override must be
		 * disabled and the SIDs above must be programmed into the
		 * SID registers below, which are new in the XUSB PADCTL MMIO
		 * space. These registers are TZ protected, so the programming
		 * has to be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * The code below disables the SID override and programs the
		 * XUSB SIDs into these registers to support both virtualized
		 * and non-virtualized platforms.
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
		}

		/*
		 * Reset the power state info for the last core completing
		 * SC7 entry and exit. The deepest power states (CC7 and SC7)
		 * are set for SC7 entry, which may not be what the non-secure
		 * software controlling idle states requested.
		 */
	}

	return PSCI_E_SUCCESS;
}

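/*******************************************************************************
 * Handler for CPU_OFF: disable the DCO on Denver-class cores and ask the MCE
 * to power down the core (C7) with an infinite sleep time.
 ******************************************************************************/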
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

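/*******************************************************************************
 * Handler for SYSTEM_OFF: request a system shutdown from the MCE and wait
 * for the power to be removed.
 ******************************************************************************/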
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

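/*******************************************************************************
 * Handler for SYSTEM_RESET: request a system reboot from the MCE.
 ******************************************************************************/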
int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}