/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

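/*******************************************************************************
 * Validate the entry state for CPU_SUSPEND. Going by the masks defined above,
 * the custom power_state value packs the state id into bits [3:0] and the
 * core wake time into bits [27:4]. As a sketch, a hypothetical caller
 * requesting core powerdown with wake time 'wt' would compose:
 *
 *   power_state = ((wt << TEGRA194_WAKE_TIME_SHIFT) & TEGRA194_WAKE_TIME_MASK)
 *                 | PSTATE_ID_CORE_POWERDN;
 *
 * Note that the handler stores the masked field shifted further left by
 * TEGRA194_WAKE_TIME_SHIFT before it is passed on to the MCE.
 ******************************************************************************/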
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

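/*******************************************************************************
 * Handler for CPU_STANDBY requests. The default wake mask is programmed and
 * the core is placed in the C6 idle state through the MCE, using the wake
 * time saved by tegra_soc_validate_power_state().
 ******************************************************************************/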
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

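/*******************************************************************************
 * Handler for powerdown requests. Two paths are handled here: core powerdown
 * (C7) for CPU_SUSPEND, and system suspend (SC7), which additionally saves
 * the boot config and SMMU context to scratch/TZDRAM, suspends the SE
 * blocks, and polls the MCE until SC7 entry is allowed before requesting it.
 ******************************************************************************/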
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Enter CPU powerdown */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);

		/* Loop until the MCE confirms that SC7 entry is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

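/*******************************************************************************
 * Handler invoked on the power down path, just before the final wfi. On the
 * system suspend path, BL31 is copied into TZDRAM (right after the CPU reset
 * handler already placed there) so that it survives the loss of TZRAM power
 * across SC7.
 ******************************************************************************/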
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

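/*******************************************************************************
 * Handler for CPU_ON. The MPIDR is converted into a linear core number as
 * (cluster * 2) + cpu, which assumes two cores per cluster (as in the T194
 * Carmel CPU complex), before asking the MCE to bring the core online.
 ******************************************************************************/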
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

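/*******************************************************************************
 * Handler invoked after a core finishes powering on. On exit from system
 * suspend (SC7), this restores the state that was lost: strict checking mode
 * (if enabled), the SMMU, the SE/RNG1/PKA1 blocks and the XUSB stream IDs
 * described below.
 ******************************************************************************/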
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * This info is re-initialized from non-secure software when the
	 * core comes online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB supports XUSB virtualization, with one physical
		 * function (PF) and four virtual functions (VFs).
		 *
		 * Until T186 there were only two SIDs for XUSB:
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV    0x1cU
		 *
		 * Four new SIDs have been added for the VFs:
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled, the SID override must be
		 * disabled and the SIDs above programmed into the newly
		 * added SID registers in the XUSB PADCTL MMIO space. These
		 * registers are TZ protected, so this must be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * The writes below disable the SID override and program the
		 * XUSB SIDs into the registers above, supporting both
		 * virtualization and non-virtualization platforms.
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
		}

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit. We set the deepest power states (CC7 and
		 * SC7) for SC7 entry, which may not be what the non-secure
		 * software controlling idle states requested.
		 */
	}

	return PSCI_E_SUCCESS;
}

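/*******************************************************************************
 * Handler for CPU_OFF. Denver cores must have their DCO (dynamic code
 * optimization) operations disabled before powering down; the core is then
 * placed in C7 with an infinite sleep time, so it is expected to return only
 * via an explicit CPU_ON.
 ******************************************************************************/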
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}
469