xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision 1552df5d25944b2bddf42e96acbadca18b3c7c95)
1 /*
2  * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <common/bl_common.h>
13 #include <common/debug.h>
14 #include <context.h>
15 #include <denver.h>
16 #include <lib/el3_runtime/context_mgmt.h>
17 #include <lib/psci/psci.h>
18 #include <plat/common/platform.h>
19 
20 #include <mce.h>
21 #include <smmu.h>
22 #include <t18x_ari.h>
23 #include <tegra_private.h>
24 
25 extern void memcpy16(void *dest, const void *src, unsigned int length);
26 
27 extern void prepare_cpu_pwr_dwn(void);
28 extern void tegra186_cpu_reset_handler(void);
29 extern uint32_t __tegra186_cpu_reset_handler_end,
30 		__tegra186_smmu_context;
31 
32 /* state id mask */
33 #define TEGRA186_STATE_ID_MASK		0xF
34 /* constants to get power state's wake time */
35 #define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0
36 #define TEGRA186_WAKE_TIME_SHIFT	4
37 /* default core wake mask for CPU_SUSPEND */
38 #define TEGRA186_CORE_WAKE_MASK		0x180c
39 /* context size to save during system suspend */
40 #define TEGRA186_SE_CONTEXT_SIZE	3
41 
/* SE0/RNG1/PKA1 mutex watchdog limit registers, saved across SC7 entry
 * in tegra_soc_pwr_domain_suspend() and restored in
 * tegra_soc_pwr_domain_on_finish(). */
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];

/* Per-CPU wake time (in TSC ticks), written on the CPU_SUSPEND
 * validation path. Aligned to the cache writeback granule so a single
 * core's entry can be cleaned to DRAM independently of its neighbours. */
static struct t18x_psci_percpu_data {
	unsigned int wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];

/* System power down state; consumed by tegra_soc_prepare_system_off() */
uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
49 
50 int32_t tegra_soc_validate_power_state(unsigned int power_state,
51 					psci_power_state_t *req_state)
52 {
53 	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
54 	int cpu = plat_my_core_pos();
55 
56 	/* save the core wake time (in TSC ticks)*/
57 	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
58 			<< TEGRA186_WAKE_TIME_SHIFT;
59 
60 	/*
61 	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
62 	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
63 	 * is called with caches disabled. It is possible to read a stale value
64 	 * from DRAM in that function, because the L2 cache is not flushed
65 	 * unless the cluster is entering CC6/CC7.
66 	 */
67 	clean_dcache_range((uint64_t)&percpu_data[cpu],
68 			sizeof(percpu_data[cpu]));
69 
70 	/* Sanity check the requested state id */
71 	switch (state_id) {
72 	case PSTATE_ID_CORE_IDLE:
73 	case PSTATE_ID_CORE_POWERDN:
74 
75 		/* Core powerdown request */
76 		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
77 		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
78 
79 		break;
80 
81 	default:
82 		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
83 		return PSCI_E_INVALID_PARAMS;
84 	}
85 
86 	return PSCI_E_SUCCESS;
87 }
88 
/*
 * Platform handler for the CPU_SUSPEND downward path.
 *
 * For a core-level state id this asks the MCE to enter C6 (idle) or C7
 * (powerdown) with the wake time recorded earlier. For a system-level
 * SC7 request it saves the SE watchdog registers, the 'Secure Boot'
 * feature config and the SMMU context to TZDRAM, then hands control to
 * the MCE to enter system suspend.
 *
 * NOTE(review): assumed to run with caches disabled - percpu_data was
 * cleaned to DRAM in tegra_soc_validate_power_state() for this reason.
 */
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	unsigned int stateid_afflvl0, stateid_afflvl2;
	int cpu = plat_my_core_pos();
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state IDs at the core and system power levels */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle (C6) or powerdown (C7) */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
				percpu_data[cpu].wake_time, 0);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers; restored in tegra_soc_pwr_domain_on_finish() */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/*
		 * save SMMU context to TZDRAM, at the same offset from
		 * tzdram_base as __tegra186_smmu_context has from the
		 * start of the relocated reset handler
		 */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend: force CC7 + SC7 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Loop until system suspend is allowed (non-zero result) */
		do {
			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0);
		} while (val == 0);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
	}

	return PSCI_E_SUCCESS;
}
157 
/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level.
 *
 * Besides computing the coordinated target state, this handler also has
 * side effects: it programs the MCE wake mask / cluster state and asks
 * the MCE whether the deeper core/cluster states are currently allowed.
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = *states;
	int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1;
	int core_pos = read_mpidr() & MPIDR_CPU_MASK;
	mce_cstate_info_t cstate_info = { 0 };

	/*
	 * get the power state at this level
	 *
	 * NOTE(review): AFFLVL1 indexes 'states' with the MPIDR core
	 * number while AFFLVL2 uses plat_my_core_pos(); confirm this
	 * asymmetry is intentional.
	 */
	if (lvl == MPIDR_AFFLVL1)
		target = *(states + core_pos);
	if (lvl == MPIDR_AFFLVL2)
		target = *(states + cpu);

	/* CPU suspend: core powerdown requested at cluster level */
	if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed (non-zero means allowed) */
		ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
				TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time,
				0);
		if (ret)
			return PSTATE_ID_CORE_POWERDN;
	}

	/* CPU off */
	if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {

		/*
		 * find out the number of ON cpus in the cluster; scans
		 * all 'ncpu' entries, consuming 'states' and 'ncpu'
		 */
		do {
			target = *states++;
			if (target != PLAT_MAX_OFF_STATE)
				cluster_powerdn = 0;
		} while (--ncpu);

		/* Enable cluster powerdn from last CPU in the cluster */
		if (cluster_powerdn) {

			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
						  TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0);
			if (ret)
				return PSTATE_ID_CORE_POWERDN;

		} else {

			/* Some cores are still ON; only turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
		}
	}

	/* System Suspend: pass the SC7 request up unchanged */
	if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
	    (target == PSTATE_ID_SOC_POWERDN))
		return PSTATE_ID_SOC_POWERDN;

	/* default state */
	return PSCI_LOCAL_STATE_RUN;
}
235 
236 int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
237 {
238 	const plat_local_state_t *pwr_domain_state =
239 		target_state->pwr_domain_state;
240 	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
241 	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
242 		TEGRA186_STATE_ID_MASK;
243 	uint64_t val;
244 
245 	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
246 		/*
247 		 * The TZRAM loses power when we enter system suspend. To
248 		 * allow graceful exit from system suspend, we need to copy
249 		 * BL3-1 over to TZDRAM.
250 		 */
251 		val = params_from_bl2->tzdram_base +
252 			((uintptr_t)&__tegra186_cpu_reset_handler_end -
253 			 (uintptr_t)tegra186_cpu_reset_handler);
254 		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
255 			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
256 	}
257 
258 	return PSCI_E_SUCCESS;
259 }
260 
261 int tegra_soc_pwr_domain_on(u_register_t mpidr)
262 {
263 	uint32_t target_cpu = mpidr & MPIDR_CPU_MASK;
264 	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
265 			MPIDR_AFFINITY_BITS;
266 
267 	if (target_cluster > MPIDR_AFFLVL1) {
268 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
269 		return PSCI_E_NOT_PRESENT;
270 	}
271 
272 	/* construct the target CPU # */
273 	target_cpu |= (target_cluster << 2);
274 
275 	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
276 
277 	return PSCI_E_SUCCESS;
278 }
279 
/*
 * Platform handler invoked after a CPU is powered on (or resumes).
 * Resets the MCE cstate info for the onlining core and, on exit from
 * system suspend (SC7), restores the SE register context saved in
 * tegra_soc_pwr_domain_suspend() and re-initializes the SMMU.
 */
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* restore the SE/RNG/PKA mutex watchdog limit registers */
		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
		 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}
330 
331 int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
332 {
333 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
334 
335 	/* Disable Denver's DCO operations */
336 	if (impl == DENVER_IMPL)
337 		denver_disable_dco();
338 
339 	/* Turn off CPU */
340 	(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
341 			MCE_CORE_SLEEP_TIME_INFINITE, 0);
342 
343 	return PSCI_E_SUCCESS;
344 }
345 
/*
 * Final system power-off handler. Never returns: either the MCE powers
 * the CCPLEX down directly, or - on the SC8 path - this core drains
 * itself and then spins in WFI waiting for power removal.
 *
 * The power-down flavour is selected by tegra186_system_powerdn_state.
 */
__dead2 void tegra_soc_prepare_system_off(void)
{
	mce_cstate_info_t cstate_info = { 0 };
	uint32_t val;

	if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {

		/* power off the entire system */
		mce_enter_ccplex_state(tegra186_system_powerdn_state);

	} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {

		/* Prepare for quasi power down: force CC7 + SC8 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC8;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* loop until other CPUs power down (non-zero means allowed) */
		do {
			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0);
		} while (val == 0);

		/* Enter quasi power down state */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);

		/* disable GICC */
		tegra_gic_cpuif_deactivate();

		/* power down core */
		prepare_cpu_pwr_dwn();

		/* flush L1/L2 data caches */
		dcsw_op_all(DCCISW);

	} else {
		ERROR("%s: unsupported power down state (%d)\n", __func__,
			tegra186_system_powerdn_state);
	}

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}
398 
399 int tegra_soc_prepare_system_reset(void)
400 {
401 	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
402 
403 	return PSCI_E_SUCCESS;
404 }
405