xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (revision f9608bc8f8c171f0a2a7c07ed749e552e09839da)
1 /*
2  * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch.h>
32 #include <arch_helpers.h>
33 #include <assert.h>
34 #include <bl_common.h>
35 #include <context.h>
36 #include <context_mgmt.h>
37 #include <debug.h>
38 #include <denver.h>
39 #include <mce.h>
40 #include <psci.h>
41 #include <smmu.h>
42 #include <string.h>
43 #include <t18x_ari.h>
44 #include <tegra_private.h>
45 
46 extern void prepare_cpu_pwr_dwn(void);
47 extern void tegra186_cpu_reset_handler(void);
48 extern uint32_t __tegra186_cpu_reset_handler_data,
49 		__tegra186_cpu_reset_handler_end;
50 
51 /* TZDRAM offset for saving SMMU context */
52 #define TEGRA186_SMMU_CTX_OFFSET	16
53 
54 /* state id mask */
55 #define TEGRA186_STATE_ID_MASK		0xF
56 /* constants to get power state's wake time */
57 #define TEGRA186_WAKE_TIME_MASK		0xFFFFFF
58 #define TEGRA186_WAKE_TIME_SHIFT	4
59 /* default core wake mask for CPU_SUSPEND */
60 #define TEGRA186_CORE_WAKE_MASK		0x180c
61 /* context size to save during system suspend */
62 #define TEGRA186_SE_CONTEXT_SIZE	3
63 
64 static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
65 static unsigned int wake_time[PLATFORM_CORE_COUNT];
66 
67 /* System power down state */
68 uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
69 
70 int32_t tegra_soc_validate_power_state(unsigned int power_state,
71 					psci_power_state_t *req_state)
72 {
73 	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
74 	int cpu = read_mpidr() & MPIDR_CPU_MASK;
75 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
76 
77 	if (impl == DENVER_IMPL)
78 		cpu |= 0x4;
79 
80 	wake_time[cpu] = (power_state  >> TEGRA186_WAKE_TIME_SHIFT) &
81 			 TEGRA186_WAKE_TIME_MASK;
82 
83 	/* Sanity check the requested state id */
84 	switch (state_id) {
85 	case PSTATE_ID_CORE_IDLE:
86 	case PSTATE_ID_CORE_POWERDN:
87 		/*
88 		 * Core powerdown request only for afflvl 0
89 		 */
90 		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
91 
92 		break;
93 
94 	default:
95 		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
96 		return PSCI_E_INVALID_PARAMS;
97 	}
98 
99 	return PSCI_E_SUCCESS;
100 }
101 
/*
 * Enter the low power state requested via CPU_SUSPEND.
 *
 * Three requests are handled:
 *  - afflvl0 == PSTATE_ID_CORE_IDLE: core idle (ARI core state C6)
 *  - afflvl0 == PSTATE_ID_CORE_POWERDN: core powerdown (ARI core state C7)
 *  - afflvl2 == PSTATE_ID_SOC_POWERDN: system suspend (SC7); SE watchdog
 *    registers, the 'Secure Boot' PFCFG register and the SMMU context are
 *    saved first, since that state survives in TZDRAM across the powerdown
 *
 * Returns PSCI_E_SUCCESS, or PSCI_E_NOT_SUPPORTED for an unknown state id.
 */
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	unsigned int stateid_afflvl0, stateid_afflvl2;
	int cpu = read_mpidr() & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;

	assert(ctx);
	assert(gp_regs);

	/* Denver cores occupy the wake_time[] slots with bit2 set */
	if (impl == DENVER_IMPL)
		cpu |= 0x4;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) {

		/*
		 * Program default wake mask.
		 * NOTE(review): X4-X6 appear to carry the arguments consumed
		 * by the UPDATE_CSTATE_INFO MCE command — confirm against
		 * mce_command_handler's contract.
		 */
		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, TEGRA186_CORE_WAKE_MASK);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 0, 0, 0);

		/* Prepare for cpu idle */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C6, wake_time[cpu], 0);

	} else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, TEGRA186_CORE_WAKE_MASK);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 0, 0, 0);

		/* Prepare for cpu powerdn */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, wake_time[cpu], 0);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers (restored in pwr_domain_on_finish) */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/*
		 * save SMMU context to TZDRAM; the destination lies inside
		 * the reset handler's relocated data area, at the same
		 * offset the data has from the handler entry point
		 */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_data -
			 (uintptr_t)tegra186_cpu_reset_handler) +
			TEGRA186_SMMU_CTX_OFFSET;
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend */
		write_ctx_reg(gp_regs, CTX_GPREG_X4, 1);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO,
			TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC7);

		/* Loop until system suspend is allowed */
		do {
			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0);
		} while (val == 0);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);

	} else {
		ERROR("%s: Unknown state id\n", __func__);
		return PSCI_E_NOT_SUPPORTED;
	}

	return PSCI_E_SUCCESS;
}
198 
199 int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
200 {
201 	const plat_local_state_t *pwr_domain_state =
202 		target_state->pwr_domain_state;
203 	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
204 	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
205 		TEGRA186_STATE_ID_MASK;
206 	uint32_t val;
207 
208 	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
209 		/*
210 		 * The TZRAM loses power when we enter system suspend. To
211 		 * allow graceful exit from system suspend, we need to copy
212 		 * BL3-1 over to TZDRAM.
213 		 */
214 		val = params_from_bl2->tzdram_base +
215 			((uintptr_t)&__tegra186_cpu_reset_handler_end -
216 			 (uintptr_t)tegra186_cpu_reset_handler);
217 		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
218 			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
219 	}
220 
221 	return PSCI_E_SUCCESS;
222 }
223 
224 int tegra_soc_pwr_domain_on(u_register_t mpidr)
225 {
226 	int target_cpu = mpidr & MPIDR_CPU_MASK;
227 	int target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
228 			MPIDR_AFFINITY_BITS;
229 
230 	if (target_cluster > MPIDR_AFFLVL1) {
231 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
232 		return PSCI_E_NOT_PRESENT;
233 	}
234 
235 	/* construct the target CPU # */
236 	target_cpu |= (target_cluster << 2);
237 
238 	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
239 
240 	return PSCI_E_SUCCESS;
241 }
242 
/*
 * Runs on a core after it has been powered on (or woken from SC7).
 * Resets the core's cstate info to defaults and, when the whole SoC is
 * exiting system suspend, restores the SE watchdog registers saved in
 * tegra_soc_pwr_domain_suspend() and re-initializes the SMMU.
 *
 * Always returns PSCI_E_SUCCESS.
 */
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
		mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO,
			TEGRA_ARI_CLUSTER_CC1, 0, 0);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* restore the three SE watchdog registers saved before SC7 */
		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset power state info for the last core doing SC7 entry and exit,
		 * we set deepest power state as CC7 and SC7 for SC7 entry which
		 * may not be requested by non-secure SW which controls idle states.
		 */
		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO,
			TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC1);
	}

	return PSCI_E_SUCCESS;
}
296 
/*
 * Power off the calling core: clear its wake mask, disable Denver's DCO
 * operations where applicable, then instruct the MCE to put the core
 * into C7 with an infinite sleep time.
 *
 * Returns the status of the final ENTER_CSTATE MCE command.
 */
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	assert(ctx);
	assert(gp_regs);

	/* Turn off wake_mask */
	write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
	write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
	write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
	mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, TEGRA_ARI_CLUSTER_CC7,
		0, 0);

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL)
		denver_disable_dco();

	/* Turn off CPU */
	return mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
			MCE_CORE_SLEEP_TIME_INFINITE, 0);
}
321 
322 __dead2 void tegra_soc_prepare_system_off(void)
323 {
324 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
325 	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
326 	uint32_t val;
327 
328 	if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {
329 
330 		/* power off the entire system */
331 		mce_enter_ccplex_state(tegra186_system_powerdn_state);
332 
333 	} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {
334 
335 		/* Prepare for quasi power down */
336 		write_ctx_reg(gp_regs, CTX_GPREG_X4, 1);
337 		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
338 		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
339 		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO,
340 			TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC8);
341 
342 		/* loop until other CPUs power down */
343 		do {
344 			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
345 					TEGRA_ARI_CORE_C7,
346 					MCE_CORE_SLEEP_TIME_INFINITE,
347 					0);
348 		} while (val == 0);
349 
350 		/* Enter quasi power down state */
351 		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
352 			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
353 
354 		/* disable GICC */
355 		tegra_gic_cpuif_deactivate();
356 
357 		/* power down core */
358 		prepare_cpu_pwr_dwn();
359 
360 	} else {
361 		ERROR("%s: unsupported power down state (%d)\n", __func__,
362 			tegra186_system_powerdn_state);
363 	}
364 
365 	wfi();
366 
367 	/* wait for the system to power down */
368 	for (;;) {
369 		;
370 	}
371 }
372 
373 int tegra_soc_prepare_system_reset(void)
374 {
375 	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
376 
377 	return PSCI_E_SUCCESS;
378 }
379