/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <common/debug.h>
#include <denver.h>
#include <mce.h>
#include <plat/common/platform.h>
#include <lib/psci/psci.h>
#include <smmu.h>
#include <string.h>
#include <tegra_private.h>

extern void prepare_core_pwr_dwn(void);

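/* CPU reset handler symbols, used to locate the context save area in TZDRAM */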
#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_data,
		__tegra186_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET	16
#endif

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xF
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0
#define TEGRA186_WAKE_TIME_SHIFT	4
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180c
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3

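/* SE mutex watchdog settings saved across system suspend */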
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
static struct t18x_psci_percpu_data {
	unsigned int wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];

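/*******************************************************************************
 * Platform handler to validate the entry power state and save the core's
 * requested wake time before it enters CPU_SUSPEND
 ******************************************************************************/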
int32_t tegra_soc_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	int cpu = plat_my_core_pos();

	/* save the core wake time (in TSC ticks) */
	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&percpu_data[cpu],
			sizeof(percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}

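/*******************************************************************************
 * Platform handler called on entry to a low power state. On system suspend
 * (SC7) entry, it saves the SE watchdog settings, the 'Secure Boot' feature
 * config register and the SMMU context, since SoC power is lost in that state.
 ******************************************************************************/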
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	unsigned int stateid_afflvl0, stateid_afflvl2;
#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
#endif
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_data -
			 (uintptr_t)tegra186_cpu_reset_handler) +
			TEGRA186_SMMU_CTX_OFFSET;
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
#else
		tegra_smmu_save_context(0);
#endif

		/* Instruct the MCE to enter system suspend state */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = *states;
	int cluster_powerdn = 1;
	int core_pos = read_mpidr() & MPIDR_CPU_MASK;

	/* get the current core's power state */
	target = *(states + core_pos);

	/* CPU suspend */
	if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */

		/* Check if CCx state is allowed. */
	}

	/* CPU off */
	if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {

		/* find out the number of ON cpus in the cluster */
		do {
			target = *states++;
			if (target != PLAT_MAX_OFF_STATE)
				cluster_powerdn = 0;
		} while (--ncpu);

		/* Enable cluster powerdn from last CPU in the cluster */
		if (cluster_powerdn) {

			/* Enable CC7 state and turn off wake mask */

		} else {

			/* Turn off wake_mask */
		}
	}

	/* System Suspend */
	if ((lvl == MPIDR_AFFLVL2) || (target == PSTATE_ID_SOC_POWERDN))
		return PSTATE_ID_SOC_POWERDN;

	/* default state */
	return PSCI_LOCAL_STATE_RUN;
}

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
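/*******************************************************************************
 * Platform handler called on the power down path, before the core executes
 * WFI. If the SoC is entering system suspend (SC7), BL3-1 is copied to TZDRAM
 * because TZRAM loses power in that state.
 ******************************************************************************/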
int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)tegra186_cpu_reset_handler);
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}
#endif

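/*******************************************************************************
 * Platform handler to power on a CPU. The MCE firmware is asked to bring the
 * target core online.
 ******************************************************************************/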
int tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	int target_cpu = mpidr & MPIDR_CPU_MASK;
	int target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;

	if (target_cluster > MPIDR_AFFLVL1) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu |= (target_cluster << 2);

	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);

	return PSCI_E_SUCCESS;
}

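/*******************************************************************************
 * Platform handler called after a CPU has been powered on. When exiting from
 * deep sleep (SC7), the saved SE watchdog settings are restored.
 ******************************************************************************/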
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info for CPUs when onlining: we program the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * Non-secure software re-initializes this info when the core comes
	 * back online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */

		/*
		 * Reset the power state info for the last core that did the
		 * SC7 entry and exit: we program the deepest power states,
		 * CC7 and SC7, for SC7 entry, which may not be what the
		 * non-secure software controlling idle states requested.
		 */
	}

	return PSCI_E_SUCCESS;
}

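/*******************************************************************************
 * Platform handler to power off a CPU. Denver cores have their DCO operations
 * disabled before the core is turned off.
 ******************************************************************************/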
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL)
		denver_disable_dco();

	/* Turn off CPU */

	return PSCI_E_SUCCESS;
}

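/*******************************************************************************
 * Platform handler for SYSTEM_OFF. The core waits in WFI for the system to
 * power down and never returns.
 ******************************************************************************/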
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */

	/* SC8 */

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

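/*******************************************************************************
 * Platform handler for SYSTEM_RESET
 ******************************************************************************/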
int tegra_soc_prepare_system_reset(void)
{
	return PSCI_E_SUCCESS;
}