/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <mce.h>
#include <smmu.h>
#include <stdbool.h>
#include <t18x_ari.h>
#include <tegra_private.h>

extern void memcpy16(void *dest, const void *src, unsigned int length);

extern void prepare_cpu_pwr_dwn(void);
extern void tegra186_cpu_reset_handler(void);
extern uint64_t __tegra186_cpu_reset_handler_end,
		__tegra186_smmu_context;

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3U

static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];

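/*
 * Layout of the CPU_SUSPEND power_state parameter, as implied by the
 * mask/shift definitions above:
 *   bits [3:0]  - platform-specific state id (PSTATE_ID_CORE_IDLE,
 *                 PSTATE_ID_CORE_POWERDN, ...)
 *   bits [27:4] - requested core wake time, saved per-CPU below and
 *                 later passed to the MCE (in TSC ticks)
 */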
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

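/*
 * Suspend handler: for core-level states the MCE is asked to enter C6
 * (idle) or C7 (powerdown); for a system-level powerdown (SC7) the SE,
 * secure-boot and SMMU contexts are saved to TZDRAM first, and the MCE
 * is polled until SC7 entry is permitted.
 */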
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/* save SMMU context to TZDRAM */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)&tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
		/* Loop until system suspend is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {
		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret == 0) {
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {
		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_cpu_in_cluster(states, ncpu)) {
			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
			}

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					const plat_local_state_t *states,
					uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)&tegra186_cpu_reset_handler);
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}

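/*
 * Power on handler: the MCE identifies a core by a linear id of the form
 * (cluster * 4) + core, constructed below from the cluster and core fields
 * of the target MPIDR.
 */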
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	int32_t ret = PSCI_E_SUCCESS;
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;

	if (target_cluster > MPIDR_AFFLVL1) {

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;

	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);

		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}

	return ret;
}

int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * Non-secure software will re-initialize this info when the core
	 * comes online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit. We set the deepest cluster/system states
		 * for SC7 entry, which may not be what the non-secure SW
		 * controlling idle states requested.
		 */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}

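/*
 * Power off handler: Denver cores have their Dynamic Code Optimization
 * (DCO) engine switched off before power down; the core is then handed to
 * the MCE with a C7 request and an infinite sleep time.
 */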
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* power off the entire system */
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);

	return PSCI_E_SUCCESS;
}