/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern void tegra_secure_entrypoint(void);

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
/* Reset handler and data blob relocated to TZDRAM across system suspend */
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_data,
		__tegra186_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET	16U
#endif

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

/*
 * Per-CPU data written by tegra_soc_validate_power_state() and read by
 * tegra_soc_pwr_domain_suspend(). Aligned to the cache writeback granule
 * so each entry can be cleaned to DRAM independently (the suspend path
 * runs with caches disabled and must see the up-to-date value).
 */
static struct t19x_psci_percpu_data {
	uint32_t wake_time;	/* core wake time in TSC ticks */
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

/*
 * tegra_fake_system_suspend acts as a boolean var controlling whether
 * we are going to take fake system suspend code or normal system suspend code
 * path. This variable is set inside the sip call handlers, when the kernel
 * requests an SIP call to set the suspend debug flags.
55 */ 56 bool tegra_fake_system_suspend; 57 58 int32_t tegra_soc_validate_power_state(uint32_t power_state, 59 psci_power_state_t *req_state) 60 { 61 uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & 62 TEGRA186_STATE_ID_MASK; 63 uint32_t cpu = plat_my_core_pos(); 64 int32_t ret = PSCI_E_SUCCESS; 65 66 /* save the core wake time (in TSC ticks)*/ 67 t19x_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK) 68 << TEGRA186_WAKE_TIME_SHIFT; 69 70 /* 71 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that 72 * the correct value is read in tegra_soc_pwr_domain_suspend(), which 73 * is called with caches disabled. It is possible to read a stale value 74 * from DRAM in that function, because the L2 cache is not flushed 75 * unless the cluster is entering CC6/CC7. 76 */ 77 clean_dcache_range((uint64_t)&t19x_percpu_data[cpu], 78 sizeof(t19x_percpu_data[cpu])); 79 80 /* Sanity check the requested state id */ 81 switch (state_id) { 82 case PSTATE_ID_CORE_IDLE: 83 case PSTATE_ID_CORE_POWERDN: 84 85 /* Core powerdown request */ 86 req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id; 87 req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id; 88 89 break; 90 91 default: 92 ERROR("%s: unsupported state id (%d)\n", __func__, state_id); 93 ret = PSCI_E_INVALID_PARAMS; 94 break; 95 } 96 97 return ret; 98 } 99 100 int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) 101 { 102 const plat_local_state_t *pwr_domain_state; 103 uint8_t stateid_afflvl0, stateid_afflvl2; 104 #if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM 105 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 106 uint64_t smmu_ctx_base; 107 #endif 108 uint32_t val; 109 mce_cstate_info_t sc7_cstate_info = { 110 .cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6, 111 .system = (uint32_t)TEGRA_NVG_SYSTEM_SC7, 112 .system_state_force = 1U, 113 .update_wake_mask = 1U, 114 }; 115 uint32_t cpu = plat_my_core_pos(); 116 int32_t ret = 0; 117 118 /* get the state 
ID */ 119 pwr_domain_state = target_state->pwr_domain_state; 120 stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] & 121 TEGRA186_STATE_ID_MASK; 122 stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 123 TEGRA186_STATE_ID_MASK; 124 125 if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) || 126 (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) { 127 128 /* Enter CPU idle/powerdown */ 129 val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ? 130 (uint32_t)TEGRA_NVG_CORE_C6 : (uint32_t)TEGRA_NVG_CORE_C7; 131 ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val, 132 percpu_data[cpu].wake_time, 0); 133 assert(ret == 0); 134 135 } else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 136 137 /* save 'Secure Boot' Processor Feature Config Register */ 138 val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG); 139 mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val); 140 141 #if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM 142 /* save SMMU context */ 143 smmu_ctx_base = params_from_bl2->tzdram_base + 144 ((uintptr_t)&__tegra186_cpu_reset_handler_data - 145 (uintptr_t)&tegra186_cpu_reset_handler) + 146 TEGRA186_SMMU_CTX_OFFSET; 147 tegra_smmu_save_context((uintptr_t)smmu_ctx_base); 148 #else 149 tegra_smmu_save_context(0); 150 #endif 151 152 /* 153 * Suspend SE, RNG1 and PKA1 only on silcon and fpga, 154 * since VDK does not support atomic se ctx save 155 */ 156 if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) { 157 ret = tegra_se_suspend(); 158 assert(ret == 0); 159 } 160 161 if (!tegra_fake_system_suspend) { 162 163 /* Prepare for system suspend */ 164 mce_update_cstate_info(&sc7_cstate_info); 165 166 do { 167 val = (uint32_t)mce_command_handler( 168 (uint32_t)MCE_CMD_IS_SC7_ALLOWED, 169 (uint32_t)TEGRA_NVG_CORE_C7, 170 MCE_CORE_SLEEP_TIME_INFINITE, 171 0U); 172 } while (val == 0U); 173 174 /* Instruct the MCE to enter system suspend state */ 175 ret = mce_command_handler( 176 (uint64_t)MCE_CMD_ENTER_CSTATE, 177 (uint64_t)TEGRA_NVG_CORE_C7, 178 
MCE_CORE_SLEEP_TIME_INFINITE, 179 0U); 180 assert(ret == 0); 181 } 182 } else { 183 ; /* do nothing */ 184 } 185 186 return PSCI_E_SUCCESS; 187 } 188 189 /******************************************************************************* 190 * Platform handler to calculate the proper target power level at the 191 * specified affinity level 192 ******************************************************************************/ 193 plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl, 194 const plat_local_state_t *states, 195 uint32_t ncpu) 196 { 197 plat_local_state_t target = *states; 198 int32_t cluster_powerdn = 1; 199 uint32_t core_pos = (uint32_t)read_mpidr() & MPIDR_CPU_MASK; 200 uint32_t num_cpus = ncpu, pos = 0; 201 mce_cstate_info_t cstate_info = { 0 }; 202 203 /* get the current core's power state */ 204 target = states[core_pos]; 205 206 /* CPU suspend */ 207 if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) { 208 209 /* Program default wake mask */ 210 cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK; 211 cstate_info.update_wake_mask = 1; 212 mce_update_cstate_info(&cstate_info); 213 } 214 215 /* CPU off */ 216 if ((lvl == MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) { 217 218 /* find out the number of ON cpus in the cluster */ 219 do { 220 target = states[pos]; 221 if (target != PLAT_MAX_OFF_STATE) { 222 cluster_powerdn = 0; 223 } 224 --num_cpus; 225 pos++; 226 } while (num_cpus != 0U); 227 228 /* Enable cluster powerdn from last CPU in the cluster */ 229 if (cluster_powerdn != 0) { 230 231 /* Enable CC6 */ 232 /* todo */ 233 234 /* If cluster group needs to be railgated, request CG7 */ 235 /* todo */ 236 237 /* Turn off wake mask */ 238 cstate_info.update_wake_mask = 1U; 239 mce_update_cstate_info(&cstate_info); 240 241 } else { 242 /* Turn off wake_mask */ 243 cstate_info.update_wake_mask = 1U; 244 mce_update_cstate_info(&cstate_info); 245 } 246 } 247 248 /* System Suspend */ 249 if ((lvl == MPIDR_AFFLVL2) || (target == 
PSTATE_ID_SOC_POWERDN)) { 250 return PSTATE_ID_SOC_POWERDN; 251 } 252 253 /* default state */ 254 return PSCI_LOCAL_STATE_RUN; 255 } 256 257 #if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM 258 int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) 259 { 260 const plat_local_state_t *pwr_domain_state = 261 target_state->pwr_domain_state; 262 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 263 uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 264 TEGRA186_STATE_ID_MASK; 265 uint64_t val; 266 u_register_t ns_sctlr_el1; 267 268 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 269 /* 270 * The TZRAM loses power when we enter system suspend. To 271 * allow graceful exit from system suspend, we need to copy 272 * BL3-1 over to TZDRAM. 273 */ 274 val = params_from_bl2->tzdram_base + 275 ((uintptr_t)&__tegra186_cpu_reset_handler_end - 276 (uintptr_t)tegra186_cpu_reset_handler); 277 memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, 278 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE); 279 280 /* 281 * In fake suspend mode, ensure that the loopback procedure 282 * towards system suspend exit is started, instead of calling 283 * WFI. This is done by disabling both MMU's of EL1 & El3 284 * and calling tegra_secure_entrypoint(). 285 */ 286 if (tegra_fake_system_suspend) { 287 288 /* 289 * Disable EL1's MMU. 
290 */ 291 ns_sctlr_el1 = read_sctlr_el1(); 292 ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT)); 293 write_sctlr_el1(ns_sctlr_el1); 294 295 /* 296 * Disable MMU to power up the CPU in a "clean" 297 * state 298 */ 299 disable_mmu_el3(); 300 tegra_secure_entrypoint(); 301 panic(); 302 } 303 } 304 305 return PSCI_E_SUCCESS; 306 } 307 #endif 308 309 int32_t tegra_soc_pwr_domain_on(u_register_t mpidr) 310 { 311 uint64_t target_cpu = mpidr & MPIDR_CPU_MASK; 312 uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >> 313 MPIDR_AFFINITY_BITS; 314 int32_t ret = 0; 315 316 if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) { 317 ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr); 318 return PSCI_E_NOT_PRESENT; 319 } 320 321 /* construct the target CPU # */ 322 target_cpu += (target_cluster << 1U); 323 324 ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U); 325 if (ret < 0) { 326 return PSCI_E_DENIED; 327 } 328 329 return PSCI_E_SUCCESS; 330 } 331 332 int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) 333 { 334 uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; 335 336 /* 337 * Reset power state info for CPUs when onlining, we set 338 * deepest power when offlining a core but that may not be 339 * requested by non-secure sw which controls idle states. It 340 * will re-init this info from non-secure software when the 341 * core come online. 342 */ 343 344 /* 345 * Check if we are exiting from deep sleep and restore SE 346 * context if we are. 347 */ 348 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 349 /* Init SMMU */ 350 tegra_smmu_init(); 351 352 /* Resume SE, RNG1 and PKA1 */ 353 tegra_se_resume(); 354 355 /* 356 * Reset power state info for the last core doing SC7 357 * entry and exit, we set deepest power state as CC7 358 * and SC7 for SC7 entry which may not be requested by 359 * non-secure SW which controls idle states. 
360 */ 361 } 362 363 return PSCI_E_SUCCESS; 364 } 365 366 int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) 367 { 368 uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; 369 int32_t ret = 0; 370 371 (void)target_state; 372 373 /* Disable Denver's DCO operations */ 374 if (impl == DENVER_IMPL) { 375 denver_disable_dco(); 376 } 377 378 /* Turn off CPU */ 379 ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, 380 (uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U); 381 assert(ret == 0); 382 383 return PSCI_E_SUCCESS; 384 } 385 386 __dead2 void tegra_soc_prepare_system_off(void) 387 { 388 /* System power off */ 389 390 /* SC8 */ 391 392 wfi(); 393 394 /* wait for the system to power down */ 395 for (;;) { 396 ; 397 } 398 } 399 400 int32_t tegra_soc_prepare_system_reset(void) 401 { 402 return PSCI_E_SUCCESS; 403 } 404