/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <common/debug.h>
#include <denver.h>
#include <mce.h>
#include <plat/common/platform.h>
#include <lib/psci/psci.h>
#include <smmu.h>
#include <string.h>
#include <tegra_private.h>
#include <t194_nvg.h>

extern void prepare_core_pwr_dwn(void);

/* Non-zero selects the "fake" suspend path that skips the real MCE entry */
extern uint8_t tegra_fake_system_suspend;

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_data,
		__tegra186_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET	16
#endif

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xF
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0
#define TEGRA186_WAKE_TIME_SHIFT	4
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180c
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3

/* Saved SE/RNG/PKA watchdog limits, restored on SC7 exit (see on_finish) */
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
/*
 * Per-CPU scratch, cache-line aligned so each entry can be cleaned to DRAM
 * independently (it is read back later with caches disabled).
 */
static struct t18x_psci_percpu_data {
	unsigned int wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];

/*
 * Validate a CPU_SUSPEND power_state parameter and fill in the per-level
 * state array for the PSCI framework.
 *
 * Also latches the caller-requested core wake time into percpu_data so
 * tegra_soc_pwr_domain_suspend() can retrieve it later.
 *
 * Returns PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS for unsupported state ids.
 */
int32_t tegra_soc_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	int cpu = plat_my_core_pos();

	/* save the core wake time (in TSC ticks)*/
	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&percpu_data[cpu],
			sizeof(percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}

/*
 * PSCI suspend handler: called with caches disabled on the suspend path.
 *
 * For core-level states (C6/C7) it issues the MCE cstate-entry command with
 * the wake time captured in tegra_soc_validate_power_state(). For SoC
 * powerdown (SC7) it saves SE/RNG/PKA watchdog state, the 'Secure Boot'
 * feature config and the SMMU context, then hands control to the MCE.
 *
 * Always returns PSCI_E_SUCCESS; MCE command results are intentionally
 * ignored on the core-idle path.
 */
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	unsigned int stateid_afflvl0, stateid_afflvl2;
#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
#endif
	uint32_t val;
	mce_cstate_info_t cstate_info = { 0 };
	int cpu = plat_my_core_pos();

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown: idle maps to C6, powerdown to C7 */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			TEGRA_NVG_CORE_C6 : TEGRA_NVG_CORE_C7;
		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
				percpu_data[cpu].wake_time, 0);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
		/*
		 * save SMMU context to TZDRAM, at the same offset from the
		 * relocated reset handler as the in-image handler data area
		 */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_data -
			 (uintptr_t)tegra186_cpu_reset_handler) +
			TEGRA186_SMMU_CTX_OFFSET;
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
#else
		tegra_smmu_save_context(0);
#endif

		if (tegra_fake_system_suspend == 0U) {

			/* Prepare for system suspend */
			cstate_info.cluster = TEGRA_NVG_CLUSTER_CC6;
			cstate_info.system = TEGRA_NVG_SYSTEM_SC7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1;

			mce_update_cstate_info(&cstate_info);

			/* Poll the MCE until it permits SC7 entry */
			do {
				val = mce_command_handler(
						MCE_CMD_IS_SC7_ALLOWED,
						TEGRA_NVG_CORE_C7,
						MCE_CORE_SLEEP_TIME_INFINITE,
						0);
			} while (val == 0);

			/* Instruct the MCE to enter system suspend state */
			(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
				TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
		}
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, 175 const plat_local_state_t *states, 176 unsigned int ncpu) 177 { 178 plat_local_state_t target = *states; 179 int cluster_powerdn = 1; 180 int core_pos = read_mpidr() & MPIDR_CPU_MASK; 181 mce_cstate_info_t cstate_info = { 0 }; 182 183 /* get the current core's power state */ 184 target = *(states + core_pos); 185 186 /* CPU suspend */ 187 if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) { 188 189 /* Program default wake mask */ 190 cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK; 191 cstate_info.update_wake_mask = 1; 192 mce_update_cstate_info(&cstate_info); 193 } 194 195 /* CPU off */ 196 if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) { 197 198 /* find out the number of ON cpus in the cluster */ 199 do { 200 target = *states++; 201 if (target != PLAT_MAX_OFF_STATE) 202 cluster_powerdn = 0; 203 } while (--ncpu); 204 205 /* Enable cluster powerdn from last CPU in the cluster */ 206 if (cluster_powerdn) { 207 208 /* Enable CC7 state and turn off wake mask */ 209 210 } else { 211 212 /* Turn off wake_mask */ 213 } 214 } 215 216 /* System Suspend */ 217 if ((lvl == MPIDR_AFFLVL2) || (target == PSTATE_ID_SOC_POWERDN)) 218 return PSTATE_ID_SOC_POWERDN; 219 220 /* default state */ 221 return PSCI_LOCAL_STATE_RUN; 222 } 223 224 #if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM 225 int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) 226 { 227 const plat_local_state_t *pwr_domain_state = 228 target_state->pwr_domain_state; 229 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 230 unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 231 TEGRA186_STATE_ID_MASK; 232 uint64_t val; 233 234 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 235 /* 236 * The TZRAM loses power when we enter system suspend. To 237 * allow graceful exit from system suspend, we need to copy 238 * BL3-1 over to TZDRAM. 
239 */ 240 val = params_from_bl2->tzdram_base + 241 ((uintptr_t)&__tegra186_cpu_reset_handler_end - 242 (uintptr_t)tegra186_cpu_reset_handler); 243 memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, 244 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE); 245 } 246 247 return PSCI_E_SUCCESS; 248 } 249 #endif 250 251 int tegra_soc_pwr_domain_on(u_register_t mpidr) 252 { 253 int target_cpu = mpidr & MPIDR_CPU_MASK; 254 int target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >> 255 MPIDR_AFFINITY_BITS; 256 257 if (target_cluster > MPIDR_AFFLVL1) { 258 ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr); 259 return PSCI_E_NOT_PRESENT; 260 } 261 262 /* construct the target CPU # */ 263 target_cpu |= (target_cluster << 2); 264 265 mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0); 266 267 return PSCI_E_SUCCESS; 268 } 269 270 int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) 271 { 272 int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; 273 274 /* 275 * Reset power state info for CPUs when onlining, we set 276 * deepest power when offlining a core but that may not be 277 * requested by non-secure sw which controls idle states. It 278 * will re-init this info from non-secure software when the 279 * core come online. 280 */ 281 282 /* 283 * Check if we are exiting from deep sleep and restore SE 284 * context if we are. 285 */ 286 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 287 288 mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT, 289 se_regs[0]); 290 mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT, 291 se_regs[1]); 292 mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT, 293 se_regs[2]); 294 295 /* Init SMMU */ 296 297 /* 298 * Reset power state info for the last core doing SC7 299 * entry and exit, we set deepest power state as CC7 300 * and SC7 for SC7 entry which may not be requested by 301 * non-secure SW which controls idle states. 
302 */ 303 } 304 305 return PSCI_E_SUCCESS; 306 } 307 308 int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) 309 { 310 int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; 311 312 /* Disable Denver's DCO operations */ 313 if (impl == DENVER_IMPL) 314 denver_disable_dco(); 315 316 /* Turn off CPU */ 317 318 return PSCI_E_SUCCESS; 319 } 320 321 __dead2 void tegra_soc_prepare_system_off(void) 322 { 323 /* System power off */ 324 325 /* SC8 */ 326 327 wfi(); 328 329 /* wait for the system to power down */ 330 for (;;) { 331 ; 332 } 333 } 334 335 int tegra_soc_prepare_system_reset(void) 336 { 337 return PSCI_E_SUCCESS; 338 } 339