/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <common/debug.h>
#include <denver.h>
#include <mce.h>
#include <plat/common/platform.h>
#include <lib/psci/psci.h>
#include <smmu.h>
#include <string.h>
#include <tegra_private.h>
#include <t194_nvg.h>

extern void prepare_core_pwr_dwn(void);

/*
 * Non-zero when the platform is running in "fake" system suspend mode,
 * i.e. the MCE is never actually told to enter SC7 (see
 * tegra_soc_pwr_domain_suspend below).
 */
extern uint8_t tegra_fake_system_suspend;

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
/* Symbols exported by the CPU reset handler; used to locate the copy of the
 * reset handler (and its data) that lives in TZDRAM across system suspend. */
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_data,
		__tegra186_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET 16
#endif

/* state id mask */
#define TEGRA186_STATE_ID_MASK 0xF
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0
#define TEGRA186_WAKE_TIME_SHIFT 4
/* default core wake mask for CPU_SUSPEND */
/* NOTE(review): TEGRA186_CORE_WAKE_MASK is not referenced in this file —
 * presumably consumed elsewhere or dead; confirm before removing. */
#define TEGRA186_CORE_WAKE_MASK 0x180c
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE 3

/* Saved SE/RNG/PKA mutex watchdog limits across SC7 (see suspend/on_finish) */
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
/*
 * Per-CPU data, cacheline-aligned so each entry can be cleaned to DRAM
 * independently (it is read later with caches disabled).
 */
static struct t18x_psci_percpu_data {
	unsigned int wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Validate the requested CPU_SUSPEND power state. Records the core wake time
 * in percpu_data[] (cleaned to DRAM for later cache-off reads) and fills in
 * req_state for the supported core idle/powerdown state ids.
 *
 * Returns PSCI_E_SUCCESS on success, PSCI_E_INVALID_PARAMS for an
 * unsupported state id.
 ******************************************************************************/
int32_t tegra_soc_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	int cpu = plat_my_core_pos();

	/* save the core wake time (in TSC ticks)*/
	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&percpu_data[cpu],
			sizeof(percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform suspend handler, called with caches disabled.
 *
 * For plain core idle/powerdown requests nothing is done here. For system
 * suspend (affinity level 2 state == PSTATE_ID_SOC_POWERDN) this saves the
 * SE/RNG/PKA mutex watchdog registers and the 'Secure Boot' PFCFG register,
 * saves the SMMU context (to TZDRAM when context save is enabled), then asks
 * the MCE for permission to enter SC7 and finally commands entry — unless
 * fake system suspend is enabled, in which case the MCE interaction is
 * skipped entirely.
 ******************************************************************************/
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	unsigned int stateid_afflvl0, stateid_afflvl2;
#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
#endif
	uint32_t val;
	mce_cstate_info_t cstate_info = { 0 };

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown — no extra work needed here */

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
		/*
		 * save SMMU context: the context area sits at a fixed offset
		 * past the reset handler data within the TZDRAM copy of the
		 * reset handler.
		 */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_data -
			 (uintptr_t)tegra186_cpu_reset_handler) +
			TEGRA186_SMMU_CTX_OFFSET;
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
#else
		tegra_smmu_save_context(0);
#endif

		if (tegra_fake_system_suspend == 0U) {

			/* Prepare for system suspend */
			cstate_info.cluster = TEGRA_NVG_CLUSTER_CC6;
			cstate_info.system = TEGRA_NVG_SYSTEM_SC7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1;

			mce_update_cstate_info(&cstate_info);

			/*
			 * Busy-poll the MCE until it reports that SC7 entry
			 * is allowed. NOTE(review): there is no timeout here;
			 * presumably the MCE is guaranteed to eventually
			 * allow SC7 — confirm.
			 */
			do {
				val = mce_command_handler(
						MCE_CMD_IS_SC7_ALLOWED,
						TEGRA_NVG_CORE_C7,
						MCE_CORE_SLEEP_TIME_INFINITE,
						0);
			} while (val == 0);

			/* Instruct the MCE to enter system suspend state */
			(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
				TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE, 0);
		}
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
					const plat_local_state_t *states,
					unsigned int ncpu)
{
	plat_local_state_t target = *states;
	int cluster_powerdn = 1;
	/* this core's position within the cluster indexes into 'states' */
	int core_pos = read_mpidr() & MPIDR_CPU_MASK;

	/* get the current core's power state */
	target = *(states + core_pos);

	/* CPU suspend */
	if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */

		/* Check if CCx state is allowed. */
	}

	/* CPU off */
	if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {

		/* find out the number of ON cpus in the cluster */
		do {
			target = *states++;
			if (target != PLAT_MAX_OFF_STATE)
				cluster_powerdn = 0;
		} while (--ncpu);

		/* Enable cluster powerdn from last CPU in the cluster */
		if (cluster_powerdn) {

			/* Enable CC7 state and turn off wake mask */

		} else {

			/* Turn off wake_mask */
		}
	}

	/*
	 * System Suspend.
	 * NOTE(review): the '||' means ANY query at MPIDR_AFFLVL2 returns
	 * SOC_POWERDN, regardless of this core's requested state — verify
	 * this is intentional (level-2 coordination may only be reached on
	 * the system suspend path).
	 */
	if ((lvl == MPIDR_AFFLVL2) || (target == PSTATE_ID_SOC_POWERDN))
		return PSTATE_ID_SOC_POWERDN;

	/* default state */
	return PSCI_LOCAL_STATE_RUN;
}

#if ENABLE_SYSTEM_SUSPEND_CTX_SAVE_TZDRAM
/*******************************************************************************
 * Last step before the final WFI of a power down. On the system suspend path,
 * copies the BL3-1 image into TZDRAM (immediately after the relocated CPU
 * reset handler) so it survives the loss of TZRAM power during SC7.
 ******************************************************************************/
int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)tegra186_cpu_reset_handler);
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * Power up the CPU identified by 'mpidr' by sending an online command to the
 * MCE. Returns PSCI_E_NOT_PRESENT for an out-of-range cluster id.
 ******************************************************************************/
int tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	int target_cpu = mpidr & MPIDR_CPU_MASK;
	int target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;

	if (target_cluster > MPIDR_AFFLVL1) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/*
	 * construct the target CPU #
	 * (linear id: cluster * 4 + cpu — assumes 4 cores per cluster;
	 * TODO(review): confirm against the MCE's core numbering)
	 */
	target_cpu |= (target_cluster << 2);

	/* NOTE(review): return value of the MCE command is ignored here */
	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Finish powering a core up. When resuming from system suspend (SC7), restore
 * the SE/RNG/PKA mutex watchdog registers saved on the way down.
 ******************************************************************************/
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */

		/*
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
		 */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Prepare this core for power off. On Denver cores, disable the Dynamic Code
 * Optimizer (DCO) before the core goes down.
 ******************************************************************************/
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL)
		denver_disable_dco();

	/* Turn off CPU */

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * System power off: execute WFI and spin forever waiting for the power to be
 * cut. Never returns.
 ******************************************************************************/
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */

	/* SC8 */

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

/*******************************************************************************
 * System reset preparation: nothing SoC-specific to do, allow the reset.
 ******************************************************************************/
int tegra_soc_prepare_system_reset(void)
{
	return PSCI_E_SUCCESS;
}