1 /* 2 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * Redistributions of source code must retain the above copyright notice, this 8 * list of conditions and the following disclaimer. 9 * 10 * Redistributions in binary form must reproduce the above copyright notice, 11 * this list of conditions and the following disclaimer in the documentation 12 * and/or other materials provided with the distribution. 13 * 14 * Neither the name of ARM nor the names of its contributors may be used 15 * to endorse or promote products derived from this software without specific 16 * prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <arch.h> 32 #include <arch_helpers.h> 33 #include <assert.h> 34 #include <bl_common.h> 35 #include <context.h> 36 #include <context_mgmt.h> 37 #include <debug.h> 38 #include <denver.h> 39 #include <mce.h> 40 #include <psci.h> 41 #include <smmu.h> 42 #include <string.h> 43 #include <t18x_ari.h> 44 #include <tegra_private.h> 45 46 extern void prepare_cpu_pwr_dwn(void); 47 extern void tegra186_cpu_reset_handler(void); 48 extern uint32_t __tegra186_cpu_reset_handler_data, 49 __tegra186_cpu_reset_handler_end; 50 51 /* TZDRAM offset for saving SMMU context */ 52 #define TEGRA186_SMMU_CTX_OFFSET 16 53 54 /* state id mask */ 55 #define TEGRA186_STATE_ID_MASK 0xF 56 /* constants to get power state's wake time */ 57 #define TEGRA186_WAKE_TIME_MASK 0xFFFFFF 58 #define TEGRA186_WAKE_TIME_SHIFT 4 59 /* default core wake mask for CPU_SUSPEND */ 60 #define TEGRA186_CORE_WAKE_MASK 0x180c 61 /* context size to save during system suspend */ 62 #define TEGRA186_SE_CONTEXT_SIZE 3 63 64 static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE]; 65 static unsigned int wake_time[PLATFORM_CORE_COUNT]; 66 67 /* System power down state */ 68 uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF; 69 70 int32_t tegra_soc_validate_power_state(unsigned int power_state, 71 psci_power_state_t *req_state) 72 { 73 int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK; 74 int cpu = read_mpidr() & MPIDR_CPU_MASK; 75 int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; 76 77 if (impl == DENVER_IMPL) 78 cpu |= 0x4; 79 80 wake_time[cpu] = (power_state >> TEGRA186_WAKE_TIME_SHIFT) & 81 TEGRA186_WAKE_TIME_MASK; 82 83 /* Sanity check the requested state id */ 84 switch (state_id) { 85 case PSTATE_ID_CORE_IDLE: 86 case PSTATE_ID_CORE_POWERDN: 87 /* 88 * Core powerdown request only for afflvl 0 89 */ 90 req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id; 91 92 break; 93 94 default: 95 ERROR("%s: unsupported state id (%d)\n", __func__, state_id); 96 return PSCI_E_INVALID_PARAMS; 97 } 98 99 return PSCI_E_SUCCESS; 100 } 101 102 int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) 103 { 104 const plat_local_state_t *pwr_domain_state; 105 unsigned int stateid_afflvl0, stateid_afflvl2; 106 int cpu = read_mpidr() & MPIDR_CPU_MASK; 107 int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; 108 cpu_context_t *ctx = cm_get_context(NON_SECURE); 109 gp_regs_t *gp_regs = get_gpregs_ctx(ctx); 110 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 111 uint64_t smmu_ctx_base; 112 uint32_t val; 113 114 assert(ctx); 115 assert(gp_regs); 116 117 if (impl == DENVER_IMPL) 118 cpu |= 0x4; 119 120 /* get the state ID */ 121 pwr_domain_state = target_state->pwr_domain_state; 122 stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] & 123 TEGRA186_STATE_ID_MASK; 124 stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 125 TEGRA186_STATE_ID_MASK; 126 127 if (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) { 128 129 /* Program default wake mask */ 130 write_ctx_reg(gp_regs, CTX_GPREG_X4, 0); 131 write_ctx_reg(gp_regs, CTX_GPREG_X5, TEGRA186_CORE_WAKE_MASK); 132 write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); 133 (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 0, 0, 0); 134 135 /* Prepare for cpu idle */ 136 (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, 137 TEGRA_ARI_CORE_C6, wake_time[cpu], 0); 138 139 } else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) { 140 141 /* Program default wake mask */ 142 write_ctx_reg(gp_regs, CTX_GPREG_X4, 0); 143 write_ctx_reg(gp_regs, CTX_GPREG_X5, TEGRA186_CORE_WAKE_MASK); 144 write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); 145 (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 0, 0, 0); 146 147 /* Prepare for cpu powerdn */ 148 (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, 149 TEGRA_ARI_CORE_C7, wake_time[cpu], 0); 150 151 } else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 152 153 /* loop until SC7 is allowed */ 154 do { 155 val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, 156 TEGRA_ARI_CORE_C7, 157 MCE_CORE_SLEEP_TIME_INFINITE, 158 0); 159 } while (val == 0); 160 161 /* save SE registers */ 162 se_regs[0] = mmio_read_32(TEGRA_SE0_BASE + 163 SE_MUTEX_WATCHDOG_NS_LIMIT); 164 se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE + 165 RNG_MUTEX_WATCHDOG_NS_LIMIT); 166 se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE + 167 PKA_MUTEX_WATCHDOG_NS_LIMIT); 168 169 /* save 'Secure Boot' Processor Feature Config Register */ 170 val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG); 171 mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val); 172 173 /* save SMMU context to TZDRAM */ 174 smmu_ctx_base = params_from_bl2->tzdram_base + 175 ((uintptr_t)&__tegra186_cpu_reset_handler_data - 176 (uintptr_t)tegra186_cpu_reset_handler) + 177 TEGRA186_SMMU_CTX_OFFSET; 178 tegra_smmu_save_context((uintptr_t)smmu_ctx_base); 179 180 /* Prepare for system suspend */ 181 write_ctx_reg(gp_regs, CTX_GPREG_X4, 1); 182 write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); 183 write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); 184 (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 185 TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC7); 186 187 /* Instruct the MCE to enter system suspend state */ 188 (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, 189 TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); 190 191 } else { 192 ERROR("%s: Unknown state id\n", __func__); 193 return PSCI_E_NOT_SUPPORTED; 194 } 195 196 return PSCI_E_SUCCESS; 197 } 198 199 int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) 200 { 201 const plat_local_state_t *pwr_domain_state = 202 target_state->pwr_domain_state; 203 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 204 unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 205 TEGRA186_STATE_ID_MASK; 206 uint32_t val; 207 208 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 209 /* 210 * The TZRAM loses power when we enter system suspend. To 211 * allow graceful exit from system suspend, we need to copy 212 * BL3-1 over to TZDRAM. 213 */ 214 val = params_from_bl2->tzdram_base + 215 ((uintptr_t)&__tegra186_cpu_reset_handler_end - 216 (uintptr_t)tegra186_cpu_reset_handler); 217 memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, 218 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE); 219 } 220 221 return PSCI_E_SUCCESS; 222 } 223 224 int tegra_soc_pwr_domain_on(u_register_t mpidr) 225 { 226 int target_cpu = mpidr & MPIDR_CPU_MASK; 227 int target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >> 228 MPIDR_AFFINITY_BITS; 229 230 if (target_cluster > MPIDR_AFFLVL1) { 231 ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr); 232 return PSCI_E_NOT_PRESENT; 233 } 234 235 /* construct the target CPU # */ 236 target_cpu |= (target_cluster << 2); 237 238 mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0); 239 240 return PSCI_E_SUCCESS; 241 } 242 243 int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) 244 { 245 int state_id = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; 246 cpu_context_t *ctx = cm_get_context(NON_SECURE); 247 gp_regs_t *gp_regs = get_gpregs_ctx(ctx); 248 249 /* 250 * Check if we are exiting from deep sleep and restore SE 251 * context if we are. 252 */ 253 if (state_id == PSTATE_ID_SOC_POWERDN) { 254 mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT, 255 se_regs[0]); 256 mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT, 257 se_regs[1]); 258 mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT, 259 se_regs[2]); 260 261 /* Init SMMU */ 262 tegra_smmu_init(); 263 264 /* 265 * Reset power state info for the last core doing SC7 entry and exit, 266 * we set deepest power state as CC7 and SC7 for SC7 entry which 267 * may not be requested by non-secure SW which controls idle states. 268 */ 269 write_ctx_reg(gp_regs, CTX_GPREG_X4, 0); 270 write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); 271 write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); 272 (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 273 TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC1); 274 } 275 276 return PSCI_E_SUCCESS; 277 } 278 279 int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) 280 { 281 cpu_context_t *ctx = cm_get_context(NON_SECURE); 282 gp_regs_t *gp_regs = get_gpregs_ctx(ctx); 283 int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; 284 285 assert(ctx); 286 assert(gp_regs); 287 288 /* Turn off wake_mask */ 289 write_ctx_reg(gp_regs, CTX_GPREG_X4, 0); 290 write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); 291 write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); 292 mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, TEGRA_ARI_CLUSTER_CC7, 293 0, 0); 294 295 /* Disable Denver's DCO operations */ 296 if (impl == DENVER_IMPL) 297 denver_disable_dco(); 298 299 /* Turn off CPU */ 300 return mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, 301 MCE_CORE_SLEEP_TIME_INFINITE, 0); 302 } 303 304 __dead2 void tegra_soc_prepare_system_off(void) 305 { 306 cpu_context_t *ctx = cm_get_context(NON_SECURE); 307 gp_regs_t *gp_regs = get_gpregs_ctx(ctx); 308 uint32_t val; 309 310 if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) { 311 312 /* power off the entire system */ 313 mce_enter_ccplex_state(tegra186_system_powerdn_state); 314 315 } else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) { 316 317 /* loop until other CPUs power down */ 318 do { 319 val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, 320 TEGRA_ARI_CORE_C7, 321 MCE_CORE_SLEEP_TIME_INFINITE, 322 0); 323 } while (val == 0); 324 325 /* Prepare for quasi power down */ 326 write_ctx_reg(gp_regs, CTX_GPREG_X4, 1); 327 write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); 328 write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); 329 (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, 330 TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC8); 331 332 /* Enter quasi power down state */ 333 (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, 334 TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); 335 336 /* disable GICC */ 337 tegra_gic_cpuif_deactivate(); 338 339 /* power down core */ 340 prepare_cpu_pwr_dwn(); 341 342 } else { 343 ERROR("%s: unsupported power down state (%d)\n", __func__, 344 tegra186_system_powerdn_state); 345 } 346 347 wfi(); 348 349 /* wait for the system to power down */ 350 for (;;) { 351 ; 352 } 353 } 354 355 int tegra_soc_prepare_system_reset(void) 356 { 357 mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT); 358 359 return PSCI_E_SUCCESS; 360 } 361