1 /* 2 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <arch.h> 8 #include <assert.h> 9 #include <stdbool.h> 10 #include <string.h> 11 12 #include <arch_helpers.h> 13 #include <bpmp_ipc.h> 14 #include <common/bl_common.h> 15 #include <common/debug.h> 16 #include <context.h> 17 #include <drivers/delay_timer.h> 18 #include <denver.h> 19 #include <lib/el3_runtime/context_mgmt.h> 20 #include <lib/psci/psci.h> 21 #include <mce.h> 22 #include <mce_private.h> 23 #include <memctrl_v2.h> 24 #include <plat/common/platform.h> 25 #include <se.h> 26 #include <smmu.h> 27 #include <t194_nvg.h> 28 #include <tegra194_private.h> 29 #include <tegra_platform.h> 30 #include <tegra_private.h> 31 32 extern uint32_t __tegra194_cpu_reset_handler_data, 33 __tegra194_cpu_reset_handler_end; 34 35 /* TZDRAM offset for saving SMMU context */ 36 #define TEGRA194_SMMU_CTX_OFFSET 16U 37 38 /* state id mask */ 39 #define TEGRA194_STATE_ID_MASK 0xFU 40 /* constants to get power state's wake time */ 41 #define TEGRA194_WAKE_TIME_MASK 0x0FFFFFF0U 42 #define TEGRA194_WAKE_TIME_SHIFT 4U 43 /* default core wake mask for CPU_SUSPEND */ 44 #define TEGRA194_CORE_WAKE_MASK 0x180cU 45 46 static struct t19x_psci_percpu_data { 47 uint32_t wake_time; 48 } __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT]; 49 50 int32_t tegra_soc_validate_power_state(uint32_t power_state, 51 psci_power_state_t *req_state) 52 { 53 uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & 54 TEGRA194_STATE_ID_MASK; 55 uint32_t cpu = plat_my_core_pos(); 56 int32_t ret = PSCI_E_SUCCESS; 57 58 /* save the core wake time (in TSC ticks)*/ 59 t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK) 60 << TEGRA194_WAKE_TIME_SHIFT; 61 62 /* 63 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure 64 * that the correct value is read in tegra_soc_pwr_domain_suspend(), 65 * which is called with caches disabled. It is possible to read a stale 66 * value from DRAM in that function, because the L2 cache is not flushed 67 * unless the cluster is entering CC6/CC7. 68 */ 69 clean_dcache_range((uint64_t)&t19x_percpu_data[cpu], 70 sizeof(t19x_percpu_data[cpu])); 71 72 /* Sanity check the requested state id */ 73 switch (state_id) { 74 case PSTATE_ID_CORE_IDLE: 75 76 if (psci_get_pstate_type(power_state) != PSTATE_TYPE_STANDBY) { 77 ret = PSCI_E_INVALID_PARAMS; 78 break; 79 } 80 81 /* Core idle request */ 82 req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE; 83 req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN; 84 break; 85 86 default: 87 ERROR("%s: unsupported state id (%d)\n", __func__, state_id); 88 ret = PSCI_E_INVALID_PARAMS; 89 break; 90 } 91 92 return ret; 93 } 94 95 int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state) 96 { 97 uint32_t cpu = plat_my_core_pos(); 98 mce_cstate_info_t cstate_info = { 0 }; 99 100 /* Program default wake mask */ 101 cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK; 102 cstate_info.update_wake_mask = 1; 103 mce_update_cstate_info(&cstate_info); 104 105 /* Enter CPU idle */ 106 (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, 107 (uint64_t)TEGRA_NVG_CORE_C6, 108 t19x_percpu_data[cpu].wake_time, 109 0U); 110 111 return PSCI_E_SUCCESS; 112 } 113 114 int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) 115 { 116 const plat_local_state_t *pwr_domain_state; 117 uint8_t stateid_afflvl2; 118 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 119 uint64_t mc_ctx_base; 120 uint32_t val; 121 mce_cstate_info_t sc7_cstate_info = { 122 .cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6, 123 .ccplex = (uint32_t)TEGRA_NVG_CG_CG7, 124 .system = (uint32_t)TEGRA_NVG_SYSTEM_SC7, 125 .system_state_force = 1U, 126 .update_wake_mask = 1U, 127 }; 128 int32_t ret = 0; 129 130 /* get the state ID */ 131 pwr_domain_state = target_state->pwr_domain_state; 132 stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 133 TEGRA194_STATE_ID_MASK; 134 135 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 136 137 /* save 'Secure Boot' Processor Feature Config Register */ 138 val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG); 139 mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val); 140 141 /* save MC context */ 142 mc_ctx_base = params_from_bl2->tzdram_base + 143 tegra194_get_mc_ctx_offset(); 144 tegra_mc_save_context((uintptr_t)mc_ctx_base); 145 146 /* 147 * Suspend SE, RNG1 and PKA1 only on silcon and fpga, 148 * since VDK does not support atomic se ctx save 149 */ 150 if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) { 151 ret = tegra_se_suspend(); 152 assert(ret == 0); 153 } 154 155 /* Prepare for system suspend */ 156 mce_update_cstate_info(&sc7_cstate_info); 157 158 do { 159 val = (uint32_t)mce_command_handler( 160 (uint32_t)MCE_CMD_IS_SC7_ALLOWED, 161 (uint32_t)TEGRA_NVG_CORE_C7, 162 MCE_CORE_SLEEP_TIME_INFINITE, 163 0U); 164 } while (val == 0U); 165 166 /* Instruct the MCE to enter system suspend state */ 167 ret = mce_command_handler( 168 (uint64_t)MCE_CMD_ENTER_CSTATE, 169 (uint64_t)TEGRA_NVG_CORE_C7, 170 MCE_CORE_SLEEP_TIME_INFINITE, 171 0U); 172 assert(ret == 0); 173 174 /* set system suspend state for house-keeping */ 175 tegra194_set_system_suspend_entry(); 176 } 177 178 return PSCI_E_SUCCESS; 179 } 180 181 /******************************************************************************* 182 * Helper function to check if this is the last ON CPU in the cluster 183 ******************************************************************************/ 184 static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states, 185 uint32_t ncpu) 186 { 187 plat_local_state_t target; 188 bool last_on_cpu = true; 189 uint32_t num_cpus = ncpu, pos = 0; 190 191 do { 192 target = states[pos]; 193 if (target != PLAT_MAX_OFF_STATE) { 194 last_on_cpu = false; 195 } 196 --num_cpus; 197 pos++; 198 } while (num_cpus != 0U); 199 200 return last_on_cpu; 201 } 202 203 /******************************************************************************* 204 * Helper function to get target power state for the cluster 205 ******************************************************************************/ 206 static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states, 207 uint32_t ncpu) 208 { 209 uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK; 210 plat_local_state_t target = states[core_pos]; 211 mce_cstate_info_t cstate_info = { 0 }; 212 213 /* CPU off */ 214 if (target == PLAT_MAX_OFF_STATE) { 215 216 /* Enable cluster powerdn from last CPU in the cluster */ 217 if (tegra_last_on_cpu_in_cluster(states, ncpu)) { 218 219 /* Enable CC6 state and turn off wake mask */ 220 cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6; 221 cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7; 222 cstate_info.system_state_force = 1; 223 cstate_info.update_wake_mask = 1U; 224 mce_update_cstate_info(&cstate_info); 225 226 } else { 227 228 /* Turn off wake_mask */ 229 cstate_info.update_wake_mask = 1U; 230 mce_update_cstate_info(&cstate_info); 231 target = PSCI_LOCAL_STATE_RUN; 232 } 233 } 234 235 return target; 236 } 237 238 /******************************************************************************* 239 * Platform handler to calculate the proper target power level at the 240 * specified affinity level 241 ******************************************************************************/ 242 plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl, 243 const plat_local_state_t *states, 244 uint32_t ncpu) 245 { 246 plat_local_state_t target = PSCI_LOCAL_STATE_RUN; 247 uint32_t cpu = plat_my_core_pos(); 248 249 /* System Suspend */ 250 if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) { 251 target = PSTATE_ID_SOC_POWERDN; 252 } 253 254 /* CPU off, CPU suspend */ 255 if (lvl == (uint32_t)MPIDR_AFFLVL1) { 256 target = tegra_get_afflvl1_pwr_state(states, ncpu); 257 } 258 259 /* target cluster/system state */ 260 return target; 261 } 262 263 int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) 264 { 265 const plat_local_state_t *pwr_domain_state = 266 target_state->pwr_domain_state; 267 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 268 uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & 269 TEGRA194_STATE_ID_MASK; 270 uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE; 271 uint64_t val; 272 int32_t ret = PSCI_E_SUCCESS; 273 274 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 275 val = params_from_bl2->tzdram_base + 276 tegra194_get_cpu_reset_handler_size(); 277 278 /* initialise communication channel with BPMP */ 279 ret = tegra_bpmp_ipc_init(); 280 assert(ret == 0); 281 282 /* Enable SE clock before SE context save */ 283 ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE); 284 assert(ret == 0); 285 286 /* 287 * It is very unlikely that the BL31 image would be 288 * bigger than 2^32 bytes 289 */ 290 assert(src_len_in_bytes < UINT32_MAX); 291 292 if (tegra_se_calculate_save_sha256(BL31_BASE, 293 (uint32_t)src_len_in_bytes) != 0) { 294 ERROR("Hash calculation failed. Reboot\n"); 295 (void)tegra_soc_prepare_system_reset(); 296 } 297 298 /* 299 * The TZRAM loses power when we enter system suspend. To 300 * allow graceful exit from system suspend, we need to copy 301 * BL3-1 over to TZDRAM. 302 */ 303 val = params_from_bl2->tzdram_base + 304 tegra194_get_cpu_reset_handler_size(); 305 memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, 306 src_len_in_bytes); 307 308 /* Disable SE clock after SE context save */ 309 ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE); 310 assert(ret == 0); 311 } 312 313 return ret; 314 } 315 316 int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state) 317 { 318 return PSCI_E_NOT_SUPPORTED; 319 } 320 321 int32_t tegra_soc_pwr_domain_on(u_register_t mpidr) 322 { 323 uint64_t target_cpu = mpidr & MPIDR_CPU_MASK; 324 uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >> 325 MPIDR_AFFINITY_BITS; 326 int32_t ret = 0; 327 328 if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) { 329 ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr); 330 return PSCI_E_NOT_PRESENT; 331 } 332 333 /* construct the target CPU # */ 334 target_cpu += (target_cluster << 1U); 335 336 ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U); 337 if (ret < 0) { 338 return PSCI_E_DENIED; 339 } 340 341 return PSCI_E_SUCCESS; 342 } 343 344 int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) 345 { 346 const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); 347 uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step; 348 uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; 349 cpu_context_t *ctx = cm_get_context(NON_SECURE); 350 uint64_t actlr_elx; 351 352 /* 353 * Reset power state info for CPUs when onlining, we set 354 * deepest power when offlining a core but that may not be 355 * requested by non-secure sw which controls idle states. It 356 * will re-init this info from non-secure software when the 357 * core come online. 358 */ 359 actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1)); 360 actlr_elx &= ~DENVER_CPU_PMSTATE_MASK; 361 actlr_elx |= DENVER_CPU_PMSTATE_C1; 362 write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx)); 363 364 /* 365 * Check if we are exiting from deep sleep and restore SE 366 * context if we are. 367 */ 368 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { 369 370 #if ENABLE_STRICT_CHECKING_MODE 371 /* 372 * Enable strict checking after programming the GSC for 373 * enabling TZSRAM and TZDRAM 374 */ 375 mce_enable_strict_checking(); 376 #endif 377 378 /* Init SMMU */ 379 tegra_smmu_init(); 380 381 /* Resume SE, RNG1 and PKA1 */ 382 tegra_se_resume(); 383 384 /* 385 * Program XUSB STREAMIDs 386 * ====================== 387 * T19x XUSB has support for XUSB virtualization. It will 388 * have one physical function (PF) and four Virtual functions 389 * (VF) 390 * 391 * There were below two SIDs for XUSB until T186. 392 * 1) #define TEGRA_SID_XUSB_HOST 0x1bU 393 * 2) #define TEGRA_SID_XUSB_DEV 0x1cU 394 * 395 * We have below four new SIDs added for VF(s) 396 * 3) #define TEGRA_SID_XUSB_VF0 0x5dU 397 * 4) #define TEGRA_SID_XUSB_VF1 0x5eU 398 * 5) #define TEGRA_SID_XUSB_VF2 0x5fU 399 * 6) #define TEGRA_SID_XUSB_VF3 0x60U 400 * 401 * When virtualization is enabled then we have to disable SID 402 * override and program above SIDs in below newly added SID 403 * registers in XUSB PADCTL MMIO space. These registers are 404 * TZ protected and so need to be done in ATF. 405 * 406 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU) 407 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0 (0x139cU) 408 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U) 409 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U) 410 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U) 411 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU) 412 * 413 * This change disables SID override and programs XUSB SIDs 414 * in above registers to support both virtualization and 415 * non-virtualization platforms 416 */ 417 if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) { 418 419 mmio_write_32(TEGRA_XUSB_PADCTL_BASE + 420 XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST); 421 assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE + 422 XUSB_PADCTL_HOST_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_HOST); 423 mmio_write_32(TEGRA_XUSB_PADCTL_BASE + 424 XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0); 425 assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE + 426 XUSB_PADCTL_HOST_AXI_STREAMID_VF_0) == TEGRA_SID_XUSB_VF0); 427 mmio_write_32(TEGRA_XUSB_PADCTL_BASE + 428 XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1); 429 assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE + 430 XUSB_PADCTL_HOST_AXI_STREAMID_VF_1) == TEGRA_SID_XUSB_VF1); 431 mmio_write_32(TEGRA_XUSB_PADCTL_BASE + 432 XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2); 433 assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE + 434 XUSB_PADCTL_HOST_AXI_STREAMID_VF_2) == TEGRA_SID_XUSB_VF2); 435 mmio_write_32(TEGRA_XUSB_PADCTL_BASE + 436 XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3); 437 assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE + 438 XUSB_PADCTL_HOST_AXI_STREAMID_VF_3) == TEGRA_SID_XUSB_VF3); 439 mmio_write_32(TEGRA_XUSB_PADCTL_BASE + 440 XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV); 441 assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE + 442 XUSB_PADCTL_DEV_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_DEV); 443 } 444 } 445 446 /* 447 * Enable dual execution optimized translations for all ELx. 448 */ 449 if (enable_ccplex_lock_step != 0U) { 450 actlr_elx = read_actlr_el3(); 451 actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3; 452 write_actlr_el3(actlr_elx); 453 454 actlr_elx = read_actlr_el2(); 455 actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2; 456 write_actlr_el2(actlr_elx); 457 458 actlr_elx = read_actlr_el1(); 459 actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1; 460 write_actlr_el1(actlr_elx); 461 } 462 463 return PSCI_E_SUCCESS; 464 } 465 466 int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) 467 { 468 uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; 469 int32_t ret = 0; 470 471 (void)target_state; 472 473 /* Disable Denver's DCO operations */ 474 if (impl == DENVER_IMPL) { 475 denver_disable_dco(); 476 } 477 478 /* Turn off CPU */ 479 ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, 480 (uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U); 481 assert(ret == 0); 482 483 return PSCI_E_SUCCESS; 484 } 485 486 __dead2 void tegra_soc_prepare_system_off(void) 487 { 488 /* System power off */ 489 mce_system_shutdown(); 490 491 wfi(); 492 493 /* wait for the system to power down */ 494 for (;;) { 495 ; 496 } 497 } 498 499 int32_t tegra_soc_prepare_system_reset(void) 500 { 501 /* System reboot */ 502 mce_system_reboot(); 503 504 return PSCI_E_SUCCESS; 505 } 506