/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <bpmp_ipc.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <drivers/delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <memctrl_v2.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
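
/*
 * Layout of the NS world's CPU_SUSPEND power_state parameter, as parsed
 * by tegra_soc_validate_power_state() below. This is a sketch derived
 * from the masks above; the exact encoding is owned by the NS caller:
 *
 *   [3:0]  - state id (e.g. PSTATE_ID_CORE_IDLE, PSTATE_ID_CORE_POWERDN)
 *   [27:4] - core wake time, in TSC ticks
 *
 * A hypothetical request for core powerdown with a wake time field of
 * 0x1000 could thus be composed as:
 *
 *   power_state = ((0x1000U << TEGRA194_WAKE_TIME_SHIFT) &
 *		    TEGRA194_WAKE_TIME_MASK) | PSTATE_ID_CORE_POWERDN;
 */
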
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}
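
/*******************************************************************************
 * Handler for entry into a powerdown state; requests core powerdown (C7)
 * from the MCE or, for system suspend, saves the MC and SE contexts and
 * forces the SC7 state before requesting C7 for the last core.
 ******************************************************************************/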
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t mc_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
			  TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
			  TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Enter CPU powerdown */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context */
		mc_ctx_base = params_from_bl2->tzdram_base +
			      tegra194_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);

		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
					 uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
						      uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
						  const plat_local_state_t *states,
						  uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}
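
/*******************************************************************************
 * Handler invoked as the last step before WFI on the way into system
 * suspend; verifies the BL31 image hash and copies the image to TZDRAM,
 * since TZRAM contents are lost across SC7 entry.
 ******************************************************************************/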
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
				  TEGRA194_STATE_ID_MASK;
	uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
	uint64_t val;
	int32_t ret = PSCI_E_SUCCESS;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* initialise communication channel with BPMP */
		ret = tegra_bpmp_ipc_init();
		assert(ret == 0);

		/* Enable SE clock before SE context save */
		ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);

		/*
		 * It is very unlikely that the BL31 image would be
		 * bigger than 2^32 bytes
		 */
		assert(src_len_in_bytes < UINT32_MAX);

		if (tegra_se_calculate_save_sha256(BL31_BASE,
				(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       src_len_in_bytes);

		/* Disable SE clock after SE context save */
		ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);
	}

	return ret;
}

int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
				  MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}
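
/*******************************************************************************
 * Handler to complete power-on of a CPU; resets the core's power state
 * hint and, on exit from system suspend, restores the SMMU, SE and XUSB
 * STREAMID state. Also enables CCPLEX lock step when requested by BL2.
 ******************************************************************************/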
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	uint64_t actlr_elx;

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what non-secure software, which controls the idle states,
	 * requested. Non-secure software re-initializes this info when
	 * the core comes online.
	 */
	actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
	actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
	actlr_elx |= DENVER_CPU_PMSTATE_C1;
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB supports XUSB virtualization, with one physical
		 * function (PF) and four virtual functions (VFs).
		 *
		 * Until T186 there were only the two SIDs below for XUSB:
		 * 1) #define TEGRA_SID_XUSB_HOST	0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV	0x1cU
		 *
		 * Four new SIDs have been added for the VFs:
		 * 3) #define TEGRA_SID_XUSB_VF0	0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1	0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2	0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3	0x60U
		 *
		 * When virtualization is enabled, the SID override must be
		 * disabled and the SIDs above programmed into the newly
		 * added SID registers in the XUSB PADCTL MMIO space. These
		 * registers are TZ protected, so this must be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This code disables the SID override and programs the XUSB
		 * SIDs in the registers above, to support both
		 * virtualization and non-virtualization platforms.
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
		}
	}

	/*
	 * Enable dual execution optimized translations for all ELx.
	 */
	if (enable_ccplex_lock_step != 0U) {
		actlr_elx = read_actlr_el3();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
		write_actlr_el3(actlr_elx);

		actlr_elx = read_actlr_el2();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
		write_actlr_el2(actlr_elx);

		actlr_elx = read_actlr_el1();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
		write_actlr_el1(actlr_elx);
	}

	return PSCI_E_SUCCESS;
}
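
/*******************************************************************************
 * Handler to power down the calling CPU; disables Denver's DCO operations
 * where applicable and asks the MCE to put the core into the C7 state.
 ******************************************************************************/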
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}