/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/mmio.h>

#include <mce.h>
#include <mce_private.h>
#include <t18x_ari.h>
#include <tegra_def.h>
#include <tegra_platform.h>

/* NVG functions handlers */
static arch_mce_ops_t nvg_mce_ops = {
	.enter_cstate = nvg_enter_cstate,
	.update_cstate_info = nvg_update_cstate_info,
	.update_crossover_time = nvg_update_crossover_time,
	.read_cstate_stats = nvg_read_cstate_stats,
	.write_cstate_stats = nvg_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = nvg_is_ccx_allowed,
	.is_sc7_allowed = nvg_is_sc7_allowed,
	.online_core = nvg_online_core,
	.cc3_ctrl = nvg_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
	.misc_ccplex = ari_misc_ccplex
};

/* ARI functions handlers */
static arch_mce_ops_t ari_mce_ops = {
	.enter_cstate = ari_enter_cstate,
	.update_cstate_info = ari_update_cstate_info,
	.update_crossover_time = ari_update_crossover_time,
	.read_cstate_stats = ari_read_cstate_stats,
	.write_cstate_stats = ari_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = ari_is_ccx_allowed,
	.is_sc7_allowed = ari_is_sc7_allowed,
	.online_core = ari_online_core,
	.cc3_ctrl = ari_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
	.misc_ccplex = ari_misc_ccplex
};

typedef struct {
	uint32_t ari_base;
	arch_mce_ops_t *ops;
} mce_config_t;

/* Table to hold the per-CPU ARI base address and function handlers */
static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
	{
		/* A57 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 2 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 3 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* D15 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
		.ops = &nvg_mce_ops,
	},
	{
		/* D15 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
		.ops = &nvg_mce_ops,
	}
};

static uint32_t mce_get_curr_cpu_ari_base(void)
{
	uint64_t mpidr = read_mpidr();
	uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
			(uint64_t)MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
	if (impl == DENVER_IMPL) {
		cpuid |= 0x4U;
	}

	return mce_cfg_table[cpuid].ari_base;
}

static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
	uint64_t mpidr = read_mpidr();
	uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
			(uint64_t)MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
	if (impl == DENVER_IMPL) {
		cpuid |= 0x4U;
	}

	return mce_cfg_table[cpuid].ops;
}

/*******************************************************************************
 * Common handler for all MCE commands
 ******************************************************************************/
int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
			uint64_t arg2)
{
	const arch_mce_ops_t *ops;
	gp_regs_t *gp_regs = get_gpregs_ctx(cm_get_context(NON_SECURE));
	uint32_t cpu_ari_base;
	uint64_t ret64 = 0, arg3, arg4, arg5;
	int32_t ret = 0;

	assert(gp_regs != NULL);

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

	switch (cmd) {
	case MCE_CMD_ENTER_CSTATE:
		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
		}

		break;

	case MCE_CMD_UPDATE_CSTATE_INFO:
		/*
		 * get the parameters required for the update cstate info
		 * command
		 */
		arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
		arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
		arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));

		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
				(uint32_t)arg4, (uint8_t)arg5);
		if (ret < 0) {
			ERROR("%s: update_cstate_info failed(%d)\n",
				__func__, ret);
		}

		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));

		break;

	case MCE_CMD_UPDATE_CROSSOVER_TIME:
		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: update_crossover_time failed(%d)\n",
				__func__, ret);
		}

		break;

	case MCE_CMD_READ_CSTATE_STATS:
		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);

		/* update context to return cstate stats value */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));

		break;

	case MCE_CMD_WRITE_CSTATE_STATS:
		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: write_cstate_stats failed(%d)\n",
				__func__, ret);
		}

		break;

	case MCE_CMD_IS_CCX_ALLOWED:
		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return CCx status value */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
			      (uint64_t)(ret));

		break;

	case MCE_CMD_IS_SC7_ALLOWED:
		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return SC7 status value */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
			      (uint64_t)(ret));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
			      (uint64_t)(ret));

		break;

	case MCE_CMD_ONLINE_CORE:
		ret = ops->online_core(cpu_ari_base, arg0);
		if (ret < 0) {
			ERROR("%s: online_core failed(%d)\n", __func__, ret);
		}

		break;

	case MCE_CMD_CC3_CTRL:
		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
		if (ret < 0) {
			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
		}

		break;

	case MCE_CMD_ECHO_DATA:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
				arg0);

		/* update context to return if echo'd data matched source */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
			      ((ret64 == arg0) ? 1ULL : 0ULL));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
			      ((ret64 == arg0) ? 1ULL : 0ULL));

		break;

	case MCE_CMD_READ_VERSIONS:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
				arg0);

		/*
		 * version = minor(63:32) | major(31:0). Update context
		 * to return major and minor version number.
		 */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
			      (ret64));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
			      (ret64 >> 32ULL));

		break;

	case MCE_CMD_ENUM_FEATURES:
		ret64 = ops->call_enum_misc(cpu_ari_base,
				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);

		/* update context to return features value */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));

		break;

	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
		if (ret < 0) {
			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
				ret);
		}

		break;

	case MCE_CMD_ROC_FLUSH_CACHE:
		ret = ops->roc_flush_cache(cpu_ari_base);
		if (ret < 0) {
			ERROR("%s: flush cache failed(%d)\n", __func__, ret);
		}

		break;

	case MCE_CMD_ROC_CLEAN_CACHE:
		ret = ops->roc_clean_cache(cpu_ari_base);
		if (ret < 0) {
			ERROR("%s: clean cache failed(%d)\n", __func__, ret);
		}

		break;

	case MCE_CMD_ENUM_READ_MCA:
		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);

		/* update context to return MCA data/error */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));

		break;

	case MCE_CMD_ENUM_WRITE_MCA:
		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);

		/* update context to return MCA error */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));

		break;

#if ENABLE_CHIP_VERIFICATION_HARNESS
	case MCE_CMD_ENABLE_LATIC:
		/*
		 * This call is not for production use. The constant value,
		 * 0xFFFF0000, is specific to allowing for enabling LATIC on
		 * pre-production parts for the chip verification harness.
		 *
		 * Enabling LATIC allows S/W to read the MINI ISMs in the
		 * CCPLEX. The ISMs are used for various measurements relevant
		 * to particular locations in the Silicon. They are small
		 * counters which can be polled to determine how fast a
		 * particular location in the Silicon is.
		 */
		ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
			0xFFFF0000);

		break;
#endif

	case MCE_CMD_UNCORE_PERFMON_REQ:
		ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);

		/* update context to return data */
		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
		break;

	case MCE_CMD_MISC_CCPLEX:
		ops->misc_ccplex(cpu_ari_base, arg0, arg1);

		break;

	default:
		ERROR("unknown MCE command (%llu)\n", cmd);
		ret = EINVAL;
		break;
	}

	return ret;
}

/*******************************************************************************
 * Handler to update the reset vector for CPUs
 ******************************************************************************/
int32_t mce_update_reset_vector(void)
{
	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_reset_vector(mce_get_curr_cpu_ari_base());

	return 0;
}

static int32_t mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
{
	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);

	return 0;
}

/*******************************************************************************
 * Handler to update carveout values for Video Memory Carveout region
 ******************************************************************************/
int32_t mce_update_gsc_videomem(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZDRAM aperture
 ******************************************************************************/
int32_t mce_update_gsc_tzdram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZ SysRAM aperture
 ******************************************************************************/
int32_t mce_update_gsc_tzram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
}

/*******************************************************************************
 * Handler to shutdown/reset the entire system
 ******************************************************************************/
__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
{
	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* sanity check state value */
	if ((state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) &&
	    (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)) {
		panic();
	}

	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);

	/* wait till the CCPLEX powers down */
	for (;;) {
		;
	}
}

/*******************************************************************************
 * Handler to issue the UPDATE_CSTATE_INFO request
 ******************************************************************************/
void mce_update_cstate_info(const mce_cstate_info_t *cstate)
{
	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* issue the UPDATE_CSTATE_INFO request */
	ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
		cstate->ccplex, cstate->system, cstate->system_state_force,
		cstate->wake_mask, cstate->update_wake_mask);
}

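/*
 * Illustrative usage sketch (assumption, not code from this driver): a
 * platform power management path would typically fill an mce_cstate_info_t
 * and issue the UPDATE_CSTATE_INFO request before powering a core down.
 * Only mce_cstate_info_t and mce_update_cstate_info() come from this
 * interface; the field values below are hypothetical placeholders.
 *
 *	mce_cstate_info_t cstate_info = { 0 };
 *
 *	cstate_info.cluster = <allowed cluster power state>;
 *	cstate_info.wake_mask = <events allowed to wake the core>;
 *	cstate_info.update_wake_mask = 1;
 *	mce_update_cstate_info(&cstate_info);
 */
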
/*******************************************************************************
 * Handler to read the MCE firmware version and check if it is compatible
 * with the interface header that BL3-1 was compiled against
 ******************************************************************************/
void mce_verify_firmware_version(void)
{
	const arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t version;
	uint32_t major, minor;

	/*
	 * MCE firmware is not supported on simulation platforms.
	 */
	if (tegra_platform_is_emulation()) {

		INFO("MCE firmware is not supported\n");

	} else {
		/* get a pointer to the CPU's arch_mce_ops_t struct */
		ops = mce_get_curr_cpu_ops();

		/* get the CPU's ARI base address */
		cpu_ari_base = mce_get_curr_cpu_ari_base();

		/*
		 * Read the MCE firmware version and extract the major and
		 * minor version fields
		 */
		version = ops->call_enum_misc(cpu_ari_base,
				TEGRA_ARI_MISC_VERSION, 0);
		major = (uint32_t)version;
		minor = (uint32_t)(version >> 32);

		INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
			TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);

		/*
		 * Verify that the MCE firmware version and the interface
		 * header match
		 */
		if (major != TEGRA_ARI_VERSION_MAJOR) {
			ERROR("ARI major version mismatch\n");
			panic();
		}

		if (minor < TEGRA_ARI_VERSION_MINOR) {
			ERROR("ARI minor version mismatch\n");
			panic();
		}
	}
}
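
/*
 * Illustrative call flow (assumption, not code from this file): the MCE
 * commands above are expected to arrive via SiP SMC calls, with the
 * platform's SMC dispatcher decoding the function id and forwarding the
 * arguments, roughly:
 *
 *	ret = mce_command_handler(cmd, x1, x2, x3);
 *
 * where cmd, x1, x2 and x3 are hypothetical names for the decoded command
 * id and SMC arguments. The return value carries the handler status, and
 * commands that produce data write their results back into the non-secure
 * GP register context (X1-X3), as done in mce_command_handler() above.
 */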