/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <denver.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <mce_private.h>
#include <t18x_ari.h>

/*******************************************************************************
 * Register offsets for ARI request/results
 ******************************************************************************/
#define ARI_REQUEST                     0x0U
#define ARI_REQUEST_EVENT_MASK          0x4U
#define ARI_STATUS                      0x8U
#define ARI_REQUEST_DATA_LO             0xCU
#define ARI_REQUEST_DATA_HI             0x10U
#define ARI_RESPONSE_DATA_LO            0x14U
#define ARI_RESPONSE_DATA_HI            0x18U

/* Status values for the current request */
#define ARI_REQ_PENDING                 1U
#define ARI_REQ_ONGOING                 3U
#define ARI_REQUEST_VALID_BIT           (1U << 8)
#define ARI_EVT_MASK_STANDBYWFI_BIT     (1U << 7)

/* default timeout (ms) to wait for ARI completion */
#define ARI_MAX_RETRY_COUNT             2000

/*******************************************************************************
 * ARI helper functions
 ******************************************************************************/
static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
{
        return mmio_read_32((uint64_t)ari_base + (uint64_t)reg);
}

static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
{
        mmio_write_32((uint64_t)ari_base + (uint64_t)reg, val);
}

static inline uint32_t ari_get_request_low(uint32_t ari_base)
{
        return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
}

static inline uint32_t ari_get_request_high(uint32_t ari_base)
{
        return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
}

static inline uint32_t ari_get_response_low(uint32_t ari_base)
{
        return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
}

static inline uint32_t ari_get_response_high(uint32_t ari_base)
{
        return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
}

static inline void ari_clobber_response(uint32_t ari_base)
{
        ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
        ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
}
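/*
 * Post a request to the ARI mailbox and wait for it to complete.
 *
 * The 64-bit payload (lo/hi) and the event mask are programmed first,
 * then the request is marked valid. Completion is detected by polling
 * ARI_STATUS, except for event-triggered requests (non-zero evt_mask)
 * and CCPLEX shutdown/reboot requests, which never report completion
 * through ARI_STATUS.
 */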
static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
                uint32_t lo, uint32_t hi)
{
        uint32_t retries = ARI_MAX_RETRY_COUNT;
        uint32_t status;
        int32_t ret = 0;

        /* program the request, event_mask, hi and lo registers */
        ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
        ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
        ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
        ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);

        /*
         * For commands that have an event trigger, we should bypass
         * ARI_STATUS polling, since MCE is waiting for SW to trigger
         * the event.
         */
        if (evt_mask != 0U) {
                ret = 0;
        } else {
                /* For shutdown/reboot commands, we don't have to check for timeouts */
                if ((req == TEGRA_ARI_MISC_CCPLEX) &&
                    ((lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
                     (lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
                        ret = 0;
                } else {
                        /*
                         * Wait for the command response for not more than the timeout
                         */
                        while (retries != 0U) {

                                /* read the command status */
                                status = ari_read_32(ari_base, ARI_STATUS);
                                if ((status & (ARI_REQ_ONGOING | ARI_REQ_PENDING)) == 0U) {
                                        break;
                                }

                                /* delay 1 ms */
                                mdelay(1);

                                /* decrement the retry count */
                                retries--;
                        }

                        /* assert if the command timed out */
                        if (retries == 0U) {
                                ERROR("ARI request timed out: req %d on CPU %d\n",
                                      req, plat_my_core_pos());
                                assert(retries != 0U);
                        }
                }
        }

        return ret;
}

int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
        int32_t ret = 0;

        /* check for allowed power state */
        if ((state != TEGRA_ARI_CORE_C0) &&
            (state != TEGRA_ARI_CORE_C1) &&
            (state != TEGRA_ARI_CORE_C6) &&
            (state != TEGRA_ARI_CORE_C7)) {
                ERROR("%s: unknown cstate (%d)\n", __func__, state);
                ret = EINVAL;
        } else {
                /* clean the previous response state */
                ari_clobber_response(ari_base);

                /* Enter the cstate, to be woken up after wake_time (TSC ticks) */
                ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
                                TEGRA_ARI_ENTER_CSTATE, state, wake_time);
        }

        return ret;
}
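/*
 * Update the cluster/CCPLEX/system cstate targets and, optionally, the
 * wake mask. Non-zero arguments are OR-ed into the request word together
 * with their respective "update" bits; zero arguments leave the
 * corresponding field untouched.
 */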
int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
                uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
                uint8_t update_wake_mask)
{
        uint64_t val = 0U;

        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /* update CLUSTER_CSTATE? */
        if (cluster != 0U) {
                val |= (cluster & CLUSTER_CSTATE_MASK) |
                        CLUSTER_CSTATE_UPDATE_BIT;
        }

        /* update CCPLEX_CSTATE? */
        if (ccplex != 0U) {
                val |= ((ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
                        CCPLEX_CSTATE_UPDATE_BIT;
        }

        /* update SYSTEM_CSTATE? */
        if (system != 0U) {
                val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
                        (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
                         SYSTEM_CSTATE_UPDATE_BIT);
        }

        /* update wake mask value? */
        if (update_wake_mask != 0U) {
                val |= CSTATE_WAKE_MASK_UPDATE_BIT;
        }

        /* set the updated cstate info */
        return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO,
                        (uint32_t)val, wake_mask);
}

int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
{
        int32_t ret = 0;

        /* sanity check crossover type */
        if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
            (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)) {
                ret = EINVAL;
        } else {
                /* clean the previous response state */
                ari_clobber_response(ari_base);

                /* update crossover threshold time */
                ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CROSSOVER,
                                type, time);
        }

        return ret;
}

uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
{
        int32_t ret;
        uint64_t result;

        /* sanity check the cstate stats ID */
        if (state == 0U) {
                result = EINVAL;
        } else {
                /* clean the previous response state */
                ari_clobber_response(ari_base);

                ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_CSTATE_STATS, state, 0U);
                if (ret != 0) {
                        result = EINVAL;
                } else {
                        result = (uint64_t)ari_get_response_low(ari_base);
                }
        }

        return result;
}

int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
{
        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /* write the cstate stats */
        return ari_request_wait(ari_base, 0U, TEGRA_ARI_WRITE_CSTATE_STATS, state,
                        stats);
}

uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
{
        uint64_t resp;
        int32_t ret;
        uint32_t local_data = data;

        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
        if (cmd != TEGRA_ARI_MISC_ECHO) {
                local_data = 0U;
        }

        ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC, cmd, local_data);
        if (ret != 0) {
                resp = (uint64_t)ret;
        } else {
                /* get the command response */
                resp = ari_get_response_low(ari_base);
                resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
        }

        return resp;
}

int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
        int32_t ret;
        uint32_t result;

        /* clean the previous response state */
        ari_clobber_response(ari_base);

        ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7U,
                        wake_time);
        if (ret != 0) {
                ERROR("%s: failed (%d)\n", __func__, ret);
                result = 0U;
        } else {
                result = ari_get_response_low(ari_base) & 0x1U;
        }

        /* 1 = CCx allowed, 0 = CCx not allowed */
        return (int32_t)result;
}
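/*
 * Ask the MCE whether the CCPLEX can enter SC7 from the given core
 * cstate. Returns 1 if SC7 entry is allowed, 0 if it is not, and
 * EINVAL for an unsupported cstate.
 */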
int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
        int32_t ret, result;

        /* check for allowed power state */
        if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
            (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
                ERROR("%s: unknown cstate (%d)\n", __func__, state);
                result = EINVAL;
        } else {
                /* clean the previous response state */
                ari_clobber_response(ari_base);

                ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_SC7_ALLOWED, state,
                                wake_time);
                if (ret != 0) {
                        ERROR("%s: failed (%d)\n", __func__, ret);
                        result = 0;
                } else {
                        /* 1 = SC7 allowed, 0 = SC7 not allowed */
                        result = (ari_get_response_low(ari_base) != 0U) ? 1 : 0;
                }
        }

        return result;
}

int32_t ari_online_core(uint32_t ari_base, uint32_t core)
{
        uint64_t cpu = read_mpidr() & (MPIDR_CPU_MASK);
        uint64_t cluster = (read_mpidr() & (MPIDR_CLUSTER_MASK)) >>
                           (MPIDR_AFFINITY_BITS);
        uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
        int32_t ret;

        /* construct the current CPU # */
        cpu |= (cluster << 2);

        /* sanity check target core id */
        if ((core >= MCE_CORE_ID_MAX) || (cpu == (uint64_t)core)) {
                ERROR("%s: unsupported core id (%d)\n", __func__, core);
                ret = EINVAL;
        } else {
                /*
                 * The Denver cluster has 2 CPUs only - 0, 1.
                 */
                if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
                        ERROR("%s: unknown core id (%d)\n", __func__, core);
                        ret = EINVAL;
                } else {
                        /* clean the previous response state */
                        ari_clobber_response(ari_base);
                        ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_ONLINE_CORE, core, 0U);
                }
        }

        return ret;
}

int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
{
        uint32_t val;

        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /*
         * If the enable bit is cleared, Auto-CC3 will be disabled by setting
         * the SW visible voltage/frequency request registers for all non
         * floorswept cores valid independent of StandbyWFI and disabling
         * the IDLE voltage/frequency request register. If set, Auto-CC3
         * will be enabled by setting the ARM SW visible voltage/frequency
         * request registers for all non floorswept cores to be enabled by
         * StandbyWFI or the equivalent signal, and always keeping the IDLE
         * voltage/frequency request register enabled.
         */
        val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |
               ((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |
               ((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));

        return ari_request_wait(ari_base, 0U, TEGRA_ARI_CC3_CTRL, val, 0U);
}

int32_t ari_reset_vector_update(uint32_t ari_base)
{
        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /*
         * Need to program the CPU reset vector one time during cold boot
         * and SC7 exit
         */
        (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);

        return 0;
}

int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
{
        /* clean the previous response state */
        ari_clobber_response(ari_base);

        return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
                        0U, 0U);
}

int32_t ari_roc_flush_cache(uint32_t ari_base)
{
        /* clean the previous response state */
        ari_clobber_response(ari_base);

        return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
                        0U, 0U);
}

int32_t ari_roc_clean_cache(uint32_t ari_base)
{
        /* clean the previous response state */
        ari_clobber_response(ari_base);

        return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
                        0U, 0U);
}
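/*
 * Issue a machine check (MCA) access through ARI. The 64-bit MCA command
 * is staged in the response data registers and the optional 64-bit data
 * word travels in the request payload. On completion, if the "finish"
 * bit is set the value read back from the request data registers is
 * stored in *data; otherwise the MCA error code is returned.
 */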
uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
{
        uint64_t mca_arg_data, result = 0;
        uint32_t resp_lo, resp_hi;
        uint32_t mca_arg_err, mca_arg_finish;
        int32_t ret;

        /* Set data (write) */
        mca_arg_data = (data != NULL) ? *data : 0ULL;

        /* Set command */
        ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
        ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);

        ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MCA,
                        (uint32_t)mca_arg_data,
                        (uint32_t)(mca_arg_data >> 32U));
        if (ret == 0) {
                resp_lo = ari_get_response_low(ari_base);
                resp_hi = ari_get_response_high(ari_base);

                mca_arg_err = resp_lo & MCA_ARG_ERROR_MASK;
                mca_arg_finish = (resp_hi >> MCA_ARG_FINISH_SHIFT) &
                                 MCA_ARG_FINISH_MASK;

                if (mca_arg_finish == 0U) {
                        result = (uint64_t)mca_arg_err;
                } else {
                        if (data != NULL) {
                                resp_lo = ari_get_request_low(ari_base);
                                resp_hi = ari_get_request_high(ari_base);
                                *data = ((uint64_t)resp_hi << 32U) |
                                        (uint64_t)resp_lo;
                        }
                }
        }

        return result;
}

int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
{
        int32_t ret = 0;

        /* sanity check GSC ID */
        if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX) {
                ret = EINVAL;
        } else {
                /* clean the previous response state */
                ari_clobber_response(ari_base);

                /*
                 * The MCE code will read the GSC carveout value, corresponding to
                 * the ID, from the MC registers and update the internal GSC registers
                 * of the CCPLEX.
                 */
                (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
        }

        return ret;
}

void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
{
        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /*
         * The MCE will shutdown or restart the entire system
         */
        (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
}

int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
                uint64_t *data)
{
        int32_t ret, result;
        uint32_t val, req_status;
        uint8_t req_cmd;

        req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT);

        /* clean the previous response state */
        ari_clobber_response(ari_base);

        /* sanity check input parameters */
        if ((req_cmd == UNCORE_PERFMON_CMD_READ) && (data == NULL)) {
                ERROR("invalid parameters\n");
                result = EINVAL;
        } else {
                /*
                 * For "write" commands get the value that has to be written
                 * to the uncore perfmon registers
                 */
                val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
                        (uint32_t)*data : 0U;

                ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_PERFMON, val,
                                (uint32_t)req);
                if (ret != 0) {
                        result = ret;
                } else {
                        /* read the command status value */
                        req_status = ari_get_response_high(ari_base) &
                                        UNCORE_PERFMON_RESP_STATUS_MASK;

                        /*
                         * For "read" commands get the data from the uncore
                         * perfmon registers
                         */
                        req_status >>= UNCORE_PERFMON_RESP_STATUS_SHIFT;
                        if ((req_status == 0U) && (req_cmd == UNCORE_PERFMON_CMD_READ)) {
                                *data = ari_get_response_low(ari_base);
                        }
                        result = (int32_t)req_status;
                }
        }

        return result;
}
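/*
 * Front-end for the remaining TEGRA_ARI_MISC_CCPLEX sub-commands. The
 * index must not exceed TEGRA_ARI_MISC_CCPLEX_EDBGREQ and the Coresight
 * clock gating control only accepts the values 0 and 1.
 */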
void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
{
        /*
         * This invokes the ARI_MISC_CCPLEX commands. This can be
         * used to enable/disable coresight clock gating.
         */
        if ((index > TEGRA_ARI_MISC_CCPLEX_EDBGREQ) ||
            ((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) &&
             (value > 1U))) {
                ERROR("%s: invalid parameters\n", __func__);
        } else {
                /* clean the previous response state */
                ari_clobber_response(ari_base);

                (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, index, value);
        }
}