/*
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <common/debug.h>
#include <lib/bakery_lock.h>
#include <lib/extensions/ras.h>
#include <lib/utils_def.h>
#include <services/sdei.h>

#include <plat/common/platform.h>
#include <platform_def.h>
#include <tegra194_ras_private.h>
#include <tegra_def.h>
#include <tegra_platform.h>
#include <tegra_private.h>

/*
 * ERR<n>FR bits[63:32] indicate the supported RAS errors, which can be enabled
 * by setting the corresponding bits in ERR<n>CTLR.
 */
#define ERR_FR_EN_BITS_MASK	0xFFFFFFFF00000000ULL

/* bakery lock for the platform RAS handler. */
static DEFINE_BAKERY_LOCK(ras_handler_lock);
#define ras_lock()	bakery_lock_get(&ras_handler_lock)
#define ras_unlock()	bakery_lock_release(&ras_handler_lock)

/*
 * Function to handle an External Abort received at EL3.
 * This function is invoked by the RAS framework.
 */
static void tegra194_ea_handler(unsigned int ea_reason, uint64_t syndrome,
		void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;

	ras_lock();

	ERROR("exception reason=%u syndrome=0x%llx on 0x%lx at EL3.\n",
		ea_reason, syndrome, read_mpidr_el1());

	/* Call RAS EA handler */
	ret = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
	if (ret != 0) {
		ERROR("RAS error handled!\n");
		ret = sdei_dispatch_event(TEGRA_SDEI_EP_EVENT_0 +
				plat_my_core_pos());
		if (ret != 0) {
			ERROR("sdei_dispatch_event returned %d\n", ret);
		}
	} else {
		ERROR("Not a RAS error!\n");
	}

	ras_unlock();
}

/*
 * Function to enable reporting of all supported RAS errors.
 *
 * Uncorrected errors are set to be reported as External Abort (SError).
 * Corrected errors are set to be reported as interrupt.
 */
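/*
 * Summary of the per-node programming done in the loop below (ED, UE and
 * CFI are ERR<n>CTLR fields defined by the Arm RAS extension):
 *
 *   err_fr      = ERR<n>FR[63:32]          - errors supported by the node
 *   uncorr_errs = aux_data[j].err_ctrl()   - errors treated as uncorrectable
 *   corr_errs   = ~uncorr_errs & err_fr    - remaining (correctable) errors
 *
 * ED is always enabled; UE is enabled when any supported error is marked
 * uncorrectable, CFI when at least one supported error remains correctable,
 * and finally all supported errors (err_fr) are enabled in ERR<n>CTLR.
 */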
void tegra194_ras_enable(void)
{
	VERBOSE("%s\n", __func__);

	/* skip RAS enablement if not a silicon platform. */
	if (!tegra_platform_is_silicon()) {
		return;
	}

	/*
	 * Iterate over each group (num_idx ERRSELRs starting from idx_start).
	 * Use a normal for loop instead of for_each_err_record_info to get rid
	 * of MISRA noise.
	 */
	for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];

		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;
		const struct ras_aux_data *aux_data = (const struct ras_aux_data *)info->aux_data;

		assert(aux_data != NULL);

		for (uint32_t j = 0; j < num_idx; j++) {

			/* ERR<n>CTLR register value. */
			uint64_t err_ctrl = 0ULL;
			/* all supported errors for this node. */
			uint64_t err_fr;
			/* uncorrectable errors */
			uint64_t uncorr_errs;
			/* correctable errors */
			uint64_t corr_errs;

			/*
			 * Catch the error if something is wrong with the RAS
			 * aux data record table.
			 */
			assert(aux_data[j].err_ctrl != NULL);

			/*
			 * Write to ERRSELR_EL1 to select the RAS error node.
			 * Always program this first to select the
			 * corresponding RAS node before any other RAS
			 * register read/write.
			 */
			ser_sys_select_record(idx_start + j);

			err_fr = read_erxfr_el1() & ERR_FR_EN_BITS_MASK;
			uncorr_errs = aux_data[j].err_ctrl();
			corr_errs = ~uncorr_errs & err_fr;

			/* enable error reporting */
			ERR_CTLR_ENABLE_FIELD(err_ctrl, ED);

			/* enable SError reporting for uncorrectable errors */
			if ((uncorr_errs & err_fr) != 0ULL) {
				ERR_CTLR_ENABLE_FIELD(err_ctrl, UE);
			}

			/* generate interrupt for corrected errors. */
			if (corr_errs != 0ULL) {
				ERR_CTLR_ENABLE_FIELD(err_ctrl, CFI);
			}

			/* enable the supported errors */
			err_ctrl |= err_fr;

			VERBOSE("errselr_el1:0x%x, erxfr:0x%llx, err_ctrl:0x%llx\n",
				idx_start + j, err_fr, err_ctrl);

			/* enable the specified errors, or set to 0 if there are no supported errors */
			write_erxctlr_el1(err_ctrl);

			/*
			 * Check that all the bits needed to detect
			 * uncorrected/corrected errors have been enabled;
			 * assert otherwise.
			 */
			assert(read_erxctlr_el1() == err_ctrl);
		}
	}
}

/*
 * Function to clear RAS ERR<n>STATUS for corrected RAS errors.
 * This function ignores any new RAS error signaled during clearing; it is not
 * multi-core safe (no ras_lock is taken, to reduce overhead).
 */
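/*
 * Note: the ERR<n>STATUS fields are write-one-to-clear, so writing back the
 * mask built below (AV, V, OF, MV and both CE bits set) clears the latched
 * corrected-error status of the selected record.
 */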
void tegra194_ras_corrected_err_clear(void)
{
	uint64_t clear_ce_status = 0ULL;

	ERR_STATUS_SET_FIELD(clear_ce_status, AV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, V, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, OF, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, MV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, CE, 0x3UL);

	for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];
		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;

		for (uint32_t j = 0U; j < num_idx; j++) {

			uint64_t status;
			uint32_t err_idx = idx_start + j;

			write_errselr_el1(err_idx);
			status = read_erxstatus_el1();

			if (ERR_STATUS_GET_FIELD(status, CE) != 0U) {
				write_erxstatus_el1(clear_ce_status);
			}
		}
	}
}

/* Function to probe an error from an error record group. */
static int32_t tegra194_ras_record_probe(const struct err_record_info *info,
		int *probe_data)
{
	/* Skip probing if not a silicon platform */
	if (!tegra_platform_is_silicon()) {
		return 0;
	}

	return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx, probe_data);
}

/* Function to handle an error from one given node */
static int32_t tegra194_ras_node_handler(uint32_t errselr,
		const struct ras_error *errors, uint64_t status)
{
	bool found = false;
	uint32_t ierr = (uint32_t)ERR_STATUS_GET_FIELD(status, IERR);
	uint32_t serr = (uint32_t)ERR_STATUS_GET_FIELD(status, SERR);

	/* not a valid error. */
	if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
		return 0;
	}

	/* Print uncorrectable error information. */
	if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {

		/* IERR to error message */
		for (uint32_t i = 0; errors[i].error_msg != NULL; i++) {
			if (ierr == errors[i].error_code) {
				ERROR("ERRSELR_EL1:0x%x, IERR = %s(0x%x)\n",
					errselr, errors[i].error_msg,
					errors[i].error_code);
				found = true;
				break;
			}
		}

		if (!found) {
			ERROR("unknown uncorrectable error, "
				"ERRSELR_EL1:0x%x, IERR: 0x%x\n", errselr, ierr);
		}

		ERROR("SERR = %s(0x%x)\n", ras_serr_to_str(serr), serr);
	} else {
		/* For a corrected error, simply clear it. */
		VERBOSE("corrected RAS error is cleared: ERRSELR_EL1:0x%x, "
			"IERR:0x%x, SERR:0x%x\n", errselr, ierr, serr);
	}

	/* Write to clear the reported errors. */
	write_erxstatus_el1(status);

	return 0;
}

/* Function to handle one error node from an error record group. */
static int32_t tegra194_ras_record_handler(const struct err_record_info *info,
		int probe_data, const struct err_handler_data *const data __unused)
{
	uint32_t num_idx = info->sysreg.num_idx;
	uint32_t idx_start = info->sysreg.idx_start;
	const struct ras_aux_data *aux_data = info->aux_data;
	const struct ras_error *errors;
	uint32_t offset;

	uint64_t status = 0ULL;

	VERBOSE("%s\n", __func__);

	assert(probe_data >= 0);
	assert((uint32_t)probe_data < num_idx);

	offset = (uint32_t)probe_data;
	errors = aux_data[offset].error_records;

	assert(errors != NULL);

	/* Write to ERRSELR_EL1 to select the error record */
	ser_sys_select_record(idx_start + offset);

	/* Retrieve the status register from the error record */
	status = read_erxstatus_el1();

	return tegra194_ras_node_handler(idx_start + offset, errors, status);
}

/* Instantiate RAS nodes */
PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)

/* Instantiate RAS node groups */
static struct ras_aux_data per_core_ras_group[] = {
	PER_CORE_RAS_GROUP_NODES
};

static struct ras_aux_data per_cluster_ras_group[] = {
	PER_CLUSTER_RAS_GROUP_NODES
};

static struct ras_aux_data scf_l3_ras_group[] = {
	SCF_L3_BANK_RAS_GROUP_NODES
};

static struct ras_aux_data ccplex_ras_group[] = {
	CCPLEX_RAS_GROUP_NODES
};

/*
 * We have the same probe and handler for each error record group; use a macro
 * to simplify the record definitions.
 */
#define ADD_ONE_ERR_GROUP(errselr_start, group) \
	ERR_RECORD_SYSREG_V1((errselr_start), (uint32_t)ARRAY_SIZE((group)), \
			&tegra194_ras_record_probe, \
			&tegra194_ras_record_handler, (group))

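/*
 * Each ADD_ONE_ERR_GROUP() entry below covers ARRAY_SIZE(group) consecutive
 * ERRSELR indices starting at errselr_start. For example, per the layout
 * described in the comments below, the per-core group registered at 0x010
 * covers records 0x010 - 0x015 (the six per-core nodes of logical core 1).
 */
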
316 */ 317 ADD_ONE_ERR_GROUP(0x000, per_core_ras_group), 318 ADD_ONE_ERR_GROUP(0x010, per_core_ras_group), 319 ADD_ONE_ERR_GROUP(0x020, per_core_ras_group), 320 ADD_ONE_ERR_GROUP(0x030, per_core_ras_group), 321 ADD_ONE_ERR_GROUP(0x040, per_core_ras_group), 322 ADD_ONE_ERR_GROUP(0x050, per_core_ras_group), 323 ADD_ONE_ERR_GROUP(0x060, per_core_ras_group), 324 ADD_ONE_ERR_GROUP(0x070, per_core_ras_group), 325 326 /* 327 * Per cluster ras error records 328 * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to 329 * 2*256 + Logical_Cluster_ID*16 + 3. 330 * 4 clusters/groups, 3 * 4 nodes in total. 331 */ 332 ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group), 333 ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group), 334 ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group), 335 ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group), 336 337 /* 338 * SCF L3_Bank ras error records 339 * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3 340 * 1 groups, 4 nodes in total. 341 */ 342 ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group), 343 344 /* 345 * CCPLEX ras error records 346 * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4 347 * 1 groups, 5 nodes in total. 348 */ 349 ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group), 350 }; 351 352 REGISTER_ERR_RECORD_INFO(carmel_ras_records); 353 354 /* dummy RAS interrupt */ 355 static struct ras_interrupt carmel_ras_interrupts[] = {}; 356 REGISTER_RAS_INTERRUPTS(carmel_ras_interrupts); 357 358 /******************************************************************************* 359 * RAS handler for the platform 360 ******************************************************************************/ 361 void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie, 362 void *handle, uint64_t flags) 363 { 364 #if RAS_EXTENSION 365 tegra194_ea_handler(ea_reason, syndrome, cookie, handle, flags); 366 #else 367 ERROR("Unhandled External Abort received on 0x%llx at EL3!\n", 368 read_mpidr_el1()); 369 ERROR(" exception reason=%u syndrome=0x%lx\n", ea_reason, syndrome); 370 panic(); 371 #endif 372 } 373