xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t194/plat_ras.c (revision cf9346cb83804feb083b56a668eb0a462983e038)
1 /*
2  * Copyright (c) 2020-2021, NVIDIA Corporation. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <inttypes.h>
8 #include <stdbool.h>
9 #include <stdint.h>
10 
11 #include <common/debug.h>
12 #include <lib/bakery_lock.h>
13 #include <lib/cassert.h>
14 #include <lib/extensions/ras.h>
15 #include <lib/utils_def.h>
16 #include <services/sdei.h>
17 
18 #include <plat/common/platform.h>
19 #include <platform_def.h>
20 #include <tegra194_ras_private.h>
21 #include <tegra_def.h>
22 #include <tegra_platform.h>
23 #include <tegra_private.h>
24 
/*
 * ERR<n>FR bits[63:32], it indicates supported RAS errors which can be enabled
 * by setting corresponding bits in ERR<n>CTLR
 */
#define ERR_FR_EN_BITS_MASK	0xFFFFFFFF00000000ULL

/*
 * Number of RAS errors will be cleared per 'tegra194_ras_corrected_err_clear'
 * function call.
 */
#define RAS_ERRORS_PER_CALL	8

/*
 * the max possible RAS node index value.
 */
#define RAS_NODE_INDEX_MAX	0x1FFFFFFFU

/* bakery lock for platform RAS handler. */
static DEFINE_BAKERY_LOCK(ras_handler_lock);
/* serialize platform RAS error handling across CPUs. */
#define ras_lock()		bakery_lock_get(&ras_handler_lock)
#define ras_unlock()		bakery_lock_release(&ras_handler_lock)
46 
47 /*
48  * Function to handle an External Abort received at EL3.
49  * This function is invoked by RAS framework.
50  */
51 static void tegra194_ea_handler(unsigned int ea_reason, uint64_t syndrome,
52 		void *cookie, void *handle, uint64_t flags)
53 {
54 	int32_t ret;
55 
56 	ras_lock();
57 
58 	ERROR("MPIDR 0x%lx: exception reason=%u syndrome=0x%" PRIx64 "\n",
59 		read_mpidr(), ea_reason, syndrome);
60 
61 	/* Call RAS EA handler */
62 	ret = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
63 	if (ret != 0) {
64 		ERROR("RAS error handled!\n");
65 		ret = sdei_dispatch_event(TEGRA_SDEI_EP_EVENT_0 +
66 				plat_my_core_pos());
67 		if (ret != 0)
68 			ERROR("sdei_dispatch_event returned %d\n", ret);
69 	} else {
70 		ERROR("Not a RAS error!\n");
71 	}
72 
73 	ras_unlock();
74 }
75 
/*
 * Function to enable all supported RAS error report.
 *
 * Uncorrected errors are set to report as External abort (SError)
 * Corrected errors are set to report as interrupt.
 */
void tegra194_ras_enable(void)
{
	VERBOSE("%s\n", __func__);

	/* skip RAS enablement if not a silicon platform. */
	if (!tegra_platform_is_silicon()) {
		return;
	}

	/*
	 * Iterate for each group(num_idx ERRSELRs starting from idx_start)
	 * use normal for loop instead of for_each_err_record_info to get rid
	 * of MISRA noise..
	 */
	for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];

		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;
		/* per-node auxiliary data; supplies the err_ctrl() callback. */
		const struct ras_aux_data *aux_data = (const struct ras_aux_data *)info->aux_data;

		assert(aux_data != NULL);

		for (uint32_t j = 0; j < num_idx; j++) {

			/* ERR<n>CTLR register value. */
			uint64_t err_ctrl = 0ULL;
			/* all supported errors for this node. */
			uint64_t err_fr;
			/* uncorrectable errors */
			uint64_t uncorr_errs;
			/* correctable errors */
			uint64_t corr_errs;

			/*
			 * Catch error if something wrong with the RAS aux data
			 * record table.
			 */
			assert(aux_data[j].err_ctrl != NULL);

			/*
			 * Write to ERRSELR_EL1 to select the RAS error node.
			 * Always program this at first to select corresponding
			 * RAS node before any other RAS register r/w.
			 */
			ser_sys_select_record(idx_start + j);

			/* ERR<n>FR[63:32] is the mask of implemented errors. */
			err_fr = read_erxfr_el1() & ERR_FR_EN_BITS_MASK;
			uncorr_errs = aux_data[j].err_ctrl();
			/* everything implemented but not uncorrectable. */
			corr_errs = ~uncorr_errs & err_fr;

			/* enable error reporting */
			ERR_CTLR_ENABLE_FIELD(err_ctrl, ED);

			/* enable SError reporting for uncorrectable errors */
			if ((uncorr_errs & err_fr) != 0ULL) {
				ERR_CTLR_ENABLE_FIELD(err_ctrl, UE);
			}

			/* generate interrupt for corrected errors. */
			if (corr_errs != 0ULL) {
				ERR_CTLR_ENABLE_FIELD(err_ctrl, CFI);
			}

			/* enable the supported errors */
			err_ctrl |= err_fr;

			VERBOSE("errselr_el1:0x%x, erxfr:0x%" PRIx64 ", err_ctrl:0x%" PRIx64 "\n",
				idx_start + j, err_fr, err_ctrl);

			/* enable specified errors, or set to 0 if no supported error */
			write_erxctlr_el1(err_ctrl);
		}
	}
}
158 
/*
 * Function to clear RAS ERR<n>STATUS for corrected RAS error.
 *
 * This function clears number of 'RAS_ERRORS_PER_CALL' RAS errors at most.
 * 'cookie' - in/out cookie parameter to specify/store last visited RAS
 *            error record index. it is set to '0' to indicate no more RAS
 *            error record to clear.
 */
void tegra194_ras_corrected_err_clear(uint64_t *cookie)
{
	/*
	 * 'last_node' and 'last_idx' represent last visited RAS node index from
	 * previous function call. they are set to 0 when first smc call is made
	 * or all RAS error are visited by followed multipile smc calls.
	 */
	union prev_record {
		struct record {
			uint32_t last_node;	/* err_records[] group index */
			uint32_t last_idx;	/* node index within the group */
		} rec;
		uint64_t value;			/* packed view stored in *cookie */
	} prev;

	uint64_t clear_ce_status = 0ULL;
	int32_t nerrs_per_call = RAS_ERRORS_PER_CALL;
	uint32_t i;

	if (cookie == NULL) {
		return;
	}

	prev.value = *cookie;

	/* reject an out-of-range resume point from the caller. */
	if ((prev.rec.last_node >= RAS_NODE_INDEX_MAX) ||
		(prev.rec.last_idx >= RAS_NODE_INDEX_MAX)) {
		return;
	}

	/* write-one-to-clear value for a corrected-error status. */
	ERR_STATUS_SET_FIELD(clear_ce_status, AV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, V, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, OF, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, MV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, CE, 0x3UL);


	for (i = prev.rec.last_node; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];
		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;

		uint32_t j;

		/*
		 * Resume just after the last visited node when continuing in
		 * the same group; otherwise start the group from node 0.
		 */
		j = (i == prev.rec.last_node && prev.value != 0UL) ?
				(prev.rec.last_idx + 1U) : 0U;

		for (; j < num_idx; j++) {

			uint64_t status;
			uint32_t err_idx = idx_start + j;

			if (err_idx >= RAS_NODE_INDEX_MAX) {
				return;
			}

			/* select the node, then read its status register. */
			write_errselr_el1(err_idx);
			status = read_erxstatus_el1();

			if (ERR_STATUS_GET_FIELD(status, CE) != 0U) {
				write_erxstatus_el1(clear_ce_status);
			}

			--nerrs_per_call;

			/* only clear 'nerrs_per_call' errors each time. */
			if (nerrs_per_call <= 0) {
				prev.rec.last_idx = j;
				prev.rec.last_node = i;
				/* save last visited error record index
				 * into cookie.
				 */
				*cookie = prev.value;

				return;
			}
		}
	}

	/*
	 * finish if all ras error records are checked or provided index is out
	 * of range.
	 */
	*cookie = 0ULL;
	return;
}
254 
255 /* Function to probe an error from error record group. */
256 static int32_t tegra194_ras_record_probe(const struct err_record_info *info,
257 		int *probe_data)
258 {
259 	/* Skip probing if not a silicon platform */
260 	if (!tegra_platform_is_silicon()) {
261 		return 0;
262 	}
263 
264 	return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx, probe_data);
265 }
266 
267 /* Function to handle error from one given node */
268 static int32_t tegra194_ras_node_handler(uint32_t errselr, const char *name,
269 		const struct ras_error *errors, uint64_t status)
270 {
271 	bool found = false;
272 	uint32_t ierr = (uint32_t)ERR_STATUS_GET_FIELD(status, IERR);
273 	uint32_t serr = (uint32_t)ERR_STATUS_GET_FIELD(status, SERR);
274 	uint64_t val = 0;
275 
276 	/* not a valid error. */
277 	if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
278 		return 0;
279 	}
280 
281 	ERR_STATUS_SET_FIELD(val, V, 1);
282 
283 	/* keep the log print same as linux arm64_ras driver. */
284 	ERROR("**************************************\n");
285 	ERROR("RAS Error in %s, ERRSELR_EL1=0x%x:\n", name, errselr);
286 	ERROR("\tStatus = 0x%" PRIx64 "\n", status);
287 
288 	/* Print uncorrectable errror information. */
289 	if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {
290 
291 		ERR_STATUS_SET_FIELD(val, UE, 1);
292 		ERR_STATUS_SET_FIELD(val, UET, 1);
293 
294 		/* IERR to error message */
295 		for (uint32_t i = 0; errors[i].error_msg != NULL; i++) {
296 			if (ierr == errors[i].error_code) {
297 				ERROR("\tIERR = %s: 0x%x\n",
298 					errors[i].error_msg, ierr);
299 
300 				found = true;
301 				break;
302 			}
303 		}
304 
305 		if (!found) {
306 			ERROR("\tUnknown IERR: 0x%x\n", ierr);
307 		}
308 
309 		ERROR("SERR = %s: 0x%x\n", ras_serr_to_str(serr), serr);
310 
311 		/* Overflow, multiple errors have been detected. */
312 		if (ERR_STATUS_GET_FIELD(status, OF) != 0U) {
313 			ERROR("\tOverflow (there may be more errors) - "
314 				"Uncorrectable\n");
315 			ERR_STATUS_SET_FIELD(val, OF, 1);
316 		}
317 
318 		ERROR("\tUncorrectable (this is fatal)\n");
319 
320 		/* Miscellaneous Register Valid. */
321 		if (ERR_STATUS_GET_FIELD(status, MV) != 0U) {
322 			ERROR("\tMISC0 = 0x%lx\n", read_erxmisc0_el1());
323 			ERROR("\tMISC1 = 0x%lx\n", read_erxmisc1_el1());
324 			ERR_STATUS_SET_FIELD(val, MV, 1);
325 		}
326 
327 		/* Address Valid. */
328 		if (ERR_STATUS_GET_FIELD(status, AV) != 0U) {
329 			ERROR("\tADDR = 0x%lx\n", read_erxaddr_el1());
330 			ERR_STATUS_SET_FIELD(val, AV, 1);
331 		}
332 
333 		/* Deferred error */
334 		if (ERR_STATUS_GET_FIELD(status, DE) != 0U) {
335 			ERROR("\tDeferred error\n");
336 			ERR_STATUS_SET_FIELD(val, DE, 1);
337 		}
338 
339 	} else {
340 		/* For corrected error, simply clear it. */
341 		VERBOSE("corrected RAS error is cleared: ERRSELR_EL1:0x%x, "
342 			"IERR:0x%x, SERR:0x%x\n", errselr, ierr, serr);
343 		ERR_STATUS_SET_FIELD(val, CE, 1);
344 	}
345 
346 	ERROR("**************************************\n");
347 
348 	/* Write to clear reported errors. */
349 	write_erxstatus_el1(val);
350 
351 	/* error handled */
352 	return 0;
353 }
354 
355 /* Function to handle one error node from an error record group. */
356 static int32_t tegra194_ras_record_handler(const struct err_record_info *info,
357 		int probe_data, const struct err_handler_data *const data __unused)
358 {
359 	uint32_t num_idx = info->sysreg.num_idx;
360 	uint32_t idx_start = info->sysreg.idx_start;
361 	const struct ras_aux_data *aux_data = info->aux_data;
362 	const struct ras_error *errors;
363 	uint32_t offset;
364 	const char *node_name;
365 
366 	uint64_t status = 0ULL;
367 
368 	VERBOSE("%s\n", __func__);
369 
370 	assert(probe_data >= 0);
371 	assert((uint32_t)probe_data < num_idx);
372 
373 	offset = (uint32_t)probe_data;
374 	errors = aux_data[offset].error_records;
375 	node_name = aux_data[offset].name;
376 
377 	assert(errors != NULL);
378 
379 	/* Write to ERRSELR_EL1 to select the error record */
380 	ser_sys_select_record(idx_start + offset);
381 
382 	/* Retrieve status register from the error record */
383 	status = read_erxstatus_el1();
384 
385 	return tegra194_ras_node_handler(idx_start + offset, node_name,
386 			errors, status);
387 }
388 
389 
390 /* Instantiate RAS nodes */
391 PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
392 PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
393 SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
394 CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
395 
396 /* Instantiate RAS node groups */
397 static struct ras_aux_data per_core_ras_group[] = {
398 	PER_CORE_RAS_GROUP_NODES
399 };
400 CASSERT(ARRAY_SIZE(per_core_ras_group) < RAS_NODE_INDEX_MAX,
401 	assert_max_per_core_ras_group_size);
402 
403 static struct ras_aux_data per_cluster_ras_group[] = {
404 	PER_CLUSTER_RAS_GROUP_NODES
405 };
406 CASSERT(ARRAY_SIZE(per_cluster_ras_group) < RAS_NODE_INDEX_MAX,
407 	assert_max_per_cluster_ras_group_size);
408 
409 static struct ras_aux_data scf_l3_ras_group[] = {
410 	SCF_L3_BANK_RAS_GROUP_NODES
411 };
412 CASSERT(ARRAY_SIZE(scf_l3_ras_group) < RAS_NODE_INDEX_MAX,
413 	assert_max_scf_l3_ras_group_size);
414 
415 static struct ras_aux_data ccplex_ras_group[] = {
416     CCPLEX_RAS_GROUP_NODES
417 };
418 CASSERT(ARRAY_SIZE(ccplex_ras_group) < RAS_NODE_INDEX_MAX,
419 	assert_max_ccplex_ras_group_size);
420 
/*
 * We have same probe and handler for each error record group, use a macro to
 * simplify the record definition.
 */
#define ADD_ONE_ERR_GROUP(errselr_start, group) \
	ERR_RECORD_SYSREG_V1((errselr_start), (uint32_t)ARRAY_SIZE((group)), \
			&tegra194_ras_record_probe, \
			&tegra194_ras_record_handler, (group))

/* RAS error record group information */
static struct err_record_info carmel_ras_records[] = {
	/*
	 * Per core ras error records
	 * ERRSELR starts from 0*256 + Logical_CPU_ID*16 + 0 to
	 * 0*256 + Logical_CPU_ID*16 + 5 for each group.
	 * 8 cores/groups, 6 * 8 nodes in total.
	 */
	ADD_ONE_ERR_GROUP(0x000, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x010, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x020, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x030, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x040, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x050, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x060, per_core_ras_group),
	ADD_ONE_ERR_GROUP(0x070, per_core_ras_group),

	/*
	 * Per cluster ras error records
	 * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to
	 * 2*256 + Logical_Cluster_ID*16 + 3.
	 * 4 clusters/groups, 3 * 4 nodes in total.
	 */
	ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group),
	ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group),
	ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group),
	ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group),

	/*
	 * SCF L3_Bank ras error records
	 * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3
	 * 1 groups, 4 nodes in total.
	 */
	ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group),

	/*
	 * CCPLEX ras error records
	 * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4
	 * 1 groups, 5 nodes in total.
	 */
	ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group),
};

CASSERT(ARRAY_SIZE(carmel_ras_records) < RAS_NODE_INDEX_MAX,
	assert_max_carmel_ras_records_size);

REGISTER_ERR_RECORD_INFO(carmel_ras_records);
477 
/* dummy RAS interrupt */
/*
 * NOTE(review): a zero-length array with an empty initializer is a GNU
 * extension (not standard C); the build relies on GCC/Clang here.
 */
static struct ras_interrupt carmel_ras_interrupts[] = {};
REGISTER_RAS_INTERRUPTS(carmel_ras_interrupts);
481 
/*******************************************************************************
 * RAS handler for the platform
 ******************************************************************************/
void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
#if RAS_EXTENSION
	/* route External Aborts into the Tegra194 RAS handler. */
	tegra194_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#else
	/* RAS support disabled: fall back to the generic default handler. */
	plat_default_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#endif
}
494