/*
 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <denver.h>
#include <mce.h>
#include <mmio.h>
#include <string.h>
#include <sys/errno.h>
#include <t18x_ari.h>
#include <tegra_def.h>
#include <tegra_platform.h>

/* NVG function handlers */
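/*
 * Note: several entries below reuse the ARI implementations; only the
 * handlers that have an NVG equivalent use the nvg_* versions.
 */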
static arch_mce_ops_t nvg_mce_ops = {
	.enter_cstate = nvg_enter_cstate,
	.update_cstate_info = nvg_update_cstate_info,
	.update_crossover_time = nvg_update_crossover_time,
	.read_cstate_stats = nvg_read_cstate_stats,
	.write_cstate_stats = nvg_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = nvg_is_ccx_allowed,
	.is_sc7_allowed = nvg_is_sc7_allowed,
	.online_core = nvg_online_core,
	.cc3_ctrl = nvg_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
	.misc_ccplex = ari_misc_ccplex
};

/* ARI function handlers */
static arch_mce_ops_t ari_mce_ops = {
	.enter_cstate = ari_enter_cstate,
	.update_cstate_info = ari_update_cstate_info,
	.update_crossover_time = ari_update_crossover_time,
	.read_cstate_stats = ari_read_cstate_stats,
	.write_cstate_stats = ari_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = ari_is_ccx_allowed,
	.is_sc7_allowed = ari_is_sc7_allowed,
	.online_core = ari_online_core,
	.cc3_ctrl = ari_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
	.misc_ccplex = ari_misc_ccplex
};

typedef struct mce_config {
	uint32_t ari_base;
	arch_mce_ops_t *ops;
} mce_config_t;

/* Table to hold the per-CPU ARI base address and function handlers */
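/*
 * Entries 0..3 below correspond to the Cortex-A57 cores and entries 4..5
 * to the Denver cores; mce_get_curr_cpu_ari_base() and
 * mce_get_curr_cpu_ops() rely on this ordering when converting a CPU id
 * into a table index.
 */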
static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
	{
		/* A57 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 2 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 3 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* D15 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
		.ops = &nvg_mce_ops,
	},
	{
		/* D15 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
		.ops = &nvg_mce_ops,
	}
};

static uint32_t mce_get_curr_cpu_ari_base(void)
{
	uint32_t mpidr = read_mpidr();
	int cpuid = mpidr & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
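	/*
	 * For example, Cortex-A57 core 1 uses index 1, while Denver core 1
	 * picks up index 5 after the adjustment below.
	 */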
	if (impl == DENVER_IMPL)
		cpuid |= 0x4;

	return mce_cfg_table[cpuid].ari_base;
}

static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
	uint32_t mpidr = read_mpidr();
	int cpuid = mpidr & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
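	/*
	 * As above, a Denver core id is shifted into the 4..5 range so that
	 * it indexes the nvg_mce_ops entries of mce_cfg_table.
	 */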
	if (impl == DENVER_IMPL)
		cpuid |= 0x4;

	return mce_cfg_table[cpuid].ops;
}

/*******************************************************************************
 * Common handler for all MCE commands
 ******************************************************************************/
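/*
 * The command and its arguments typically originate from the non-secure
 * world (e.g. via the Tegra SiP SMC dispatcher, an assumption based on how
 * the saved context is used here): arg0..arg2 carry the first three SMC
 * arguments, extra parameters for MCE_CMD_UPDATE_CSTATE_INFO are fetched
 * from the saved X4..X6, and results are returned by updating the saved
 * non-secure X1..X3 registers.
 */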
int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
			uint64_t arg2)
{
	arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t ret64 = 0, arg3, arg4, arg5;
	int ret = 0;
	mca_cmd_t mca_cmd;
	uncore_perfmon_req_t req;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);

	assert(ctx);
	assert(gp_regs);

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

	switch (cmd) {
	case MCE_CMD_ENTER_CSTATE:
		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_UPDATE_CSTATE_INFO:
		/*
		 * get the parameters required for the update cstate info
		 * command
		 */
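		/*
		 * Judging from mce_update_cstate_info() further down, the
		 * arguments map to: arg0 = cluster, arg1 = ccplex,
		 * arg2 = system, arg3 = system_state_force, arg4 = wake_mask
		 * and arg5 = update_wake_mask.
		 */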
		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);

		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
				(uint32_t)arg4, (uint8_t)arg5);
		if (ret < 0)
			ERROR("%s: update_cstate_info failed(%d)\n",
				__func__, ret);

		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);

		break;

	case MCE_CMD_UPDATE_CROSSOVER_TIME:
		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: update_crossover_time failed(%d)\n",
				__func__, ret);

		break;

	case MCE_CMD_READ_CSTATE_STATS:
		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);

		/* update context to return cstate stats value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);

		break;

	case MCE_CMD_WRITE_CSTATE_STATS:
		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: write_cstate_stats failed(%d)\n",
				__func__, ret);

		break;

	case MCE_CMD_IS_CCX_ALLOWED:
		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return CCx status value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);

		break;

	case MCE_CMD_IS_SC7_ALLOWED:
		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return SC7 status value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);

		break;

	case MCE_CMD_ONLINE_CORE:
		ret = ops->online_core(cpu_ari_base, arg0);
		if (ret < 0)
			ERROR("%s: online_core failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_CC3_CTRL:
		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
		if (ret < 0)
			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ECHO_DATA:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
				arg0);

		/* update context to return if echo'd data matched source */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);

		break;

	case MCE_CMD_READ_VERSIONS:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
			arg0);

		/*
		 * version = minor(63:32) | major(31:0). Update context
		 * to return major and minor version number.
		 */
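		/*
		 * For example, a raw value of 0x0000000400000003 yields
		 * major 3 (in X1) and minor 4 (in X2).
		 */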
		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));

		break;

	case MCE_CMD_ENUM_FEATURES:
		ret64 = ops->call_enum_misc(cpu_ari_base,
				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);

		/* update context to return features value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);

		break;

	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
				ret);

		break;

	case MCE_CMD_ROC_FLUSH_CACHE:
		ret = ops->roc_flush_cache(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: flush cache failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ROC_CLEAN_CACHE:
		ret = ops->roc_clean_cache(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: clean cache failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ENUM_READ_MCA:
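		/*
		 * arg0 carries the packed mca_cmd_t request and arg1 is used
		 * as the in/out data word; the MCA error/status word comes
		 * back in ret64.
		 */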
		memcpy(&mca_cmd, &arg0, sizeof(arg0));
		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);

		/* update context to return MCA data/error */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);

		break;

	case MCE_CMD_ENUM_WRITE_MCA:
		memcpy(&mca_cmd, &arg0, sizeof(arg0));
		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);

		/* update context to return MCA error */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);

		break;

#if ENABLE_CHIP_VERIFICATION_HARNESS
	case MCE_CMD_ENABLE_LATIC:
		/*
		 * This call is not for production use. The constant value,
		 * 0xFFFF0000, is specific to enabling LATIC on pre-production
		 * parts for the chip verification harness.
		 *
		 * Enabling LATIC allows S/W to read the MINI ISMs in the
		 * CCPLEX. The ISMs are used for various measurements relevant
		 * to particular locations in the silicon. They are small
		 * counters which can be polled to determine how fast a
		 * particular location in the silicon is.
		 */
		ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
			0xFFFF0000);

		break;
#endif

	case MCE_CMD_UNCORE_PERFMON_REQ:
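		/*
		 * arg0 carries the packed uncore_perfmon_req_t request and
		 * arg1 returns the perfmon data.
		 */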
		memcpy(&req, &arg0, sizeof(arg0));
		ret = ops->read_write_uncore_perfmon(cpu_ari_base, req, &arg1);

		/* update context to return data */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, arg1);
		break;

	case MCE_CMD_MISC_CCPLEX:
		ops->misc_ccplex(cpu_ari_base, arg0, arg1);

		break;

	default:
		ERROR("unknown MCE command (%d)\n", cmd);
		return EINVAL;
	}

	return ret;
}

/*******************************************************************************
 * Handler to update the reset vector for CPUs
 ******************************************************************************/
int mce_update_reset_vector(void)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_reset_vector(mce_get_curr_cpu_ari_base());

	return 0;
}

static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);

	return 0;
}

/*******************************************************************************
 * Handler to update carveout values for Video Memory Carveout region
 ******************************************************************************/
int mce_update_gsc_videomem(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZDRAM aperture
 ******************************************************************************/
int mce_update_gsc_tzdram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZ SysRAM aperture
 ******************************************************************************/
int mce_update_gsc_tzram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
}

/*******************************************************************************
 * Handler to shutdown/reset the entire system
 ******************************************************************************/
__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* sanity check state value */
	if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
	    state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
		panic();

	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);

	/* wait till the CCPLEX powers down */
	for (;;)
		;

	panic();
}

/*******************************************************************************
 * Handler to issue the UPDATE_CSTATE_INFO request
 ******************************************************************************/
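/*
 * Minimal usage sketch (illustrative only; actual callers in the platform
 * power management code fill these fields from the requested power state):
 *
 *	mce_cstate_info_t info = { 0 };
 *
 *	info.cluster = <target cluster cstate>;
 *	info.system = <target system cstate>;
 *	info.wake_mask = <wake events mask>;
 *	info.update_wake_mask = 1;
 *	mce_update_cstate_info(&info);
 */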
void mce_update_cstate_info(mce_cstate_info_t *cstate)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* issue the UPDATE_CSTATE_INFO request */
	ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
		cstate->ccplex, cstate->system, cstate->system_state_force,
		cstate->wake_mask, cstate->update_wake_mask);
}

/*******************************************************************************
 * Handler to read the MCE firmware version and check if it is compatible
 * with the interface header that BL3-1 was compiled against
 ******************************************************************************/
void mce_verify_firmware_version(void)
{
	arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t version;
	uint32_t major, minor;

	/*
	 * MCE firmware is not running on emulation/simulation platforms.
	 */
	if (tegra_platform_is_emulation())
		return;

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

	/*
	 * Read the MCE firmware version and extract the major and minor
	 * version fields
	 */
	version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
	major = (uint32_t)version;
	minor = (uint32_t)(version >> 32);

	INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
		TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);

	/*
	 * Verify that the MCE firmware version and the interface header
	 * match
	 */
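	/*
	 * The major numbers must match exactly, while the firmware may
	 * report a newer (greater or equal) minor number than the header.
	 */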
	if (major != TEGRA_ARI_VERSION_MAJOR) {
		ERROR("ARI major version mismatch\n");
		panic();
	}

	if (minor < TEGRA_ARI_VERSION_MINOR) {
		ERROR("ARI minor version mismatch\n");
		panic();
	}
}