xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c (revision 719f3ec242e671cf012b2e88f7a9ab3cfa063c91)
/*
 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <denver.h>
#include <mce.h>
#include <mmio.h>
#include <string.h>
#include <sys/errno.h>
#include <t18x_ari.h>
#include <tegra_def.h>
#include <tegra_platform.h>

/* NVG function handlers */
static arch_mce_ops_t nvg_mce_ops = {
	.enter_cstate = nvg_enter_cstate,
	.update_cstate_info = nvg_update_cstate_info,
	.update_crossover_time = nvg_update_crossover_time,
	.read_cstate_stats = nvg_read_cstate_stats,
	.write_cstate_stats = nvg_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = nvg_is_ccx_allowed,
	.is_sc7_allowed = nvg_is_sc7_allowed,
	.online_core = nvg_online_core,
	.cc3_ctrl = nvg_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon
};

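/*
 * Note that the NVG handler table above reuses the ARI implementations
 * for the enumeration/misc, reset vector, ROC cache, MCA, CCPLEX GSC,
 * CCPLEX state and uncore perfmon operations.
 */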
/* ARI function handlers */
static arch_mce_ops_t ari_mce_ops = {
	.enter_cstate = ari_enter_cstate,
	.update_cstate_info = ari_update_cstate_info,
	.update_crossover_time = ari_update_crossover_time,
	.read_cstate_stats = ari_read_cstate_stats,
	.write_cstate_stats = ari_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = ari_is_ccx_allowed,
	.is_sc7_allowed = ari_is_sc7_allowed,
	.online_core = ari_online_core,
	.cc3_ctrl = ari_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon
};

typedef struct mce_config {
	uint32_t ari_base;
	arch_mce_ops_t *ops;
} mce_config_t;

/* Table to hold the per-CPU ARI base address and function handlers */
static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
	{
		/* A57 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 2 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 3 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* D15 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
		.ops = &nvg_mce_ops,
	},
	{
		/* D15 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
		.ops = &nvg_mce_ops,
	}
};

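/*
 * The helpers below map the executing CPU to an entry in mce_cfg_table:
 * Cortex-A57 cores use their MPIDR cpuid (0-3) directly, while Denver
 * cores have 0x4 ORed in, so Denver core 1, for example, resolves to
 * entry 5 (MCE_ARI_APERTURE_5_OFFSET).
 */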
static uint32_t mce_get_curr_cpu_ari_base(void)
{
	uint32_t mpidr = read_mpidr();
	int cpuid = mpidr & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
	if (impl == DENVER_IMPL)
		cpuid |= 0x4;

	return mce_cfg_table[cpuid].ari_base;
}

static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
	uint32_t mpidr = read_mpidr();
	int cpuid = mpidr & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
	if (impl == DENVER_IMPL)
		cpuid |= 0x4;

	return mce_cfg_table[cpuid].ops;
}

/*******************************************************************************
 * Common handler for all MCE commands
 ******************************************************************************/
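/*
 * arg0..arg2 carry the parameters forwarded by the SiP/SMC dispatcher
 * (assumed to be the caller's X1..X3). Additional parameters for
 * MCE_CMD_UPDATE_CSTATE_INFO are read from the saved X4..X6 of the
 * non-secure context, and results are returned to the caller through
 * the saved X1..X3.
 */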
int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
			uint64_t arg2)
{
	arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t ret64 = 0, arg3, arg4, arg5;
	int ret = 0;
	mca_cmd_t mca_cmd;
	uncore_perfmon_req_t req;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);

	assert(ctx);
	assert(gp_regs);

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

	switch (cmd) {
	case MCE_CMD_ENTER_CSTATE:
		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_UPDATE_CSTATE_INFO:
		/*
		 * get the parameters required for the update cstate info
		 * command
		 */
		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);

		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
				(uint32_t)arg4, (uint8_t)arg5);
		if (ret < 0)
			ERROR("%s: update_cstate_info failed(%d)\n",
				__func__, ret);

		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);

		break;

	case MCE_CMD_UPDATE_CROSSOVER_TIME:
		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: update_crossover_time failed(%d)\n",
				__func__, ret);

		break;

	case MCE_CMD_READ_CSTATE_STATS:
		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);

		/* update context to return cstate stats value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);

		break;

	case MCE_CMD_WRITE_CSTATE_STATS:
		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: write_cstate_stats failed(%d)\n",
				__func__, ret);

		break;

	case MCE_CMD_IS_CCX_ALLOWED:
		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return CCx status value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);

		break;

	case MCE_CMD_IS_SC7_ALLOWED:
		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return SC7 status value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);

		break;

	case MCE_CMD_ONLINE_CORE:
		ret = ops->online_core(cpu_ari_base, arg0);
		if (ret < 0)
			ERROR("%s: online_core failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_CC3_CTRL:
		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
		if (ret < 0)
			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ECHO_DATA:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
				arg0);

		/* update context to return if echo'd data matched source */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);

		break;

	case MCE_CMD_READ_VERSIONS:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
			arg0);

		/*
		 * version = minor(63:32) | major(31:0). Update context
		 * to return major and minor version number.
		 */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));

		break;

	case MCE_CMD_ENUM_FEATURES:
		ret64 = ops->call_enum_misc(cpu_ari_base,
				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);

		/* update context to return features value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);

		break;

	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
				ret);

		break;

	case MCE_CMD_ROC_FLUSH_CACHE:
		ret = ops->roc_flush_cache(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: flush cache failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ROC_CLEAN_CACHE:
		ret = ops->roc_clean_cache(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: clean cache failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ENUM_READ_MCA:
		memcpy(&mca_cmd, &arg0, sizeof(arg0));
		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);

		/* update context to return MCA data/error */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);

		break;

	case MCE_CMD_ENUM_WRITE_MCA:
		memcpy(&mca_cmd, &arg0, sizeof(arg0));
		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);

		/* update context to return MCA error */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);

		break;

#if ENABLE_CHIP_VERIFICATION_HARNESS
	case MCE_CMD_ENABLE_LATIC:
		/*
		 * This call is not for production use. The constant value,
		 * 0xFFFF0000, is specific to enabling LATIC on pre-production
		 * parts for the chip verification harness.
		 *
		 * Enabling LATIC allows S/W to read the MINI ISMs in the
		 * CCPLEX. The ISMs are used for various measurements relevant
		 * to particular locations in the silicon. They are small
		 * counters which can be polled to determine how fast a
		 * particular location in the silicon is.
		 */
		ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
			0xFFFF0000);

		break;
#endif

	case MCE_CMD_UNCORE_PERFMON_REQ:
		memcpy(&req, &arg0, sizeof(arg0));
		ret = ops->read_write_uncore_perfmon(cpu_ari_base, req, &arg1);

		/* update context to return data */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, arg1);
		break;

	default:
		ERROR("unknown MCE command (%d)\n", cmd);
		return EINVAL;
	}

	return ret;
}

/*******************************************************************************
 * Handler to update the reset vector for CPUs
 ******************************************************************************/
int mce_update_reset_vector(uint32_t addr_lo, uint32_t addr_hi)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_reset_vector(mce_get_curr_cpu_ari_base(), addr_lo, addr_hi);

	return 0;
}

static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);

	return 0;
}

/*******************************************************************************
 * Handler to update carveout values for Video Memory Carveout region
 ******************************************************************************/
int mce_update_gsc_videomem(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZDRAM aperture
 ******************************************************************************/
int mce_update_gsc_tzdram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZ SysRAM aperture
 ******************************************************************************/
int mce_update_gsc_tzram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
}

/*******************************************************************************
 * Handler to shutdown/reset the entire system
 ******************************************************************************/
__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* sanity check state value */
	if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
	    state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
		panic();

	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);

	/* wait till the CCPLEX powers down */
	for (;;)
		;

	panic();
}

/*******************************************************************************
 * Handler to issue the UPDATE_CSTATE_INFO request
 ******************************************************************************/
void mce_update_cstate_info(mce_cstate_info_t *cstate)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* issue the UPDATE_CSTATE_INFO request */
	ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
		cstate->ccplex, cstate->system, cstate->system_state_force,
		cstate->wake_mask, cstate->update_wake_mask);
}
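/*
 * Illustrative usage sketch (a comment only, not part of this driver):
 * a caller typically zero-initialises an mce_cstate_info_t and fills in
 * just the fields it needs before invoking the helper above. The field
 * names come from the call above; TEGRA_ARI_CLUSTER_CC6 is assumed to be
 * one of the cluster state values defined in t18x_ari.h.
 *
 *	mce_cstate_info_t info = { 0 };
 *
 *	info.cluster = TEGRA_ARI_CLUSTER_CC6;
 *	info.update_wake_mask = 1;
 *	mce_update_cstate_info(&info);
 */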

/*******************************************************************************
 * Handler to read the MCE firmware version and check if it is compatible
 * with the interface header that BL3-1 was compiled against
 ******************************************************************************/
void mce_verify_firmware_version(void)
{
	arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t version;
	uint32_t major, minor;

	/*
	 * MCE firmware is not running on emulation platforms, so skip the
	 * version check there.
	 */
	if (tegra_platform_is_emulation())
		return;

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

	/*
	 * Read the MCE firmware version and extract the major and minor
	 * version fields
	 */
	version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
	major = (uint32_t)version;
	minor = (uint32_t)(version >> 32);

	INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
		TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);

	/*
	 * Verify that the MCE firmware version and the interface header
	 * match
	 */
	if (major != TEGRA_ARI_VERSION_MAJOR) {
		ERROR("ARI major version mismatch\n");
		panic();
	}

	if (minor < TEGRA_ARI_VERSION_MINOR) {
		ERROR("ARI minor version mismatch\n");
		panic();
	}
}
523