xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c (revision c11e0ddfbff1475c581cfb2babc27d3e48984c74)
1 /*
2  * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch.h>
32 #include <arch_helpers.h>
33 #include <assert.h>
34 #include <bl_common.h>
35 #include <context.h>
36 #include <context_mgmt.h>
37 #include <debug.h>
38 #include <denver.h>
39 #include <mce.h>
40 #include <mmio.h>
41 #include <string.h>
42 #include <sys/errno.h>
43 #include <t18x_ari.h>
44 #include <tegra_def.h>
45 
/* NVG functions handlers */
/*
 * Handler table for Denver CPUs. Per-core cstate/status operations go
 * through the NVG interface; note that several entries (enum/misc,
 * reset vector update, ROC cache maintenance, MCA, CCPLEX GSC and
 * CCPLEX state) reuse the ARI implementations.
 */
static arch_mce_ops_t nvg_mce_ops = {
	.enter_cstate = nvg_enter_cstate,
	.update_cstate_info = nvg_update_cstate_info,
	.update_crossover_time = nvg_update_crossover_time,
	.read_cstate_stats = nvg_read_cstate_stats,
	.write_cstate_stats = nvg_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = nvg_is_ccx_allowed,
	.is_sc7_allowed = nvg_is_sc7_allowed,
	.online_core = nvg_online_core,
	.cc3_ctrl = nvg_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon
};
67 
/* ARI functions handlers */
/* Handler table for Cortex-A57 CPUs: every operation uses the ARI interface. */
static arch_mce_ops_t ari_mce_ops = {
	.enter_cstate = ari_enter_cstate,
	.update_cstate_info = ari_update_cstate_info,
	.update_crossover_time = ari_update_crossover_time,
	.read_cstate_stats = ari_read_cstate_stats,
	.write_cstate_stats = ari_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = ari_is_ccx_allowed,
	.is_sc7_allowed = ari_is_sc7_allowed,
	.online_core = ari_online_core,
	.cc3_ctrl = ari_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon
};
89 
/* Per-CPU MCE configuration */
typedef struct mce_config {
	uint32_t ari_base;	/* MMIO base address of this CPU's ARI aperture */
	arch_mce_ops_t *ops;	/* handler table for this CPU (ARI or NVG) */
} mce_config_t;
94 
/* Table to hold the per-CPU ARI base address and function handlers */
/*
 * Indices 0-3 are the Cortex-A57 cores, indices 4-5 the Denver cores;
 * mce_get_curr_cpu_ari_base()/mce_get_curr_cpu_ops() offset Denver CPU
 * ids by 4 to land in the second half of this table.
 */
static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
	{
		/* A57 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 2 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 3 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* D15 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
		.ops = &nvg_mce_ops,
	},
	{
		/* D15 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
		.ops = &nvg_mce_ops,
	}
};
128 
129 static uint32_t mce_get_curr_cpu_ari_base(void)
130 {
131 	uint32_t mpidr = read_mpidr();
132 	int cpuid =  mpidr & MPIDR_CPU_MASK;
133 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
134 
135 	/*
136 	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
137 	 * ARM CortexA-57 CPUs. Each cluster consists of 4 CPUs and the CPU
138 	 * numbers start from 0. In order to get the proper arch_mce_ops_t
139 	 * struct, we have to convert the Denver CPU ids to the corresponding
140 	 * indices in the mce_ops_table array.
141 	 */
142 	if (impl == DENVER_IMPL)
143 		cpuid |= 0x4;
144 
145 	return mce_cfg_table[cpuid].ari_base;
146 }
147 
148 static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
149 {
150 	uint32_t mpidr = read_mpidr();
151 	int cpuid =  mpidr & MPIDR_CPU_MASK;
152 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
153 
154 	/*
155 	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
156 	 * ARM CortexA-57 CPUs. Each cluster consists of 4 CPUs and the CPU
157 	 * numbers start from 0. In order to get the proper arch_mce_ops_t
158 	 * struct, we have to convert the Denver CPU ids to the corresponding
159 	 * indices in the mce_ops_table array.
160 	 */
161 	if (impl == DENVER_IMPL)
162 		cpuid |= 0x4;
163 
164 	return mce_cfg_table[cpuid].ops;
165 }
166 
167 /*******************************************************************************
168  * Common handler for all MCE commands
169  ******************************************************************************/
170 int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
171 			uint64_t arg2)
172 {
173 	arch_mce_ops_t *ops;
174 	uint32_t cpu_ari_base;
175 	uint64_t ret64 = 0, arg3, arg4, arg5;
176 	int ret = 0;
177 	mca_cmd_t mca_cmd;
178 	uncore_perfmon_req_t req;
179 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
180 	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
181 
182 	assert(ctx);
183 	assert(gp_regs);
184 
185 	/* get a pointer to the CPU's arch_mce_ops_t struct */
186 	ops = mce_get_curr_cpu_ops();
187 
188 	/* get the CPU's ARI base address */
189 	cpu_ari_base = mce_get_curr_cpu_ari_base();
190 
191 	switch (cmd) {
192 	case MCE_CMD_ENTER_CSTATE:
193 		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
194 		if (ret < 0)
195 			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
196 
197 		break;
198 
199 	case MCE_CMD_UPDATE_CSTATE_INFO:
200 		/*
201 		 * get the parameters required for the update cstate info
202 		 * command
203 		 */
204 		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
205 		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
206 		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
207 
208 		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
209 				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
210 				(uint32_t)arg4, (uint8_t)arg5);
211 		if (ret < 0)
212 			ERROR("%s: update_cstate_info failed(%d)\n",
213 				__func__, ret);
214 
215 		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
216 		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
217 		write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);
218 
219 		break;
220 
221 	case MCE_CMD_UPDATE_CROSSOVER_TIME:
222 		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
223 		if (ret < 0)
224 			ERROR("%s: update_crossover_time failed(%d)\n",
225 				__func__, ret);
226 
227 		break;
228 
229 	case MCE_CMD_READ_CSTATE_STATS:
230 		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
231 
232 		/* update context to return cstate stats value */
233 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
234 		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);
235 
236 		break;
237 
238 	case MCE_CMD_WRITE_CSTATE_STATS:
239 		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
240 		if (ret < 0)
241 			ERROR("%s: write_cstate_stats failed(%d)\n",
242 				__func__, ret);
243 
244 		break;
245 
246 	case MCE_CMD_IS_CCX_ALLOWED:
247 		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
248 		if (ret < 0) {
249 			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
250 			break;
251 		}
252 
253 		/* update context to return CCx status value */
254 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
255 
256 		break;
257 
258 	case MCE_CMD_IS_SC7_ALLOWED:
259 		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
260 		if (ret < 0) {
261 			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
262 			break;
263 		}
264 
265 		/* update context to return SC7 status value */
266 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
267 		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);
268 
269 		break;
270 
271 	case MCE_CMD_ONLINE_CORE:
272 		ret = ops->online_core(cpu_ari_base, arg0);
273 		if (ret < 0)
274 			ERROR("%s: online_core failed(%d)\n", __func__, ret);
275 
276 		break;
277 
278 	case MCE_CMD_CC3_CTRL:
279 		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
280 		if (ret < 0)
281 			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
282 
283 		break;
284 
285 	case MCE_CMD_ECHO_DATA:
286 		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
287 				arg0);
288 
289 		/* update context to return if echo'd data matched source */
290 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
291 		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);
292 
293 		break;
294 
295 	case MCE_CMD_READ_VERSIONS:
296 		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
297 			arg0);
298 
299 		/*
300 		 * version = minor(63:32) | major(31:0). Update context
301 		 * to return major and minor version number.
302 		 */
303 		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
304 		write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));
305 
306 		break;
307 
308 	case MCE_CMD_ENUM_FEATURES:
309 		ret = ops->call_enum_misc(cpu_ari_base,
310 				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
311 
312 		/* update context to return features value */
313 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
314 
315 		ret = 0;
316 
317 		break;
318 
319 	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
320 		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
321 		if (ret < 0)
322 			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
323 				ret);
324 
325 		break;
326 
327 	case MCE_CMD_ROC_FLUSH_CACHE:
328 		ret = ops->roc_flush_cache(cpu_ari_base);
329 		if (ret < 0)
330 			ERROR("%s: flush cache failed(%d)\n", __func__, ret);
331 
332 		break;
333 
334 	case MCE_CMD_ROC_CLEAN_CACHE:
335 		ret = ops->roc_clean_cache(cpu_ari_base);
336 		if (ret < 0)
337 			ERROR("%s: clean cache failed(%d)\n", __func__, ret);
338 
339 		break;
340 
341 	case MCE_CMD_ENUM_READ_MCA:
342 		memcpy(&mca_cmd, &arg0, sizeof(arg0));
343 		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
344 
345 		/* update context to return MCA data/error */
346 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
347 		write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
348 		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
349 
350 		break;
351 
352 	case MCE_CMD_ENUM_WRITE_MCA:
353 		memcpy(&mca_cmd, &arg0, sizeof(arg0));
354 		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
355 
356 		/* update context to return MCA error */
357 		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
358 		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
359 
360 		break;
361 
362 #if ENABLE_CHIP_VERIFICATION_HARNESS
363 	case MCE_CMD_ENABLE_LATIC:
364 		/*
365 		 * This call is not for production use. The constant value,
366 		 * 0xFFFF0000, is specific to allowing for enabling LATIC on
367 		 * pre-production parts for the chip verification harness.
368 		 *
369 		 * Enabling LATIC allows S/W to read the MINI ISPs in the
370 		 * CCPLEX. The ISMs are used for various measurements relevant
371 		 * to particular locations in the Silicon. They are small
372 		 * counters which can be polled to determine how fast a
373 		 * particular location in the Silicon is.
374 		 */
375 		ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
376 			0xFFFF0000);
377 
378 		break;
379 #endif
380 
381 	case MCE_CMD_UNCORE_PERFMON_REQ:
382 		memcpy(&req, &arg0, sizeof(arg0));
383 		ret = ops->read_write_uncore_perfmon(cpu_ari_base, req, &arg1);
384 
385 		/* update context to return data */
386 		write_ctx_reg(gp_regs, CTX_GPREG_X1, arg1);
387 		break;
388 
389 	default:
390 		ERROR("unknown MCE command (%d)\n", cmd);
391 		return EINVAL;
392 	}
393 
394 	return ret;
395 }
396 
397 /*******************************************************************************
398  * Handler to update the reset vector for CPUs
399  ******************************************************************************/
400 int mce_update_reset_vector(uint32_t addr_lo, uint32_t addr_hi)
401 {
402 	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
403 
404 	ops->update_reset_vector(mce_get_curr_cpu_ari_base(), addr_lo, addr_hi);
405 
406 	return 0;
407 }
408 
409 static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
410 {
411 	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
412 
413 	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
414 
415 	return 0;
416 }
417 
418 /*******************************************************************************
419  * Handler to update carveout values for Video Memory Carveout region
420  ******************************************************************************/
421 int mce_update_gsc_videomem(void)
422 {
423 	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
424 }
425 
426 /*******************************************************************************
427  * Handler to update carveout values for TZDRAM aperture
428  ******************************************************************************/
429 int mce_update_gsc_tzdram(void)
430 {
431 	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
432 }
433 
434 /*******************************************************************************
435  * Handler to update carveout values for TZ SysRAM aperture
436  ******************************************************************************/
437 int mce_update_gsc_tzram(void)
438 {
439 	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
440 }
441 
442 /*******************************************************************************
443  * Handler to shutdown/reset the entire system
444  ******************************************************************************/
445 __dead2 void mce_enter_ccplex_state(uint32_t state_idx)
446 {
447 	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
448 
449 	/* sanity check state value */
450 	if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
451 	    state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
452 		panic();
453 
454 	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
455 
456 	/* wait till the CCPLEX powers down */
457 	for (;;)
458 		;
459 
460 	panic();
461 }
462 
463 /*******************************************************************************
464  * Handler to issue the UPDATE_CSTATE_INFO request
465  ******************************************************************************/
466 void mce_update_cstate_info(mce_cstate_info_t *cstate)
467 {
468 	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
469 
470 	/* issue the UPDATE_CSTATE_INFO request */
471 	ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
472 		cstate->ccplex, cstate->system, cstate->system_state_force,
473 		cstate->wake_mask, cstate->update_wake_mask);
474 }
475 
476 /*******************************************************************************
477  * Handler to read the MCE firmware version and check if it is compatible
478  * with interface header the BL3-1 was compiled against
479  ******************************************************************************/
480 void mce_verify_firmware_version(void)
481 {
482 	arch_mce_ops_t *ops;
483 	uint32_t cpu_ari_base;
484 	uint64_t version;
485 	uint32_t major, minor, chip_minor, chip_major;
486 
487 	/* get a pointer to the CPU's arch_mce_ops_t struct */
488 	ops = mce_get_curr_cpu_ops();
489 
490 	/* get the CPU's ARI base address */
491 	cpu_ari_base = mce_get_curr_cpu_ari_base();
492 
493 	/*
494 	 * Read the MCE firmware version and extract the major and minor
495 	 * version fields
496 	 */
497 	version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
498 	major = (uint32_t)version;
499 	minor = (uint32_t)(version >> 32);
500 
501 	INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
502 		TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);
503 
504 	/*
505 	 * MCE firmware is not running on simulation platforms. Simulation
506 	 * platforms are identified by v0.3 from the Tegra Chip ID value.
507 	 */
508 	chip_major = (mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET) >>
509 			MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
510 	chip_minor = (mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET) >>
511 			MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
512 	if ((chip_major == 0) && (chip_minor == 3))
513 		return;
514 
515 	/*
516 	 * Verify that the MCE firmware version and the interface header
517 	 * match
518 	 */
519 	if (major != TEGRA_ARI_VERSION_MAJOR) {
520 		ERROR("ARI major version mismatch\n");
521 		panic();
522 	}
523 
524 	if (minor < TEGRA_ARI_VERSION_MINOR) {
525 		ERROR("ARI minor version mismatch\n");
526 		panic();
527 	}
528 }
529