/* /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c (revision 0d5ec955b8f7900ca33abf88638d499742531159) */
/*
 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <denver.h>
#include <mce.h>
#include <mmio.h>
#include <string.h>
#include <sys/errno.h>
#include <t18x_ari.h>
#include <tegra_def.h>
#include <tegra_platform.h>

/* NVG function handlers */
static arch_mce_ops_t nvg_mce_ops = {
	.enter_cstate = nvg_enter_cstate,
	.update_cstate_info = nvg_update_cstate_info,
	.update_crossover_time = nvg_update_crossover_time,
	.read_cstate_stats = nvg_read_cstate_stats,
	.write_cstate_stats = nvg_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = nvg_is_ccx_allowed,
	.is_sc7_allowed = nvg_is_sc7_allowed,
	.online_core = nvg_online_core,
	.cc3_ctrl = nvg_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon
};
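
/*
 * Note: several entries in the NVG ops table above (the enumeration/misc
 * call, reset vector update, ROC cache maintenance, MCA accesses, CCPLEX GSC
 * updates, CCPLEX state entry and uncore perfmon accesses) point at the ARI
 * implementations, so Denver cores fall back to the ARI interface for those
 * operations.
 */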

/* ARI function handlers */
static arch_mce_ops_t ari_mce_ops = {
	.enter_cstate = ari_enter_cstate,
	.update_cstate_info = ari_update_cstate_info,
	.update_crossover_time = ari_update_crossover_time,
	.read_cstate_stats = ari_read_cstate_stats,
	.write_cstate_stats = ari_write_cstate_stats,
	.call_enum_misc = ari_enumeration_misc,
	.is_ccx_allowed = ari_is_ccx_allowed,
	.is_sc7_allowed = ari_is_sc7_allowed,
	.online_core = ari_online_core,
	.cc3_ctrl = ari_cc3_ctrl,
	.update_reset_vector = ari_reset_vector_update,
	.roc_flush_cache = ari_roc_flush_cache,
	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
	.roc_clean_cache = ari_roc_clean_cache,
	.read_write_mca = ari_read_write_mca,
	.update_ccplex_gsc = ari_update_ccplex_gsc,
	.enter_ccplex_state = ari_enter_ccplex_state,
	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon
};

typedef struct mce_config {
	uint32_t ari_base;
	arch_mce_ops_t *ops;
} mce_config_t;

/* Table to hold the per-CPU ARI base address and function handlers */
static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
	{
		/* A57 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 2 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* A57 Core 3 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
		.ops = &ari_mce_ops,
	},
	{
		/* D15 Core 0 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
		.ops = &nvg_mce_ops,
	},
	{
		/* D15 Core 1 */
		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
		.ops = &nvg_mce_ops,
	}
};
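
/*
 * Index layout of mce_cfg_table: the Cortex-A57 cores occupy indices 0-3 and
 * the Denver (D15) cores occupy indices 4-5, which is why the helpers below
 * OR 0x4 into the Denver CPU ids before indexing the table.
 */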

static uint32_t mce_get_curr_cpu_ari_base(void)
{
	uint32_t mpidr = read_mpidr();
	int cpuid = mpidr & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array (e.g. Denver core 1 maps to
	 * index 5).
	 */
	if (impl == DENVER_IMPL)
		cpuid |= 0x4;

	return mce_cfg_table[cpuid].ari_base;
}

static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
	uint32_t mpidr = read_mpidr();
	int cpuid = mpidr & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
	 * numbers start from 0. In order to get the proper arch_mce_ops_t
	 * struct, we have to convert the Denver CPU ids to the corresponding
	 * indices in the mce_cfg_table array.
	 */
	if (impl == DENVER_IMPL)
		cpuid |= 0x4;

	return mce_cfg_table[cpuid].ops;
}

/*******************************************************************************
 * Common handler for all MCE commands
 ******************************************************************************/
int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
			uint64_t arg2)
{
	arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t ret64 = 0, arg3, arg4, arg5;
	int ret = 0;
	mca_cmd_t mca_cmd;
	uncore_perfmon_req_t req;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);

	assert(ctx);
	assert(gp_regs);

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

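	/*
	 * Dispatch the command. arg0-arg2 are the values passed in by the
	 * caller; commands that need more inputs read them from the saved
	 * X4-X6 of the non-secure GP register context, and results are
	 * handed back by writing the saved X1-X3 registers (typically this
	 * handler is reached from the SiP SMC path, but that is up to the
	 * caller).
	 */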
	switch (cmd) {
	case MCE_CMD_ENTER_CSTATE:
		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_UPDATE_CSTATE_INFO:
		/*
		 * get the parameters required for the update cstate info
		 * command
		 */
		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);

		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
				(uint32_t)arg4, (uint8_t)arg5);
		if (ret < 0)
			ERROR("%s: update_cstate_info failed(%d)\n",
				__func__, ret);

		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
		write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);

		break;

	case MCE_CMD_UPDATE_CROSSOVER_TIME:
		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: update_crossover_time failed(%d)\n",
				__func__, ret);

		break;

	case MCE_CMD_READ_CSTATE_STATS:
		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);

		/* update context to return cstate stats value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);

		break;

	case MCE_CMD_WRITE_CSTATE_STATS:
		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
		if (ret < 0)
			ERROR("%s: write_cstate_stats failed(%d)\n",
				__func__, ret);

		break;

	case MCE_CMD_IS_CCX_ALLOWED:
		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return CCx status value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);

		break;

	case MCE_CMD_IS_SC7_ALLOWED:
		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
		if (ret < 0) {
			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
			break;
		}

		/* update context to return SC7 status value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);

		break;

	case MCE_CMD_ONLINE_CORE:
		ret = ops->online_core(cpu_ari_base, arg0);
		if (ret < 0)
			ERROR("%s: online_core failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_CC3_CTRL:
		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
		if (ret < 0)
			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ECHO_DATA:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
				arg0);

		/* update context to return if echo'd data matched source */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);

		break;

	case MCE_CMD_READ_VERSIONS:
		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
			arg0);

		/*
		 * version = minor(63:32) | major(31:0). Update context
		 * to return major and minor version number.
		 */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));

		break;

	case MCE_CMD_ENUM_FEATURES:
		ret64 = ops->call_enum_misc(cpu_ari_base,
				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);

		/* update context to return features value */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);

		break;

	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
				ret);

		break;

	case MCE_CMD_ROC_FLUSH_CACHE:
		ret = ops->roc_flush_cache(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: flush cache failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ROC_CLEAN_CACHE:
		ret = ops->roc_clean_cache(cpu_ari_base);
		if (ret < 0)
			ERROR("%s: clean cache failed(%d)\n", __func__, ret);

		break;

	case MCE_CMD_ENUM_READ_MCA:
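		/*
		 * The raw 64-bit argument carries the MCA command encoding;
		 * memcpy() is used to reinterpret it as an mca_cmd_t without
		 * running into type-punning/strict-aliasing issues. The same
		 * pattern is used for the write and uncore perfmon commands
		 * below.
		 */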
		memcpy(&mca_cmd, &arg0, sizeof(arg0));
		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);

		/* update context to return MCA data/error */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);

		break;

	case MCE_CMD_ENUM_WRITE_MCA:
		memcpy(&mca_cmd, &arg0, sizeof(arg0));
		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);

		/* update context to return MCA error */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);

		break;

#if ENABLE_CHIP_VERIFICATION_HARNESS
	case MCE_CMD_ENABLE_LATIC:
		/*
		 * This call is not for production use. The constant value,
		 * 0xFFFF0000, is specific to allowing for enabling LATIC on
		 * pre-production parts for the chip verification harness.
		 *
		 * Enabling LATIC allows S/W to read the MINI ISMs in the
		 * CCPLEX. The ISMs are used for various measurements relevant
		 * to particular locations in the Silicon. They are small
		 * counters which can be polled to determine how fast a
		 * particular location in the Silicon is.
		 */
		ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
			0xFFFF0000);

		break;
#endif

	case MCE_CMD_UNCORE_PERFMON_REQ:
		memcpy(&req, &arg0, sizeof(arg0));
		ret = ops->read_write_uncore_perfmon(cpu_ari_base, req, &arg1);

		/* update context to return data */
		write_ctx_reg(gp_regs, CTX_GPREG_X1, arg1);
		break;

	default:
		ERROR("unknown MCE command (%d)\n", cmd);
		return EINVAL;
	}

	return ret;
}
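
/*
 * Illustrative example (not part of the original driver): assuming the
 * calling SMC handler passes its X1-X3 straight through as arg0-arg2, a
 * simple sanity check of the MCE echo command could look like:
 *
 *	ret = mce_command_handler(MCE_CMD_ECHO_DATA, 0xAABBCCDDULL, 0, 0);
 *
 * On return, the saved X1/X2 of the non-secure context hold 1 when the MCE
 * echoed the value back unchanged (see the MCE_CMD_ECHO_DATA case above).
 */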

/*******************************************************************************
 * Handler to update the reset vector for CPUs
 ******************************************************************************/
int mce_update_reset_vector(uint32_t addr_lo, uint32_t addr_hi)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_reset_vector(mce_get_curr_cpu_ari_base(), addr_lo, addr_hi);

	return 0;
}

static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);

	return 0;
}

/*******************************************************************************
 * Handler to update carveout values for Video Memory Carveout region
 ******************************************************************************/
int mce_update_gsc_videomem(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZDRAM aperture
 ******************************************************************************/
int mce_update_gsc_tzdram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZ SysRAM aperture
 ******************************************************************************/
int mce_update_gsc_tzram(void)
{
	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
}

/*******************************************************************************
 * Handler to shutdown/reset the entire system
 ******************************************************************************/
__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* sanity check state value */
	if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
	    state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
		panic();

	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);

	/* wait till the CCPLEX powers down */
	for (;;)
		;

	panic();
}

/*******************************************************************************
 * Handler to issue the UPDATE_CSTATE_INFO request
 ******************************************************************************/
void mce_update_cstate_info(mce_cstate_info_t *cstate)
{
	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

	/* issue the UPDATE_CSTATE_INFO request */
	ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
		cstate->ccplex, cstate->system, cstate->system_state_force,
		cstate->wake_mask, cstate->update_wake_mask);
}
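
/*
 * Illustrative usage only (the field values below are assumptions for the
 * sake of the example, not taken from this file): callers typically
 * zero-initialize an mce_cstate_info_t and set just the fields they care
 * about before issuing the request, e.g. to update only the wake mask:
 *
 *	mce_cstate_info_t info = { 0 };
 *
 *	info.wake_mask = 0xFFFFFFFF;
 *	info.update_wake_mask = 1;
 *	mce_update_cstate_info(&info);
 */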

/*******************************************************************************
 * Handler to read the MCE firmware version and check if it is compatible
 * with the interface header BL3-1 was compiled against
 ******************************************************************************/
void mce_verify_firmware_version(void)
{
	arch_mce_ops_t *ops;
	uint32_t cpu_ari_base;
	uint64_t version;
	uint32_t major, minor;

	/*
	 * MCE firmware is not running on emulation platforms.
	 */
	if (tegra_platform_is_emulation())
		return;

	/* get a pointer to the CPU's arch_mce_ops_t struct */
	ops = mce_get_curr_cpu_ops();

	/* get the CPU's ARI base address */
	cpu_ari_base = mce_get_curr_cpu_ari_base();

	/*
	 * Read the MCE firmware version and extract the major and minor
	 * version fields
	 */
	version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
	major = (uint32_t)version;
	minor = (uint32_t)(version >> 32);

	INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
		TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);

	/*
	 * Verify that the MCE firmware version and the interface header
	 * match
	 */
	if (major != TEGRA_ARI_VERSION_MAJOR) {
		ERROR("ARI major version mismatch\n");
		panic();
	}

	if (minor < TEGRA_ARI_VERSION_MINOR) {
		ERROR("ARI minor version mismatch\n");
		panic();
	}
}
525