xref: /rk3399_ARM-atf/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c (revision bcc3c49c90a1e79befa72b8871d4d4c6031c15b7)
1 /*
2  * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch.h>
32 #include <arch_helpers.h>
33 #include <debug.h>
34 #include <denver.h>
35 #include <mmio.h>
36 #include <mce.h>
37 #include <sys/errno.h>
38 #include <t18x_ari.h>
39 
40 /*******************************************************************************
41  * Register offsets for ARI request/results
42  ******************************************************************************/
43 #define ARI_REQUEST			0x0
44 #define ARI_REQUEST_EVENT_MASK		0x4
45 #define ARI_STATUS			0x8
46 #define ARI_REQUEST_DATA_LO		0xC
47 #define ARI_REQUEST_DATA_HI		0x10
48 #define ARI_RESPONSE_DATA_LO		0x14
49 #define ARI_RESPONSE_DATA_HI		0x18
50 
51 /* Status values for the current request */
52 #define ARI_REQ_PENDING			1
53 #define ARI_REQ_ONGOING			3
54 #define ARI_REQUEST_VALID_BIT		(1 << 8)
55 #define ARI_EVT_MASK_STANDBYWFI_BIT	(1 << 7)
56 
57 /*******************************************************************************
58  * ARI helper functions
59  ******************************************************************************/
/* Read a 32-bit ARI register located 'reg' bytes past the ARI base. */
static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
{
	uint32_t addr = ari_base + reg;

	return mmio_read_32(addr);
}
64 
/* Write 'val' into the 32-bit ARI register located 'reg' bytes past the base. */
static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
{
	uint32_t addr = ari_base + reg;

	mmio_write_32(addr, val);
}
69 
70 static inline uint32_t ari_get_request_low(uint32_t ari_base)
71 {
72 	return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
73 }
74 
75 static inline uint32_t ari_get_request_high(uint32_t ari_base)
76 {
77 	return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
78 }
79 
80 static inline uint32_t ari_get_response_low(uint32_t ari_base)
81 {
82 	return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
83 }
84 
85 static inline uint32_t ari_get_response_high(uint32_t ari_base)
86 {
87 	return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
88 }
89 
90 static inline void ari_clobber_response(uint32_t ari_base)
91 {
92 	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
93 	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
94 }
95 
96 static int ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
97 		uint32_t lo, uint32_t hi)
98 {
99 	int status;
100 
101 	/* program the request, event_mask, hi and lo registers */
102 	ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
103 	ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
104 	ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
105 	ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);
106 
107 	/*
108 	 * For commands that have an event trigger, we should bypass
109 	 * ARI_STATUS polling, since MCE is waiting for SW to trigger
110 	 * the event.
111 	 */
112 	if (evt_mask)
113 		return 0;
114 
115 	/* NOTE: add timeout check if needed */
116 	status = ari_read_32(ari_base, ARI_STATUS);
117 	while (status & (ARI_REQ_ONGOING | ARI_REQ_PENDING))
118 		status = ari_read_32(ari_base, ARI_STATUS);
119 
120 	return 0;
121 }
122 
123 int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
124 {
125 	/* check for allowed power state */
126 	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
127 	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
128 		ERROR("%s: unknown cstate (%d)\n", __func__, state);
129 		return EINVAL;
130 	}
131 
132 	/* clean the previous response state */
133 	ari_clobber_response(ari_base);
134 
135 	/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
136 	return ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
137 		TEGRA_ARI_ENTER_CSTATE, state, wake_time);
138 }
139 
140 int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
141 	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
142 	uint8_t update_wake_mask)
143 {
144 	uint32_t val = 0;
145 
146 	/* clean the previous response state */
147 	ari_clobber_response(ari_base);
148 
149 	/* update CLUSTER_CSTATE? */
150 	if (cluster)
151 		val |= (cluster & CLUSTER_CSTATE_MASK) |
152 			CLUSTER_CSTATE_UPDATE_BIT;
153 
154 	/* update CCPLEX_CSTATE? */
155 	if (ccplex)
156 		val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
157 			CCPLEX_CSTATE_UPDATE_BIT;
158 
159 	/* update SYSTEM_CSTATE? */
160 	if (system)
161 		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
162 		       ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
163 			SYSTEM_CSTATE_UPDATE_BIT);
164 
165 	/* update wake mask value? */
166 	if (update_wake_mask)
167 		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
168 
169 	/* set the updated cstate info */
170 	return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
171 			wake_mask);
172 }
173 
174 int ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
175 {
176 	/* sanity check crossover type */
177 	if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
178 	    (type > TEGRA_ARI_CROSSOVER_CCP3_SC1))
179 		return EINVAL;
180 
181 	/* clean the previous response state */
182 	ari_clobber_response(ari_base);
183 
184 	/* update crossover threshold time */
185 	return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CROSSOVER,
186 			type, time);
187 }
188 
189 uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
190 {
191 	int ret;
192 
193 	/* sanity check crossover type */
194 	if (state == 0)
195 		return EINVAL;
196 
197 	/* clean the previous response state */
198 	ari_clobber_response(ari_base);
199 
200 	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_CSTATE_STATS, state, 0);
201 	if (ret != 0)
202 		return EINVAL;
203 
204 	return (uint64_t)ari_get_response_low(ari_base);
205 }
206 
207 int ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
208 {
209 	/* clean the previous response state */
210 	ari_clobber_response(ari_base);
211 
212 	/* write the cstate stats */
213 	return ari_request_wait(ari_base, 0, TEGRA_ARI_WRITE_CSTATE_STATS, state,
214 			stats);
215 }
216 
217 uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
218 {
219 	uint64_t resp;
220 	int ret;
221 
222 	/* clean the previous response state */
223 	ari_clobber_response(ari_base);
224 
225 	/* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
226 	if (cmd != TEGRA_ARI_MISC_ECHO)
227 		data = 0;
228 
229 	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MISC, cmd, data);
230 	if (ret)
231 		return (uint64_t)ret;
232 
233 	/* get the command response */
234 	resp = ari_get_response_low(ari_base);
235 	resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
236 
237 	return resp;
238 }
239 
240 int ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
241 {
242 	int ret;
243 
244 	/* clean the previous response state */
245 	ari_clobber_response(ari_base);
246 
247 	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7,
248 			wake_time);
249 	if (ret) {
250 		ERROR("%s: failed (%d)\n", __func__, ret);
251 		return 0;
252 	}
253 
254 	/* 1 = CCx allowed, 0 = CCx not allowed */
255 	return (ari_get_response_low(ari_base) & 0x1);
256 }
257 
258 int ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
259 {
260 	int ret;
261 
262 	/* check for allowed power state */
263 	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
264 	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
265 		ERROR("%s: unknown cstate (%d)\n", __func__, state);
266 		return EINVAL;
267 	}
268 
269 	/* clean the previous response state */
270 	ari_clobber_response(ari_base);
271 
272 	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_SC7_ALLOWED, state,
273 			wake_time);
274 	if (ret) {
275 		ERROR("%s: failed (%d)\n", __func__, ret);
276 		return 0;
277 	}
278 
279 	/* 1 = SC7 allowed, 0 = SC7 not allowed */
280 	return !!ari_get_response_low(ari_base);
281 }
282 
283 int ari_online_core(uint32_t ari_base, uint32_t core)
284 {
285 	int cpu = read_mpidr() & MPIDR_CPU_MASK;
286 	int cluster = (read_mpidr() & MPIDR_CLUSTER_MASK) >>
287 			MPIDR_AFFINITY_BITS;
288 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
289 
290 	/* construct the current CPU # */
291 	cpu |= (cluster << 2);
292 
293 	/* sanity check target core id */
294 	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
295 		ERROR("%s: unsupported core id (%d)\n", __func__, core);
296 		return EINVAL;
297 	}
298 
299 	/*
300 	 * The Denver cluster has 2 CPUs only - 0, 1.
301 	 */
302 	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
303 		ERROR("%s: unknown core id (%d)\n", __func__, core);
304 		return EINVAL;
305 	}
306 
307 	/* clean the previous response state */
308 	ari_clobber_response(ari_base);
309 
310 	return ari_request_wait(ari_base, 0, TEGRA_ARI_ONLINE_CORE, core, 0);
311 }
312 
313 int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
314 {
315 	int val;
316 
317 	/* clean the previous response state */
318 	ari_clobber_response(ari_base);
319 
320 	/*
321 	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
322 	 * the SW visible voltage/frequency request registers for all non
323 	 * floorswept cores valid independent of StandbyWFI and disabling
324 	 * the IDLE voltage/frequency request register. If set, Auto-CC3
325 	 * will be enabled by setting the ARM SW visible voltage/frequency
326 	 * request registers for all non floorswept cores to be enabled by
327 	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
328 	 * voltage/frequency request register enabled.
329 	 */
330 	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
331 		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
332 		(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
333 
334 	return ari_request_wait(ari_base, 0, TEGRA_ARI_CC3_CTRL, val, 0);
335 }
336 
337 int ari_reset_vector_update(uint32_t ari_base)
338 {
339 	/* clean the previous response state */
340 	ari_clobber_response(ari_base);
341 
342 	/*
343 	 * Need to program the CPU reset vector one time during cold boot
344 	 * and SC7 exit
345 	 */
346 	ari_request_wait(ari_base, 0, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0, 0);
347 
348 	return 0;
349 }
350 
351 int ari_roc_flush_cache_trbits(uint32_t ari_base)
352 {
353 	/* clean the previous response state */
354 	ari_clobber_response(ari_base);
355 
356 	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
357 			0, 0);
358 }
359 
360 int ari_roc_flush_cache(uint32_t ari_base)
361 {
362 	/* clean the previous response state */
363 	ari_clobber_response(ari_base);
364 
365 	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
366 			0, 0);
367 }
368 
369 int ari_roc_clean_cache(uint32_t ari_base)
370 {
371 	/* clean the previous response state */
372 	ari_clobber_response(ari_base);
373 
374 	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
375 			0, 0);
376 }
377 
/*
 * Issue a Machine Check Architecture (MCA) read or write command.
 *
 * cmd   - MCA command descriptor; its two 32-bit input words are staged
 *         in the ARI response registers (the MCA protocol reuses them as
 *         command registers, which is also why this function does not
 *         call ari_clobber_response() like its siblings).
 * data  - in: value to write (write commands); out: value read back
 *         (read commands). May be NULL when no data transfer is needed.
 *
 * Returns the MCA error code when the command did not finish, 0
 * otherwise. NOTE(review): 0 is returned both on success and when
 * ari_request_wait() fails - callers cannot distinguish the two.
 */
uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data)
{
	mca_arg_t mca_arg;
	int ret;

	/* Set data (write) - 0 when the caller passed no buffer */
	mca_arg.data = data ? *data : 0ull;

	/* Set command (staged in the response registers, see above) */
	ari_write_32(ari_base, cmd.input.low, ARI_RESPONSE_DATA_LO);
	ari_write_32(ari_base, cmd.input.high, ARI_RESPONSE_DATA_HI);

	/* the 64-bit data travels as the lo/hi request payload words */
	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MCA, mca_arg.arg.low,
			mca_arg.arg.high);
	if (!ret) {
		/* reload the union from the response to inspect the status */
		mca_arg.arg.low = ari_get_response_low(ari_base);
		mca_arg.arg.high = ari_get_response_high(ari_base);
		/* command did not finish: hand the MCA error code back */
		if (!mca_arg.err.finish)
			return (uint64_t)mca_arg.err.error;

		if (data) {
			/* read data comes back through the REQUEST registers */
			mca_arg.arg.low = ari_get_request_low(ari_base);
			mca_arg.arg.high = ari_get_request_high(ari_base);
			*data = mca_arg.data;
		}
	}

	return 0;
}
407 
408 int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
409 {
410 	/* sanity check GSC ID */
411 	if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX)
412 		return EINVAL;
413 
414 	/* clean the previous response state */
415 	ari_clobber_response(ari_base);
416 
417 	/*
418 	 * The MCE code will read the GSC carveout value, corrseponding to
419 	 * the ID, from the MC registers and update the internal GSC registers
420 	 * of the CCPLEX.
421 	 */
422 	ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0);
423 
424 	return 0;
425 }
426 
427 void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
428 {
429 	/* clean the previous response state */
430 	ari_clobber_response(ari_base);
431 
432 	/*
433 	 * The MCE will shutdown or restart the entire system
434 	 */
435 	(void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, state_idx, 0);
436 }
437 
438 int ari_read_write_uncore_perfmon(uint32_t ari_base,
439 		uncore_perfmon_req_t req, uint64_t *data)
440 {
441 	int ret;
442 	uint32_t val;
443 
444 	/* clean the previous response state */
445 	ari_clobber_response(ari_base);
446 
447 	/* sanity check input parameters */
448 	if (req.perfmon_command.cmd == UNCORE_PERFMON_CMD_READ && !data) {
449 		ERROR("invalid parameters\n");
450 		return EINVAL;
451 	}
452 
453 	/*
454 	 * For "write" commands get the value that has to be written
455 	 * to the uncore perfmon registers
456 	 */
457 	val = (req.perfmon_command.cmd == UNCORE_PERFMON_CMD_WRITE) ?
458 		*data : 0;
459 
460 	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_PERFMON, val, req.data);
461 	if (ret)
462 		return ret;
463 
464 	/* read the command status value */
465 	req.perfmon_status.val = ari_get_response_high(ari_base) &
466 				 UNCORE_PERFMON_RESP_STATUS_MASK;
467 
468 	/*
469 	 * For "read" commands get the data from the uncore
470 	 * perfmon registers
471 	 */
472 	if ((req.perfmon_status.val == 0) && (req.perfmon_command.cmd ==
473 	     UNCORE_PERFMON_CMD_READ))
474 		*data = ari_get_response_low(ari_base);
475 
476 	return (int)req.perfmon_status.val;
477 }
478 
479 void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
480 {
481 	/*
482 	 * This invokes the ARI_MISC_CCPLEX commands. This can be
483 	 * used to enable/disable coresight clock gating.
484 	 */
485 
486 	if ((index > TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) ||
487 		((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) &&
488 		(value > 1))) {
489 		ERROR("%s: invalid parameters \n", __func__);
490 		return;
491 	}
492 
493 	/* clean the previous response state */
494 	ari_clobber_response(ari_base);
495 	(void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, index, value);
496 }
497