xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision 742ca2307f4e9f82cb2c21518819425e5bcc0f90)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

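/*
 * Per-core snapshot of the AMU state that would otherwise be lost when a
 * core is powered down: the counter values, their virtual offsets, and
 * which counters were enabled.
 */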
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

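/* One context per core, indexed by the core's linear position. */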
static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

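/*
 * The enable fields are bitmasks with one bit per counter, so they must be
 * wide enough to hold a bit for every counter in their group.
 */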
CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) >= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) >= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

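/*
 * Helpers for the system register fields used below. ID_AA64PFR0_EL1.AMU
 * reports the AMU version: 0 when the extension is absent, 1 for FEAT_AMUv1
 * and 2 for FEAT_AMUv1p1.
 */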
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

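/*
 * AMCFGR_EL0.NCG holds the number of implemented counter groups minus one,
 * so a non-zero value means the auxiliary counter group (group 1) exists.
 */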
static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

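/*
 * The VOFF field of AMCG1IDR_EL0 is a bitmask of the group 1 counters that
 * implement a virtual offset register.
 */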
static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

static __unused bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static __unused bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t id_aa64pfr0_el1_amu;		/* AMU version */

	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve the CPTR_EL3 value from the context pointed to by 'ctx'
	 * and set CPTR_EL3.TAM to zero so that accesses to the Activity
	 * Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;
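	/*
	 * For example, a core whose AMCGCR_EL0.CG0NC field reads 4 (the
	 * architected counter count defined by FEAT_AMUv1) produces the mask
	 * 0b1111 above, enabling group 0 counters 0 through 3.
	 */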

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

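	/*
	 * For instance, a platform might return the equivalent of the
	 * following from plat_amu_topology() to enable the first two
	 * auxiliary counters on every core (an illustrative sketch only;
	 * which counters exist and should be enabled is platform-defined):
	 *
	 *	static const struct amu_topology topology = {
	 *		.cores = { [0 ... PLATFORM_CORE_COUNT - 1] = {
	 *			.enable = 0x3
	 *		} },
	 *	};
	 */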
#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_el0_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_aa64pfr0_el1_amu < ID_AA64PFR0_AMU_V1P1) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1: counters 0, 2 and 3 have offset registers,
 * while counter 1 (the constant-frequency cycle counter) does not. This
 * switch will need to be updated if later revisions of FEAT_AMU add
 * additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %llu!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

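/*
 * Save this core's AMU context before power down: disable every counter,
 * then snapshot the counter values and any virtual offsets into the core's
 * amu_ctx entry.
 */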
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them. The offsets are
	 * stored packed, since not every counter has one.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

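/*
 * Mirror of amu_context_save: runs once the core finishes waking from a
 * power-down state, restoring the saved counters and offsets and then
 * re-enabling the counters that were enabled before suspension.
 */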
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */

	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them. As in the
	 * save path, the offsets are stored packed.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}

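/*
 * Hook the AMU into the PSCI power-down path so that counter state survives
 * a core power cycle: the context is saved as the core begins
 * suspend-to-power-down and restored once it resumes.
 */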
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);