/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

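/*
 * Per-core AMU context, used by the power management hooks below to save and
 * restore the counter values, virtual offsets and enable bitmasks across a
 * core power down.
 */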
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

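/*
 * Compile-time checks relating the width of the enable bitmasks above to the
 * architectural maximum number of counters in each group.
 */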
CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

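/*
 * Thin accessors for the individual fields of the AMU-related system
 * registers, so that the rest of this file can read and write named bitfields
 * rather than open-coding the shift/mask arithmetic.
 */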
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

static __unused bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static __unused bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t id_aa64pfr0_el1_amu;		/* AMU version */

	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	ctx_write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

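	/*
	 * For example, a CG0NC value of 4 yields an enable mask of 0b1111,
	 * selecting architected counters 0 to 3.
	 */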
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_el0_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) {
		if (el2_unused) {
			/*
			 * Make sure virtual offsets are disabled if EL2 not
			 * used.
			 */
			write_hcr_el2_amvoffen(0U);
		} else {
			/*
			 * Virtual offset registers are only accessible from
			 * EL3 and EL2. When this bit is clear, accesses to
			 * them from EL2 trap to EL3, so set it to 1 when EL2
			 * is present.
			 */
			ctx_write_scr_el3_amvoffen(ctx, 1U);
		}

#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL.
		 * This is controlled with the `AMU_RESTRICT_COUNTERS` compile
		 * time flag; when set, system register reads at lower ELs
		 * return zero. Reads from the memory mapped view are
		 * unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
			"architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

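/*
 * Save the AMU context for the current core. Registered below as a handler
 * for the PSCI suspend power-down start event, so that the counter state is
 * preserved before the core is powered off.
 */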
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

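	/*
	 * Architected counter 1 has no offset register, so `j` packs the
	 * offsets of the remaining counters contiguously into
	 * group0_voffsets[].
	 */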
	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

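/*
 * Restore the AMU context saved by amu_context_save(). Registered below as a
 * handler for the PSCI suspend power-down finish event, running once the core
 * has completed its wake-up.
 */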
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */

	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

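/*
 * Register the save/restore handlers with the EL3 runtime publish-subscribe
 * framework so that they run around PSCI suspend power down.
 */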
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);