/*
 * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
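
/*
 * The helpers below access individual fields of AMU-related system and
 * context registers. Readers extract a single field; writers perform a
 * read-modify-write so that only the named field is changed.
 */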

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx,
							uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif
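
/*
 * The non-static functions below form this file's interface: amu_enable() and
 * amu_enable_per_world() adjust the saved EL3 context so that lower ELs can
 * access the AMU, while amu_init_el3() and amu_init_el2_unused() program the
 * current core. The pubsub handlers at the end of the file save and restore
 * the counter state across a power-down suspend.
 */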

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3
		 */
		ctx_write_scr_el3_amvoffen(ctx, 1U);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}
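
/*
 * Initialize the AMU on the current core at EL3: enable all implemented
 * architected (group 0) counters, enable any auxiliary (group 1) counters
 * selected by the platform, and apply the FEAT_AMUv1p1 group 1 access
 * restriction when requested at build time.
 */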
void amu_init_el3(void)
{
	uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
	uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
	uint64_t num_ctr_groups = read_amcfgr_el0_ncg();

	/* Enable all architected counters by default */
	write_amcntenset0_el0_px(group0_en_mask);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (num_ctr_groups > 0U) {
		uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
		const struct amu_topology *topology;

		/*
		 * The platform may opt to enable specific auxiliary counters.
		 * This can be done via the common FCONF getter, or via the
		 * platform-implemented function.
		 */
#if ENABLE_AMU_FCONF
		topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
		topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

		if (topology != NULL) {
			unsigned int core_pos = plat_my_core_pos();

			amcntenset1_el0_px = topology->cores[core_pos].enable;
		} else {
			ERROR("AMU: failed to generate AMU topology\n");
		}

		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
	if (num_ctr_groups > 0U) {
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile-time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}
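
/*
 * For reference, when ENABLE_AMU_FCONF is disabled, a platform that wants
 * specific auxiliary counters enabled must provide plat_amu_topology(). A
 * minimal sketch could look as follows; the mask value 0x3U (the first two
 * group 1 counters on every core) is purely illustrative, and the exact
 * definition of struct amu_topology should be taken from
 * <lib/extensions/amu.h> rather than from this comment:
 *
 *	static const struct amu_topology topology_ = {
 *		.cores = {
 *			[0U ... PLATFORM_CORE_COUNT - 1U] = { .enable = 0x3U },
 *		},
 *	};
 *
 *	const struct amu_topology *plat_amu_topology(void)
 *	{
 *		return &topology_;
 *	}
 *
 * Returning NULL is treated by amu_init_el3() as a non-fatal error: it is
 * logged and no auxiliary counters are enabled.
 */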

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2_tam(0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
			"architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif
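
/*
 * Save the AMU state for the current core ahead of a power-down suspend:
 * record which counters are enabled, stop them, then capture the counter
 * values and (where supported) their virtual offsets into the per-core
 * amu_ctx structure.
 */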
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg; /* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if ((amcg1idr_el0_voff >> i) & 1U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}
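
/*
 * Restore the AMU state saved by amu_context_save() when the core resumes
 * from suspend: reload the counter values and virtual offsets, then re-enable
 * exactly the counters that were enabled before the power-down.
 */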
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */

	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcfgr_el0_ncg; /* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if ((amcg1idr_el0_voff >> i) & 1U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}
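
/*
 * Hook the save and restore handlers into the PSCI suspend power-down events
 * so that they run automatically around each core's suspend/resume cycle.
 */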
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);