xref: /arm-trusted-firmware/lib/el3_runtime/aarch64/context_mgmt.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
3*91f16700Schasinglulu  * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
4*91f16700Schasinglulu  *
5*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
6*91f16700Schasinglulu  */
7*91f16700Schasinglulu 
8*91f16700Schasinglulu #include <assert.h>
9*91f16700Schasinglulu #include <stdbool.h>
10*91f16700Schasinglulu #include <string.h>
11*91f16700Schasinglulu 
12*91f16700Schasinglulu #include <platform_def.h>
13*91f16700Schasinglulu 
14*91f16700Schasinglulu #include <arch.h>
15*91f16700Schasinglulu #include <arch_helpers.h>
16*91f16700Schasinglulu #include <arch_features.h>
17*91f16700Schasinglulu #include <bl31/interrupt_mgmt.h>
18*91f16700Schasinglulu #include <common/bl_common.h>
19*91f16700Schasinglulu #include <common/debug.h>
20*91f16700Schasinglulu #include <context.h>
21*91f16700Schasinglulu #include <drivers/arm/gicv3.h>
22*91f16700Schasinglulu #include <lib/el3_runtime/context_mgmt.h>
23*91f16700Schasinglulu #include <lib/el3_runtime/cpu_data.h>
24*91f16700Schasinglulu #include <lib/el3_runtime/pubsub_events.h>
25*91f16700Schasinglulu #include <lib/extensions/amu.h>
26*91f16700Schasinglulu #include <lib/extensions/brbe.h>
27*91f16700Schasinglulu #include <lib/extensions/mpam.h>
28*91f16700Schasinglulu #include <lib/extensions/pmuv3.h>
29*91f16700Schasinglulu #include <lib/extensions/sme.h>
30*91f16700Schasinglulu #include <lib/extensions/spe.h>
31*91f16700Schasinglulu #include <lib/extensions/sve.h>
32*91f16700Schasinglulu #include <lib/extensions/sys_reg_trace.h>
33*91f16700Schasinglulu #include <lib/extensions/trbe.h>
34*91f16700Schasinglulu #include <lib/extensions/trf.h>
35*91f16700Schasinglulu #include <lib/utils.h>
36*91f16700Schasinglulu 
37*91f16700Schasinglulu #if ENABLE_FEAT_TWED
38*91f16700Schasinglulu /* Make sure delay value fits within the range(0-15) */
39*91f16700Schasinglulu CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
40*91f16700Schasinglulu #endif /* ENABLE_FEAT_TWED */
41*91f16700Schasinglulu 
42*91f16700Schasinglulu per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
43*91f16700Schasinglulu static bool has_secure_perworld_init;
44*91f16700Schasinglulu 
45*91f16700Schasinglulu static void manage_extensions_nonsecure(cpu_context_t *ctx);
46*91f16700Schasinglulu static void manage_extensions_secure(cpu_context_t *ctx);
47*91f16700Schasinglulu static void manage_extensions_secure_per_world(void);
48*91f16700Schasinglulu 
/*******************************************************************************
 * Populate the EL1 system register area of 'ctx' with initial values derived
 * from the entry point information 'ep'. SCTLR_EL1 is rebuilt from its reset
 * value for the target execution state (AArch64 or AArch32), while ACTLR_EL1
 * is inherited from the current hardware value since it is implementation
 * defined.
 ******************************************************************************/
static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by PSCI specification)
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		/* AArch64 target: only the RES1 bits need setting on top of EE. */
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTRL_EL1.nTWE: Set to one so that EL0 execution of WFE
		 *  instructions are not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 *  instructions are not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If workaround of errata 764081 for Cortex-A75 is used then set
	 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
}
105*91f16700Schasinglulu 
/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx': interrupt routing bits and
 * the Allocation Tag access bit in SCR_EL3, the EL1 register context (unless
 * an SPMC owns S-EL2), and the secure-world extension state.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	/* Work on the SCR_EL3 copy already stored in the context. */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/* Get Memory Tagging Extension support level */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
	 * is set, or when MTE is only implemented at EL0.
	 */
#if CTX_INCLUDE_MTE_REGS
	/* Context-switching MTE registers only makes sense for ELx/asymmetric MTE. */
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	if (mte == MTE_IMPLEMENTED_EL0) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif /* CTX_INCLUDE_MTE_REGS */

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	/* Configure secure-world architectural extension state for this CPU. */
	manage_extensions_secure(ctx);

	/*
	 * manage_extensions_secure_per_world must be executed only once, as
	 * the registers it initialises hold a constant value across all CPUs
	 * for the secure world. This check ensures the registers are
	 * initialised once and avoids re-initialization from multiple cores.
	 * NOTE(review): has_secure_perworld_init is not updated in this
	 * function; presumably manage_extensions_secure_per_world() sets it -
	 * confirm against its definition.
	 */
	if (!has_secure_perworld_init) {
		manage_extensions_secure_per_world();
	}

}
167*91f16700Schasinglulu 
168*91f16700Schasinglulu #if ENABLE_RME
169*91f16700Schasinglulu /******************************************************************************
170*91f16700Schasinglulu  * This function performs initializations that are specific to REALM state
171*91f16700Schasinglulu  * and updates the cpu context specified by 'ctx'.
172*91f16700Schasinglulu  *****************************************************************************/
173*91f16700Schasinglulu static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
174*91f16700Schasinglulu {
175*91f16700Schasinglulu 	u_register_t scr_el3;
176*91f16700Schasinglulu 	el3_state_t *state;
177*91f16700Schasinglulu 
178*91f16700Schasinglulu 	state = get_el3state_ctx(ctx);
179*91f16700Schasinglulu 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
180*91f16700Schasinglulu 
181*91f16700Schasinglulu 	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;
182*91f16700Schasinglulu 
183*91f16700Schasinglulu 	if (is_feat_csv2_2_supported()) {
184*91f16700Schasinglulu 		/* Enable access to the SCXTNUM_ELx registers. */
185*91f16700Schasinglulu 		scr_el3 |= SCR_EnSCXT_BIT;
186*91f16700Schasinglulu 	}
187*91f16700Schasinglulu 
188*91f16700Schasinglulu 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
189*91f16700Schasinglulu }
190*91f16700Schasinglulu #endif /* ENABLE_RME */
191*91f16700Schasinglulu 
/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx': SCR_EL3 world-selection and
 * trap-control bits, the EL1 register context and, when EL2 registers are
 * context-switched, the initial EL2 register values.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	/* Work on the SCR_EL3 copy already stored in the context. */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * Pointer Authentication feature, if present, is always enabled by default
	 * for Non secure lower exception levels. We do not have an explicit
	 * flag to set it.
	 * CTX_INCLUDE_PAUTH_REGS flag, is explicitly used to enable for lower
	 * exception levels of secure and realm worlds.
	 *
	 * To prevent the leakage between the worlds during world switch,
	 * we enable it only for the non-secure world.
	 *
	 * If the Secure/realm world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
	 * it will be enabled globally for all the contexts.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 *  other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
	 *  than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;

#endif /* CTX_INCLUDE_PAUTH_REGS */

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2(from any security state)
	 * are trapped to EL3.
	 * Set here to trap only for NS EL1/EL2
	 *
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 *  indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize SCTLR_EL2 context register using Endianness value
	 * taken from the entrypoint attribute.
	 */
	u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	sctlr_el2 |= SCTLR_EL2_RES1;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2,
			sctlr_el2);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HCRX_EL2,
			HCRX_EL2_INIT_VAL);
	}

	if (is_feat_fgt_supported()) {
		/*
		 * Initialize HFG*_EL2 registers with a default value so legacy
		 * systems unaware of FEAT_FGT do not get trapped due to their lack
		 * of initialization for this feature.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGITR_EL2,
			HFGITR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGRTR_EL2,
			HFGRTR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGWTR_EL2,
			HFGWTR_EL2_INIT_VAL);
	}
#endif /* CTX_INCLUDE_EL2_REGS */

	/* Configure non-secure world architectural extension state. */
	manage_extensions_nonsecure(ctx);
}
310*91f16700Schasinglulu 
/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	state = get_el3state_ctx(ctx);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * The lower-EL context is zeroed so that no stale values leak to a world.
	 * It is assumed that an all-zero lower-EL context is good enough for it
	 * to boot correctly. However, there are very few registers where this
	 * is not true and some values need to be recreated.
	 */
#if CTX_INCLUDE_EL2_REGS
	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);

	/*
	 * These bits are set in the gicv3 driver. Losing them (especially the
	 * SRE bit) is problematic for all worlds, so recreate them here.
	 */
	u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_ctx_reg(el2_ctx, CTX_ICC_SRE_EL2, icc_sre_el2);
#endif /* CTX_INCLUDE_EL2_REGS */

	/* Start with a clean SCR_EL3 copy as all relevant values are set */
	scr_el3 = SCR_RESET_VAL;

	/*
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 *  both Security states and both Execution states.
	 *
	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
	 *  Non-secure memory.
	 */
	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);

	scr_el3 |= SCR_SIF_BIT;

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
	 *  Exception level as specified by SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped)
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * Enable Pointer Authentication globally for all the worlds.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 *  other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
	 *  than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
	 * next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

	/* Reset MPAM3_EL3 in the context when MPAM is implemented. */
	if (is_feat_mpam_supported()) {
		write_ctx_reg(get_el3state_ctx(ctx), CTX_MPAM3_EL3, \
				MPAM3_EL3_RESET_VAL);
	}

	/*
	 * Populate EL3 state so that we've the right context
	 * before doing ERET
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 value from the entrypoint into the context
	 * Use memcpy as we are in control of the layout of the structures
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
506*91f16700Schasinglulu 
507*91f16700Schasinglulu /*******************************************************************************
508*91f16700Schasinglulu  * Context management library initialization routine. This library is used by
509*91f16700Schasinglulu  * runtime services to share pointers to 'cpu_context' structures for secure
510*91f16700Schasinglulu  * non-secure and realm states. Management of the structures and their associated
511*91f16700Schasinglulu  * memory is not done by the context management library e.g. the PSCI service
512*91f16700Schasinglulu  * manages the cpu context used for entry from and exit to the non-secure state.
513*91f16700Schasinglulu  * The Secure payload dispatcher service manages the context(s) corresponding to
514*91f16700Schasinglulu  * the secure state. It also uses this library to get access to the non-secure
515*91f16700Schasinglulu  * state cpu context pointers.
516*91f16700Schasinglulu  * Lastly, this library provides the API to make SP_EL3 point to the cpu context
517*91f16700Schasinglulu  * which will be used for programming an entry into a lower EL. The same context
518*91f16700Schasinglulu  * will be used to save state upon exception entry from that EL.
519*91f16700Schasinglulu  ******************************************************************************/
520*91f16700Schasinglulu void __init cm_init(void)
521*91f16700Schasinglulu {
522*91f16700Schasinglulu 	/*
523*91f16700Schasinglulu 	 * The context management library has only global data to initialize, but
524*91f16700Schasinglulu 	 * that will be done when the BSS is zeroed out.
525*91f16700Schasinglulu 	 */
526*91f16700Schasinglulu }
527*91f16700Schasinglulu 
528*91f16700Schasinglulu /*******************************************************************************
529*91f16700Schasinglulu  * This is the high-level function used to initialize the cpu_context 'ctx' for
530*91f16700Schasinglulu  * first use. It performs initializations that are common to all security states
531*91f16700Schasinglulu  * and initializations specific to the security state specified in 'ep'
532*91f16700Schasinglulu  ******************************************************************************/
533*91f16700Schasinglulu void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
534*91f16700Schasinglulu {
535*91f16700Schasinglulu 	unsigned int security_state;
536*91f16700Schasinglulu 
537*91f16700Schasinglulu 	assert(ctx != NULL);
538*91f16700Schasinglulu 
539*91f16700Schasinglulu 	/*
540*91f16700Schasinglulu 	 * Perform initializations that are common
541*91f16700Schasinglulu 	 * to all security states
542*91f16700Schasinglulu 	 */
543*91f16700Schasinglulu 	setup_context_common(ctx, ep);
544*91f16700Schasinglulu 
545*91f16700Schasinglulu 	security_state = GET_SECURITY_STATE(ep->h.attr);
546*91f16700Schasinglulu 
547*91f16700Schasinglulu 	/* Perform security state specific initializations */
548*91f16700Schasinglulu 	switch (security_state) {
549*91f16700Schasinglulu 	case SECURE:
550*91f16700Schasinglulu 		setup_secure_context(ctx, ep);
551*91f16700Schasinglulu 		break;
552*91f16700Schasinglulu #if ENABLE_RME
553*91f16700Schasinglulu 	case REALM:
554*91f16700Schasinglulu 		setup_realm_context(ctx, ep);
555*91f16700Schasinglulu 		break;
556*91f16700Schasinglulu #endif
557*91f16700Schasinglulu 	case NON_SECURE:
558*91f16700Schasinglulu 		setup_ns_context(ctx, ep);
559*91f16700Schasinglulu 		break;
560*91f16700Schasinglulu 	default:
561*91f16700Schasinglulu 		ERROR("Invalid security state\n");
562*91f16700Schasinglulu 		panic();
563*91f16700Schasinglulu 		break;
564*91f16700Schasinglulu 	}
565*91f16700Schasinglulu }
566*91f16700Schasinglulu 
/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
	/* Statistical Profiling Extension (FEAT_SPE) */
	if (is_feat_spe_supported()) {
		spe_init_el3();
	}

	/* Activity Monitors Unit (FEAT_AMU) */
	if (is_feat_amu_supported()) {
		amu_init_el3();
	}

	/* Scalable Matrix Extension (FEAT_SME) */
	if (is_feat_sme_supported()) {
		sme_init_el3();
	}

	/* Trace Buffer Extension (FEAT_TRBE) */
	if (is_feat_trbe_supported()) {
		trbe_init_el3();
	}

	/* Branch Record Buffer Extension (FEAT_BRBE) */
	if (is_feat_brbe_supported()) {
		brbe_init_el3();
	}

	/* Trace Filter controls (FEAT_TRF) */
	if (is_feat_trf_supported()) {
		trf_init_el3();
	}

	/* PMUv3 is initialised unconditionally (no FEAT_STATE gate). */
	pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */
602*91f16700Schasinglulu 
/*******************************************************************************
 * Initialise per_world_context for Non-Secure world.
 * This function enables the architecture extensions, which have same value
 * across the cores for the non-secure world.
 ******************************************************************************/
#if IMAGE_BL31
void manage_extensions_nonsecure_per_world(void)
{
	/* Scalable Matrix Extension (FEAT_SME) */
	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	/* Scalable Vector Extension (FEAT_SVE) */
	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	/* Activity Monitors Unit (FEAT_AMU) */
	if (is_feat_amu_supported()) {
		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	/* System register trace: Non-secure world is allowed access. */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}
}
#endif /* IMAGE_BL31 */
628*91f16700Schasinglulu 
/*******************************************************************************
 * Initialise per_world_context for Secure world.
 * This function enables the architecture extensions, which have same value
 * across the cores for the secure world.
 ******************************************************************************/

static void manage_extensions_secure_per_world(void)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {

		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in secure context, SPM must ensure
			 * SME, SVE, and FPU/SIMD context properly managed.
			 */
			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in secure context so non-secure
			 * world can safely use the associated registers.
			 */
			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in secure context, SPM must ensure
			 * that the SVE and FPU register contexts are properly managed.
			 */
			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SVE and FPU in secure context so non-secure world
			 * can safely use them.
			 */
			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	/* NS can access this but Secure shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
	}

	/* Record that the secure per-world context has been initialised. */
	has_secure_perworld_init = true;
#endif /* IMAGE_BL31 */
}
678*91f16700Schasinglulu 
/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	/* Activity Monitors Unit (FEAT_AMU) */
	if (is_feat_amu_supported()) {
		amu_enable(ctx);
	}

	/* Scalable Matrix Extension (FEAT_SME) */
	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	/* Memory Partitioning And Monitoring (FEAT_MPAM) */
	if (is_feat_mpam_supported()) {
		mpam_enable(ctx);
	}
	/* PMUv3 is enabled unconditionally (no FEAT_STATE gate). */
	pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}
699*91f16700Schasinglulu 
700*91f16700Schasinglulu /* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
701*91f16700Schasinglulu static __unused void enable_pauth_el2(void)
702*91f16700Schasinglulu {
703*91f16700Schasinglulu 	u_register_t hcr_el2 = read_hcr_el2();
704*91f16700Schasinglulu 	/*
705*91f16700Schasinglulu 	 * For Armv8.3 pointer authentication feature, disable traps to EL2 when
706*91f16700Schasinglulu 	 *  accessing key registers or using pointer authentication instructions
707*91f16700Schasinglulu 	 *  from lower ELs.
708*91f16700Schasinglulu 	 */
709*91f16700Schasinglulu 	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);
710*91f16700Schasinglulu 
711*91f16700Schasinglulu 	write_hcr_el2(hcr_el2);
712*91f16700Schasinglulu }
713*91f16700Schasinglulu 
#if INIT_UNUSED_NS_EL2
/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to Non-secure
 * world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
	/* Statistical Profiling Extension (FEAT_SPE) */
	if (is_feat_spe_supported()) {
		spe_init_el2_unused();
	}

	/* Activity Monitors Unit (FEAT_AMU) */
	if (is_feat_amu_supported()) {
		amu_init_el2_unused();
	}

	/* Memory Partitioning And Monitoring (FEAT_MPAM) */
	if (is_feat_mpam_supported()) {
		mpam_init_el2_unused();
	}

	/* Trace Buffer Extension (FEAT_TRBE) */
	if (is_feat_trbe_supported()) {
		trbe_init_el2_unused();
	}

	/* System register trace */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el2_unused();
	}

	/* Trace Filter controls (FEAT_TRF) */
	if (is_feat_trf_supported()) {
		trf_init_el2_unused();
	}

	/* PMUv3 is initialised unconditionally (no FEAT_STATE gate). */
	pmuv3_init_el2_unused();

	/* Scalable Vector Extension (FEAT_SVE) */
	if (is_feat_sve_supported()) {
		sve_init_el2_unused();
	}

	/* Scalable Matrix Extension (FEAT_SME) */
	if (is_feat_sme_supported()) {
		sme_init_el2_unused();
	}

#if ENABLE_PAUTH
	enable_pauth_el2();
#endif /* ENABLE_PAUTH */
#endif /* IMAGE_BL31 */
}
#endif /* INIT_UNUSED_NS_EL2 */
762*91f16700Schasinglulu 
/*******************************************************************************
 * Enable architecture extensions on first entry to Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in secure context, secure manager
			 * must ensure SME, SVE, and FPU/SIMD context properly managed.
			 */
			sme_init_el3();
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in secure context so non-secure
			 * world can safely use the associated registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}
787*91f16700Schasinglulu 
788*91f16700Schasinglulu /*******************************************************************************
789*91f16700Schasinglulu  * The following function initializes the cpu_context for a CPU specified by
790*91f16700Schasinglulu  * its `cpu_idx` for first use, and sets the initial entrypoint state as
791*91f16700Schasinglulu  * specified by the entry_point_info structure.
792*91f16700Schasinglulu  ******************************************************************************/
793*91f16700Schasinglulu void cm_init_context_by_index(unsigned int cpu_idx,
794*91f16700Schasinglulu 			      const entry_point_info_t *ep)
795*91f16700Schasinglulu {
796*91f16700Schasinglulu 	cpu_context_t *ctx;
797*91f16700Schasinglulu 	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
798*91f16700Schasinglulu 	cm_setup_context(ctx, ep);
799*91f16700Schasinglulu }
800*91f16700Schasinglulu 
801*91f16700Schasinglulu /*******************************************************************************
802*91f16700Schasinglulu  * The following function initializes the cpu_context for the current CPU
803*91f16700Schasinglulu  * for first use, and sets the initial entrypoint state as specified by the
804*91f16700Schasinglulu  * entry_point_info structure.
805*91f16700Schasinglulu  ******************************************************************************/
806*91f16700Schasinglulu void cm_init_my_context(const entry_point_info_t *ep)
807*91f16700Schasinglulu {
808*91f16700Schasinglulu 	cpu_context_t *ctx;
809*91f16700Schasinglulu 	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
810*91f16700Schasinglulu 	cm_setup_context(ctx, ep);
811*91f16700Schasinglulu }
812*91f16700Schasinglulu 
/* EL2 present but unused, need to disable safely. SCTLR_EL2 can be ignored */
static void init_nonsecure_el2_unused(cpu_context_t *ctx)
{
#if INIT_UNUSED_NS_EL2
	u_register_t hcr_el2 = HCR_RESET_VAL;
	u_register_t mdcr_el2;
	u_register_t scr_el3;

	/* The SCR_EL3 value saved in the context drives the EL2 width. */
	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
	if ((scr_el3 & SCR_RW_BIT) != 0U) {
		hcr_el2 |= HCR_RW_BIT;
	}

	write_hcr_el2(hcr_el2);

	/*
	 * Initialise CPTR_EL2 setting all fields rather than relying on the hw.
	 * All fields have architecturally UNKNOWN reset values.
	 */
	write_cptr_el2(CPTR_EL2_RESET_VAL);

	/*
	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
	 * reset and are set to zero except for field(s) listed below.
	 *
	 * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
	 *
	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
	 */
	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);

	/*
	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
	 * UNKNOWN value.
	 */
	write_cntvoff_el2(0);

	/*
	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
	 * respectively.
	 */
	write_vpidr_el2(read_midr_el1());
	write_vmpidr_el2(read_mpidr_el1());

	/*
	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
	 * translation is disabled, cache maintenance operations depend on the
	 * VMID.
	 *
	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is
	 * disabled.
	 */
	write_vttbr_el2(VTTBR_RESET_VAL &
		     ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
		       (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

	/*
	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
	 * register accesses to the Debug ROM registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
	 * accesses to the powerdown debug registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
	 * debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to
	 * EL2.
	 */
	mdcr_el2 = MDCR_EL2_RESET_VAL &
		 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
		   MDCR_EL2_TDE_BIT);

	write_mdcr_el2(mdcr_el2);

	/*
	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or
	 * EL1 accesses to System registers do not trap to EL2.
	 */
	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));

	/*
	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * CNTHP_CTL_EL2:ENABLE: Set to zero to disable the EL2 physical timer
	 * and prevent timer interrupts.
	 */
	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));

	/* Finally apply the per-extension EL2 state for the unused-EL2 case. */
	manage_extensions_nonsecure_el2_unused();
#endif /* INIT_UNUSED_NS_EL2 */
}
917*91f16700Schasinglulu 
/*******************************************************************************
 * Prepare the CPU system registers for first entry into realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
 * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
 * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
 * For all entries, the EL1 registers are initialized from the cpu_context
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
						 CTX_SCR_EL3);

		/*
		 * EL2 registers are set up when EL2 is going to be used
		 * (SCR_EL3.HCE set) or when the PE implements EL2 at all.
		 */
		if (((scr_el3 & SCR_HCE_BIT) != 0U)
			|| (el2_implemented != EL_IMPL_NONE)) {
			/*
			 * If context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize Fine-grained trap registers introduced
			 * by FEAT_FGT so all traps are initially disabled when
			 * switching to EL2 or a lower EL, preventing undesired
			 * behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}
		}


		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
							   CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If workaround of errata 764081 for Cortex-A75 is used
			 * then set SCTLR_EL2.IESB to enable Implicit Error
			 * Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el2_implemented != EL_IMPL_NONE) {
			/* EL2 is present but unused: disable it safely. */
			init_nonsecure_el2_unused(ctx);
		}
	}

	/* Restore EL1 state from the context and select it for ERET. */
	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}
993*91f16700Schasinglulu 
994*91f16700Schasinglulu #if CTX_INCLUDE_EL2_REGS
995*91f16700Schasinglulu 
996*91f16700Schasinglulu static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
997*91f16700Schasinglulu {
998*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_HDFGRTR_EL2, read_hdfgrtr_el2());
999*91f16700Schasinglulu 	if (is_feat_amu_supported()) {
1000*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_HAFGRTR_EL2, read_hafgrtr_el2());
1001*91f16700Schasinglulu 	}
1002*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_HDFGWTR_EL2, read_hdfgwtr_el2());
1003*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_HFGITR_EL2, read_hfgitr_el2());
1004*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_HFGRTR_EL2, read_hfgrtr_el2());
1005*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_HFGWTR_EL2, read_hfgwtr_el2());
1006*91f16700Schasinglulu }
1007*91f16700Schasinglulu 
1008*91f16700Schasinglulu static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
1009*91f16700Schasinglulu {
1010*91f16700Schasinglulu 	write_hdfgrtr_el2(read_ctx_reg(ctx, CTX_HDFGRTR_EL2));
1011*91f16700Schasinglulu 	if (is_feat_amu_supported()) {
1012*91f16700Schasinglulu 		write_hafgrtr_el2(read_ctx_reg(ctx, CTX_HAFGRTR_EL2));
1013*91f16700Schasinglulu 	}
1014*91f16700Schasinglulu 	write_hdfgwtr_el2(read_ctx_reg(ctx, CTX_HDFGWTR_EL2));
1015*91f16700Schasinglulu 	write_hfgitr_el2(read_ctx_reg(ctx, CTX_HFGITR_EL2));
1016*91f16700Schasinglulu 	write_hfgrtr_el2(read_ctx_reg(ctx, CTX_HFGRTR_EL2));
1017*91f16700Schasinglulu 	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
1018*91f16700Schasinglulu }
1019*91f16700Schasinglulu 
1020*91f16700Schasinglulu static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
1021*91f16700Schasinglulu {
1022*91f16700Schasinglulu 	u_register_t mpam_idr = read_mpamidr_el1();
1023*91f16700Schasinglulu 
1024*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());
1025*91f16700Schasinglulu 
1026*91f16700Schasinglulu 	/*
1027*91f16700Schasinglulu 	 * The context registers that we intend to save would be part of the
1028*91f16700Schasinglulu 	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
1029*91f16700Schasinglulu 	 */
1030*91f16700Schasinglulu 	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
1031*91f16700Schasinglulu 		return;
1032*91f16700Schasinglulu 	}
1033*91f16700Schasinglulu 
1034*91f16700Schasinglulu 	/*
1035*91f16700Schasinglulu 	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
1036*91f16700Schasinglulu 	 * MPAMIDR_HAS_HCR_BIT == 1.
1037*91f16700Schasinglulu 	 */
1038*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
1039*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
1040*91f16700Schasinglulu 	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());
1041*91f16700Schasinglulu 
1042*91f16700Schasinglulu 	/*
1043*91f16700Schasinglulu 	 * The number of MPAMVPM registers is implementation defined, their
1044*91f16700Schasinglulu 	 * number is stored in the MPAMIDR_EL1 register.
1045*91f16700Schasinglulu 	 */
1046*91f16700Schasinglulu 	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
1047*91f16700Schasinglulu 	case 7:
1048*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
1049*91f16700Schasinglulu 		__fallthrough;
1050*91f16700Schasinglulu 	case 6:
1051*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
1052*91f16700Schasinglulu 		__fallthrough;
1053*91f16700Schasinglulu 	case 5:
1054*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
1055*91f16700Schasinglulu 		__fallthrough;
1056*91f16700Schasinglulu 	case 4:
1057*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
1058*91f16700Schasinglulu 		__fallthrough;
1059*91f16700Schasinglulu 	case 3:
1060*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
1061*91f16700Schasinglulu 		__fallthrough;
1062*91f16700Schasinglulu 	case 2:
1063*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
1064*91f16700Schasinglulu 		__fallthrough;
1065*91f16700Schasinglulu 	case 1:
1066*91f16700Schasinglulu 		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
1067*91f16700Schasinglulu 		break;
1068*91f16700Schasinglulu 	}
1069*91f16700Schasinglulu }
1070*91f16700Schasinglulu 
1071*91f16700Schasinglulu static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
1072*91f16700Schasinglulu {
1073*91f16700Schasinglulu 	u_register_t mpam_idr = read_mpamidr_el1();
1074*91f16700Schasinglulu 
1075*91f16700Schasinglulu 	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));
1076*91f16700Schasinglulu 
1077*91f16700Schasinglulu 	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
1078*91f16700Schasinglulu 		return;
1079*91f16700Schasinglulu 	}
1080*91f16700Schasinglulu 
1081*91f16700Schasinglulu 	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
1082*91f16700Schasinglulu 	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
1083*91f16700Schasinglulu 	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));
1084*91f16700Schasinglulu 
1085*91f16700Schasinglulu 	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
1086*91f16700Schasinglulu 	case 7:
1087*91f16700Schasinglulu 		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
1088*91f16700Schasinglulu 		__fallthrough;
1089*91f16700Schasinglulu 	case 6:
1090*91f16700Schasinglulu 		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
1091*91f16700Schasinglulu 		__fallthrough;
1092*91f16700Schasinglulu 	case 5:
1093*91f16700Schasinglulu 		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
1094*91f16700Schasinglulu 		__fallthrough;
1095*91f16700Schasinglulu 	case 4:
1096*91f16700Schasinglulu 		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
1097*91f16700Schasinglulu 		__fallthrough;
1098*91f16700Schasinglulu 	case 3:
1099*91f16700Schasinglulu 		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
1100*91f16700Schasinglulu 		__fallthrough;
1101*91f16700Schasinglulu 	case 2:
1102*91f16700Schasinglulu 		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
1103*91f16700Schasinglulu 		__fallthrough;
1104*91f16700Schasinglulu 	case 1:
1105*91f16700Schasinglulu 		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
1106*91f16700Schasinglulu 		break;
1107*91f16700Schasinglulu 	}
1108*91f16700Schasinglulu }
1109*91f16700Schasinglulu 
/* -----------------------------------------------------
 * Save into 'ctx' the EL2 system registers common to all
 * configurations. The following registers are not added:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_ACTLR_EL2, read_actlr_el2());
	write_ctx_reg(ctx, CTX_AFSR0_EL2, read_afsr0_el2());
	write_ctx_reg(ctx, CTX_AFSR1_EL2, read_afsr1_el2());
	write_ctx_reg(ctx, CTX_AMAIR_EL2, read_amair_el2());
	write_ctx_reg(ctx, CTX_CNTHCTL_EL2, read_cnthctl_el2());
	write_ctx_reg(ctx, CTX_CNTVOFF_EL2, read_cntvoff_el2());
	write_ctx_reg(ctx, CTX_CPTR_EL2, read_cptr_el2());
	/* DBGVCR32_EL2 is only saved when AArch32 state is supported. */
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_ctx_reg(ctx, CTX_DBGVCR32_EL2, read_dbgvcr32_el2());
	}
	write_ctx_reg(ctx, CTX_ELR_EL2, read_elr_el2());
	write_ctx_reg(ctx, CTX_ESR_EL2, read_esr_el2());
	write_ctx_reg(ctx, CTX_FAR_EL2, read_far_el2());
	write_ctx_reg(ctx, CTX_HACR_EL2, read_hacr_el2());
	write_ctx_reg(ctx, CTX_HCR_EL2, read_hcr_el2());
	write_ctx_reg(ctx, CTX_HPFAR_EL2, read_hpfar_el2());
	write_ctx_reg(ctx, CTX_HSTR_EL2, read_hstr_el2());

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_ctx_reg(ctx, CTX_ICC_SRE_EL2, read_icc_sre_el2());

	/* Restore the original SCR_EL3 value before continuing. */
	write_scr_el3(scr_el3);
	isb();

	write_ctx_reg(ctx, CTX_ICH_HCR_EL2, read_ich_hcr_el2());
	write_ctx_reg(ctx, CTX_ICH_VMCR_EL2, read_ich_vmcr_el2());
	write_ctx_reg(ctx, CTX_MAIR_EL2, read_mair_el2());
	write_ctx_reg(ctx, CTX_MDCR_EL2, read_mdcr_el2());
	write_ctx_reg(ctx, CTX_SCTLR_EL2, read_sctlr_el2());
	write_ctx_reg(ctx, CTX_SPSR_EL2, read_spsr_el2());
	write_ctx_reg(ctx, CTX_SP_EL2, read_sp_el2());
	write_ctx_reg(ctx, CTX_TCR_EL2, read_tcr_el2());
	write_ctx_reg(ctx, CTX_TPIDR_EL2, read_tpidr_el2());
	write_ctx_reg(ctx, CTX_TTBR0_EL2, read_ttbr0_el2());
	write_ctx_reg(ctx, CTX_VBAR_EL2, read_vbar_el2());
	write_ctx_reg(ctx, CTX_VMPIDR_EL2, read_vmpidr_el2());
	write_ctx_reg(ctx, CTX_VPIDR_EL2, read_vpidr_el2());
	write_ctx_reg(ctx, CTX_VTCR_EL2, read_vtcr_el2());
	write_ctx_reg(ctx, CTX_VTTBR_EL2, read_vttbr_el2());
}
1168*91f16700Schasinglulu 
1169*91f16700Schasinglulu static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
1170*91f16700Schasinglulu {
1171*91f16700Schasinglulu 	write_actlr_el2(read_ctx_reg(ctx, CTX_ACTLR_EL2));
1172*91f16700Schasinglulu 	write_afsr0_el2(read_ctx_reg(ctx, CTX_AFSR0_EL2));
1173*91f16700Schasinglulu 	write_afsr1_el2(read_ctx_reg(ctx, CTX_AFSR1_EL2));
1174*91f16700Schasinglulu 	write_amair_el2(read_ctx_reg(ctx, CTX_AMAIR_EL2));
1175*91f16700Schasinglulu 	write_cnthctl_el2(read_ctx_reg(ctx, CTX_CNTHCTL_EL2));
1176*91f16700Schasinglulu 	write_cntvoff_el2(read_ctx_reg(ctx, CTX_CNTVOFF_EL2));
1177*91f16700Schasinglulu 	write_cptr_el2(read_ctx_reg(ctx, CTX_CPTR_EL2));
1178*91f16700Schasinglulu 	if (CTX_INCLUDE_AARCH32_REGS) {
1179*91f16700Schasinglulu 		write_dbgvcr32_el2(read_ctx_reg(ctx, CTX_DBGVCR32_EL2));
1180*91f16700Schasinglulu 	}
1181*91f16700Schasinglulu 	write_elr_el2(read_ctx_reg(ctx, CTX_ELR_EL2));
1182*91f16700Schasinglulu 	write_esr_el2(read_ctx_reg(ctx, CTX_ESR_EL2));
1183*91f16700Schasinglulu 	write_far_el2(read_ctx_reg(ctx, CTX_FAR_EL2));
1184*91f16700Schasinglulu 	write_hacr_el2(read_ctx_reg(ctx, CTX_HACR_EL2));
1185*91f16700Schasinglulu 	write_hcr_el2(read_ctx_reg(ctx, CTX_HCR_EL2));
1186*91f16700Schasinglulu 	write_hpfar_el2(read_ctx_reg(ctx, CTX_HPFAR_EL2));
1187*91f16700Schasinglulu 	write_hstr_el2(read_ctx_reg(ctx, CTX_HSTR_EL2));
1188*91f16700Schasinglulu 
1189*91f16700Schasinglulu 	/*
1190*91f16700Schasinglulu 	 * Set the NS bit to be able to access the ICC_SRE_EL2 register
1191*91f16700Schasinglulu 	 * TODO: remove with root context
1192*91f16700Schasinglulu 	 */
1193*91f16700Schasinglulu 	u_register_t scr_el3 = read_scr_el3();
1194*91f16700Schasinglulu 
1195*91f16700Schasinglulu 	write_scr_el3(scr_el3 | SCR_NS_BIT);
1196*91f16700Schasinglulu 	isb();
1197*91f16700Schasinglulu 	write_icc_sre_el2(read_ctx_reg(ctx, CTX_ICC_SRE_EL2));
1198*91f16700Schasinglulu 
1199*91f16700Schasinglulu 	write_scr_el3(scr_el3);
1200*91f16700Schasinglulu 	isb();
1201*91f16700Schasinglulu 
1202*91f16700Schasinglulu 	write_ich_hcr_el2(read_ctx_reg(ctx, CTX_ICH_HCR_EL2));
1203*91f16700Schasinglulu 	write_ich_vmcr_el2(read_ctx_reg(ctx, CTX_ICH_VMCR_EL2));
1204*91f16700Schasinglulu 	write_mair_el2(read_ctx_reg(ctx, CTX_MAIR_EL2));
1205*91f16700Schasinglulu 	write_mdcr_el2(read_ctx_reg(ctx, CTX_MDCR_EL2));
1206*91f16700Schasinglulu 	write_sctlr_el2(read_ctx_reg(ctx, CTX_SCTLR_EL2));
1207*91f16700Schasinglulu 	write_spsr_el2(read_ctx_reg(ctx, CTX_SPSR_EL2));
1208*91f16700Schasinglulu 	write_sp_el2(read_ctx_reg(ctx, CTX_SP_EL2));
1209*91f16700Schasinglulu 	write_tcr_el2(read_ctx_reg(ctx, CTX_TCR_EL2));
1210*91f16700Schasinglulu 	write_tpidr_el2(read_ctx_reg(ctx, CTX_TPIDR_EL2));
1211*91f16700Schasinglulu 	write_ttbr0_el2(read_ctx_reg(ctx, CTX_TTBR0_EL2));
1212*91f16700Schasinglulu 	write_vbar_el2(read_ctx_reg(ctx, CTX_VBAR_EL2));
1213*91f16700Schasinglulu 	write_vmpidr_el2(read_ctx_reg(ctx, CTX_VMPIDR_EL2));
1214*91f16700Schasinglulu 	write_vpidr_el2(read_ctx_reg(ctx, CTX_VPIDR_EL2));
1215*91f16700Schasinglulu 	write_vtcr_el2(read_ctx_reg(ctx, CTX_VTCR_EL2));
1216*91f16700Schasinglulu 	write_vttbr_el2(read_ctx_reg(ctx, CTX_VTTBR_EL2));
1217*91f16700Schasinglulu }
1218*91f16700Schasinglulu 
1219*91f16700Schasinglulu /*******************************************************************************
1220*91f16700Schasinglulu  * Save EL2 sysreg context
1221*91f16700Schasinglulu  ******************************************************************************/
1222*91f16700Schasinglulu void cm_el2_sysregs_context_save(uint32_t security_state)
1223*91f16700Schasinglulu {
1224*91f16700Schasinglulu 	cpu_context_t *ctx;
1225*91f16700Schasinglulu 	el2_sysregs_t *el2_sysregs_ctx;
1226*91f16700Schasinglulu 
1227*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1228*91f16700Schasinglulu 	assert(ctx != NULL);
1229*91f16700Schasinglulu 
1230*91f16700Schasinglulu 	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
1231*91f16700Schasinglulu 
1232*91f16700Schasinglulu 	el2_sysregs_context_save_common(el2_sysregs_ctx);
1233*91f16700Schasinglulu #if CTX_INCLUDE_MTE_REGS
1234*91f16700Schasinglulu 	write_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2, read_tfsr_el2());
1235*91f16700Schasinglulu #endif
1236*91f16700Schasinglulu 	if (is_feat_mpam_supported()) {
1237*91f16700Schasinglulu 		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
1238*91f16700Schasinglulu 	}
1239*91f16700Schasinglulu 
1240*91f16700Schasinglulu 	if (is_feat_fgt_supported()) {
1241*91f16700Schasinglulu 		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
1242*91f16700Schasinglulu 	}
1243*91f16700Schasinglulu 
1244*91f16700Schasinglulu 	if (is_feat_ecv_v2_supported()) {
1245*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2, read_cntpoff_el2());
1246*91f16700Schasinglulu 	}
1247*91f16700Schasinglulu 
1248*91f16700Schasinglulu 	if (is_feat_vhe_supported()) {
1249*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2, read_contextidr_el2());
1250*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2, read_ttbr1_el2());
1251*91f16700Schasinglulu 	}
1252*91f16700Schasinglulu 
1253*91f16700Schasinglulu 	if (is_feat_ras_supported()) {
1254*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2, read_vdisr_el2());
1255*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2, read_vsesr_el2());
1256*91f16700Schasinglulu 	}
1257*91f16700Schasinglulu 
1258*91f16700Schasinglulu 	if (is_feat_nv2_supported()) {
1259*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2, read_vncr_el2());
1260*91f16700Schasinglulu 	}
1261*91f16700Schasinglulu 
1262*91f16700Schasinglulu 	if (is_feat_trf_supported()) {
1263*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
1264*91f16700Schasinglulu 	}
1265*91f16700Schasinglulu 
1266*91f16700Schasinglulu 	if (is_feat_csv2_2_supported()) {
1267*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2, read_scxtnum_el2());
1268*91f16700Schasinglulu 	}
1269*91f16700Schasinglulu 
1270*91f16700Schasinglulu 	if (is_feat_hcx_supported()) {
1271*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
1272*91f16700Schasinglulu 	}
1273*91f16700Schasinglulu 	if (is_feat_tcr2_supported()) {
1274*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
1275*91f16700Schasinglulu 	}
1276*91f16700Schasinglulu 	if (is_feat_sxpie_supported()) {
1277*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
1278*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
1279*91f16700Schasinglulu 	}
1280*91f16700Schasinglulu 	if (is_feat_s2pie_supported()) {
1281*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
1282*91f16700Schasinglulu 	}
1283*91f16700Schasinglulu 	if (is_feat_sxpoe_supported()) {
1284*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
1285*91f16700Schasinglulu 	}
1286*91f16700Schasinglulu 	if (is_feat_gcs_supported()) {
1287*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
1288*91f16700Schasinglulu 		write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
1289*91f16700Schasinglulu 	}
1290*91f16700Schasinglulu }
1291*91f16700Schasinglulu 
1292*91f16700Schasinglulu /*******************************************************************************
1293*91f16700Schasinglulu  * Restore EL2 sysreg context
1294*91f16700Schasinglulu  ******************************************************************************/
1295*91f16700Schasinglulu void cm_el2_sysregs_context_restore(uint32_t security_state)
1296*91f16700Schasinglulu {
1297*91f16700Schasinglulu 	cpu_context_t *ctx;
1298*91f16700Schasinglulu 	el2_sysregs_t *el2_sysregs_ctx;
1299*91f16700Schasinglulu 
1300*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1301*91f16700Schasinglulu 	assert(ctx != NULL);
1302*91f16700Schasinglulu 
1303*91f16700Schasinglulu 	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
1304*91f16700Schasinglulu 
1305*91f16700Schasinglulu 	el2_sysregs_context_restore_common(el2_sysregs_ctx);
1306*91f16700Schasinglulu #if CTX_INCLUDE_MTE_REGS
1307*91f16700Schasinglulu 	write_tfsr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2));
1308*91f16700Schasinglulu #endif
1309*91f16700Schasinglulu 	if (is_feat_mpam_supported()) {
1310*91f16700Schasinglulu 		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
1311*91f16700Schasinglulu 	}
1312*91f16700Schasinglulu 
1313*91f16700Schasinglulu 	if (is_feat_fgt_supported()) {
1314*91f16700Schasinglulu 		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
1315*91f16700Schasinglulu 	}
1316*91f16700Schasinglulu 
1317*91f16700Schasinglulu 	if (is_feat_ecv_v2_supported()) {
1318*91f16700Schasinglulu 		write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2));
1319*91f16700Schasinglulu 	}
1320*91f16700Schasinglulu 
1321*91f16700Schasinglulu 	if (is_feat_vhe_supported()) {
1322*91f16700Schasinglulu 		write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
1323*91f16700Schasinglulu 		write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
1324*91f16700Schasinglulu 	}
1325*91f16700Schasinglulu 
1326*91f16700Schasinglulu 	if (is_feat_ras_supported()) {
1327*91f16700Schasinglulu 		write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
1328*91f16700Schasinglulu 		write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
1329*91f16700Schasinglulu 	}
1330*91f16700Schasinglulu 
1331*91f16700Schasinglulu 	if (is_feat_nv2_supported()) {
1332*91f16700Schasinglulu 		write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
1333*91f16700Schasinglulu 	}
1334*91f16700Schasinglulu 	if (is_feat_trf_supported()) {
1335*91f16700Schasinglulu 		write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
1336*91f16700Schasinglulu 	}
1337*91f16700Schasinglulu 
1338*91f16700Schasinglulu 	if (is_feat_csv2_2_supported()) {
1339*91f16700Schasinglulu 		write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2));
1340*91f16700Schasinglulu 	}
1341*91f16700Schasinglulu 
1342*91f16700Schasinglulu 	if (is_feat_hcx_supported()) {
1343*91f16700Schasinglulu 		write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
1344*91f16700Schasinglulu 	}
1345*91f16700Schasinglulu 	if (is_feat_tcr2_supported()) {
1346*91f16700Schasinglulu 		write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
1347*91f16700Schasinglulu 	}
1348*91f16700Schasinglulu 	if (is_feat_sxpie_supported()) {
1349*91f16700Schasinglulu 		write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
1350*91f16700Schasinglulu 		write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
1351*91f16700Schasinglulu 	}
1352*91f16700Schasinglulu 	if (is_feat_s2pie_supported()) {
1353*91f16700Schasinglulu 		write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
1354*91f16700Schasinglulu 	}
1355*91f16700Schasinglulu 	if (is_feat_sxpoe_supported()) {
1356*91f16700Schasinglulu 		write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
1357*91f16700Schasinglulu 	}
1358*91f16700Schasinglulu 	if (is_feat_gcs_supported()) {
1359*91f16700Schasinglulu 		write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
1360*91f16700Schasinglulu 		write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
1361*91f16700Schasinglulu 	}
1362*91f16700Schasinglulu }
1363*91f16700Schasinglulu #endif /* CTX_INCLUDE_EL2_REGS */
1364*91f16700Schasinglulu 
1365*91f16700Schasinglulu /*******************************************************************************
1366*91f16700Schasinglulu  * This function is used to exit to Non-secure world. If CTX_INCLUDE_EL2_REGS
1367*91f16700Schasinglulu  * is enabled, it restores EL1 and EL2 sysreg contexts instead of directly
1368*91f16700Schasinglulu  * updating EL1 and EL2 registers. Otherwise, it calls the generic
1369*91f16700Schasinglulu  * cm_prepare_el3_exit function.
1370*91f16700Schasinglulu  ******************************************************************************/
1371*91f16700Schasinglulu void cm_prepare_el3_exit_ns(void)
1372*91f16700Schasinglulu {
1373*91f16700Schasinglulu #if CTX_INCLUDE_EL2_REGS
1374*91f16700Schasinglulu #if ENABLE_ASSERTIONS
1375*91f16700Schasinglulu 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
1376*91f16700Schasinglulu 	assert(ctx != NULL);
1377*91f16700Schasinglulu 
1378*91f16700Schasinglulu 	/* Assert that EL2 is used. */
1379*91f16700Schasinglulu 	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
1380*91f16700Schasinglulu 	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
1381*91f16700Schasinglulu 			(el_implemented(2U) != EL_IMPL_NONE));
1382*91f16700Schasinglulu #endif /* ENABLE_ASSERTIONS */
1383*91f16700Schasinglulu 
1384*91f16700Schasinglulu 	/* Restore EL2 and EL1 sysreg contexts */
1385*91f16700Schasinglulu 	cm_el2_sysregs_context_restore(NON_SECURE);
1386*91f16700Schasinglulu 	cm_el1_sysregs_context_restore(NON_SECURE);
1387*91f16700Schasinglulu 	cm_set_next_eret_context(NON_SECURE);
1388*91f16700Schasinglulu #else
1389*91f16700Schasinglulu 	cm_prepare_el3_exit(NON_SECURE);
1390*91f16700Schasinglulu #endif /* CTX_INCLUDE_EL2_REGS */
1391*91f16700Schasinglulu }
1392*91f16700Schasinglulu 
1393*91f16700Schasinglulu /*******************************************************************************
1394*91f16700Schasinglulu  * The next four functions are used by runtime services to save and restore
1395*91f16700Schasinglulu  * EL1 context on the 'cpu_context' structure for the specified security
1396*91f16700Schasinglulu  * state.
1397*91f16700Schasinglulu  ******************************************************************************/
1398*91f16700Schasinglulu void cm_el1_sysregs_context_save(uint32_t security_state)
1399*91f16700Schasinglulu {
1400*91f16700Schasinglulu 	cpu_context_t *ctx;
1401*91f16700Schasinglulu 
1402*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1403*91f16700Schasinglulu 	assert(ctx != NULL);
1404*91f16700Schasinglulu 
1405*91f16700Schasinglulu 	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
1406*91f16700Schasinglulu 
1407*91f16700Schasinglulu #if IMAGE_BL31
1408*91f16700Schasinglulu 	if (security_state == SECURE)
1409*91f16700Schasinglulu 		PUBLISH_EVENT(cm_exited_secure_world);
1410*91f16700Schasinglulu 	else
1411*91f16700Schasinglulu 		PUBLISH_EVENT(cm_exited_normal_world);
1412*91f16700Schasinglulu #endif
1413*91f16700Schasinglulu }
1414*91f16700Schasinglulu 
1415*91f16700Schasinglulu void cm_el1_sysregs_context_restore(uint32_t security_state)
1416*91f16700Schasinglulu {
1417*91f16700Schasinglulu 	cpu_context_t *ctx;
1418*91f16700Schasinglulu 
1419*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1420*91f16700Schasinglulu 	assert(ctx != NULL);
1421*91f16700Schasinglulu 
1422*91f16700Schasinglulu 	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
1423*91f16700Schasinglulu 
1424*91f16700Schasinglulu #if IMAGE_BL31
1425*91f16700Schasinglulu 	if (security_state == SECURE)
1426*91f16700Schasinglulu 		PUBLISH_EVENT(cm_entering_secure_world);
1427*91f16700Schasinglulu 	else
1428*91f16700Schasinglulu 		PUBLISH_EVENT(cm_entering_normal_world);
1429*91f16700Schasinglulu #endif
1430*91f16700Schasinglulu }
1431*91f16700Schasinglulu 
1432*91f16700Schasinglulu /*******************************************************************************
1433*91f16700Schasinglulu  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
1434*91f16700Schasinglulu  * given security state with the given entrypoint
1435*91f16700Schasinglulu  ******************************************************************************/
1436*91f16700Schasinglulu void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
1437*91f16700Schasinglulu {
1438*91f16700Schasinglulu 	cpu_context_t *ctx;
1439*91f16700Schasinglulu 	el3_state_t *state;
1440*91f16700Schasinglulu 
1441*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1442*91f16700Schasinglulu 	assert(ctx != NULL);
1443*91f16700Schasinglulu 
1444*91f16700Schasinglulu 	/* Populate EL3 state so that ERET jumps to the correct entry */
1445*91f16700Schasinglulu 	state = get_el3state_ctx(ctx);
1446*91f16700Schasinglulu 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
1447*91f16700Schasinglulu }
1448*91f16700Schasinglulu 
1449*91f16700Schasinglulu /*******************************************************************************
1450*91f16700Schasinglulu  * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
1451*91f16700Schasinglulu  * pertaining to the given security state
1452*91f16700Schasinglulu  ******************************************************************************/
1453*91f16700Schasinglulu void cm_set_elr_spsr_el3(uint32_t security_state,
1454*91f16700Schasinglulu 			uintptr_t entrypoint, uint32_t spsr)
1455*91f16700Schasinglulu {
1456*91f16700Schasinglulu 	cpu_context_t *ctx;
1457*91f16700Schasinglulu 	el3_state_t *state;
1458*91f16700Schasinglulu 
1459*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1460*91f16700Schasinglulu 	assert(ctx != NULL);
1461*91f16700Schasinglulu 
1462*91f16700Schasinglulu 	/* Populate EL3 state so that ERET jumps to the correct entry */
1463*91f16700Schasinglulu 	state = get_el3state_ctx(ctx);
1464*91f16700Schasinglulu 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
1465*91f16700Schasinglulu 	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
1466*91f16700Schasinglulu }
1467*91f16700Schasinglulu 
1468*91f16700Schasinglulu /*******************************************************************************
1469*91f16700Schasinglulu  * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
1470*91f16700Schasinglulu  * pertaining to the given security state using the value and bit position
1471*91f16700Schasinglulu  * specified in the parameters. It preserves all other bits.
1472*91f16700Schasinglulu  ******************************************************************************/
1473*91f16700Schasinglulu void cm_write_scr_el3_bit(uint32_t security_state,
1474*91f16700Schasinglulu 			  uint32_t bit_pos,
1475*91f16700Schasinglulu 			  uint32_t value)
1476*91f16700Schasinglulu {
1477*91f16700Schasinglulu 	cpu_context_t *ctx;
1478*91f16700Schasinglulu 	el3_state_t *state;
1479*91f16700Schasinglulu 	u_register_t scr_el3;
1480*91f16700Schasinglulu 
1481*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1482*91f16700Schasinglulu 	assert(ctx != NULL);
1483*91f16700Schasinglulu 
1484*91f16700Schasinglulu 	/* Ensure that the bit position is a valid one */
1485*91f16700Schasinglulu 	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
1486*91f16700Schasinglulu 
1487*91f16700Schasinglulu 	/* Ensure that the 'value' is only a bit wide */
1488*91f16700Schasinglulu 	assert(value <= 1U);
1489*91f16700Schasinglulu 
1490*91f16700Schasinglulu 	/*
1491*91f16700Schasinglulu 	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
1492*91f16700Schasinglulu 	 * and set it to its new value.
1493*91f16700Schasinglulu 	 */
1494*91f16700Schasinglulu 	state = get_el3state_ctx(ctx);
1495*91f16700Schasinglulu 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
1496*91f16700Schasinglulu 	scr_el3 &= ~(1UL << bit_pos);
1497*91f16700Schasinglulu 	scr_el3 |= (u_register_t)value << bit_pos;
1498*91f16700Schasinglulu 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
1499*91f16700Schasinglulu }
1500*91f16700Schasinglulu 
1501*91f16700Schasinglulu /*******************************************************************************
1502*91f16700Schasinglulu  * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
1503*91f16700Schasinglulu  * given security state.
1504*91f16700Schasinglulu  ******************************************************************************/
1505*91f16700Schasinglulu u_register_t cm_get_scr_el3(uint32_t security_state)
1506*91f16700Schasinglulu {
1507*91f16700Schasinglulu 	cpu_context_t *ctx;
1508*91f16700Schasinglulu 	el3_state_t *state;
1509*91f16700Schasinglulu 
1510*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1511*91f16700Schasinglulu 	assert(ctx != NULL);
1512*91f16700Schasinglulu 
1513*91f16700Schasinglulu 	/* Populate EL3 state so that ERET jumps to the correct entry */
1514*91f16700Schasinglulu 	state = get_el3state_ctx(ctx);
1515*91f16700Schasinglulu 	return read_ctx_reg(state, CTX_SCR_EL3);
1516*91f16700Schasinglulu }
1517*91f16700Schasinglulu 
1518*91f16700Schasinglulu /*******************************************************************************
1519*91f16700Schasinglulu  * This function is used to program the context that's used for exception
1520*91f16700Schasinglulu  * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
1521*91f16700Schasinglulu  * the required security state
1522*91f16700Schasinglulu  ******************************************************************************/
1523*91f16700Schasinglulu void cm_set_next_eret_context(uint32_t security_state)
1524*91f16700Schasinglulu {
1525*91f16700Schasinglulu 	cpu_context_t *ctx;
1526*91f16700Schasinglulu 
1527*91f16700Schasinglulu 	ctx = cm_get_context(security_state);
1528*91f16700Schasinglulu 	assert(ctx != NULL);
1529*91f16700Schasinglulu 
1530*91f16700Schasinglulu 	cm_set_next_context(ctx);
1531*91f16700Schasinglulu }
1532