/*
 * Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL2_COMMON_MACROS_S
#define EL2_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include <platform_def.h>

	/*
	 * Helper macro to initialise system registers at EL2.
	 */
	.macro el2_arch_init_common

	/* ---------------------------------------------------------------------
	 * SCTLR_EL2 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL2.I: Enable the instruction cache.
	 *
	 * SCTLR_EL2.SA: Enable Stack Alignment check. An SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL2 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL2.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el2
	orr	x0, x0, x1
	msr	sctlr_el2, x0
	isb

	/* ---------------------------------------------------------------------
	 * Initialise HCR_EL2, setting all fields rather than relying on HW.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * HCR_EL2.TWE: Set to zero so that execution of WFE instructions at
	 *  EL2, EL1 and EL0 is not trapped to EL2.
	 *
	 * HCR_EL2.TWI: Set to zero so that execution of WFI instructions at
	 *  EL2, EL1 and EL0 is not trapped to EL2.
	 *
	 * HCR_EL2.HCD: Set to zero to enable HVC calls at EL1 and above,
	 *  from both Security states and both Execution states.
	 *
	 * HCR_EL2.TEA: Set to one to route synchronous External Abort
	 *  exceptions to EL2 (SError routing is controlled by HCR_EL2.AMO).
	 *
	 * HCR_EL2.{API,APK}: For Armv8.3 pointer authentication feature,
	 * disable traps to EL2 when accessing key registers or using
	 * pointer authentication instructions from lower ELs.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((HCR_RESET_VAL | HCR_TEA_BIT) \
			& ~(HCR_TWE_BIT | HCR_TWI_BIT | HCR_HCD_BIT))
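	/*
	 * Note (assumption): with HCR_RESET_VAL leaving all other fields at
	 * zero, the expression above yields a value with only HCR_EL2.TEA
	 * set; HCR_EL2.{API,APK} are OR-ed in below when
	 * CTX_INCLUDE_PAUTH_REGS is enabled.
	 */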
#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers are saved during world
	 * switches, enable pointer authentication everywhere, as it is safe to
	 * do so.
	 */
	orr	x0, x0, #(HCR_API_BIT | HCR_APK_BIT)
#endif  /* CTX_INCLUDE_PAUTH_REGS */
	msr	hcr_el2, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL2, setting all fields rather than relying on
	 * hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that EL1 System register accesses
	 *  to the powerdown debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 *  debug registers, other than those registers that are controlled by
	 *  MDCR_EL2.TDOSA.
	 *
	 * MDCR_EL2.TPM: Set to zero so that EL0, EL1, and EL2 System
	 *  register accesses to all Performance Monitors registers do not trap
	 *  to EL2.
	 *
	 * MDCR_EL2.HPMD: Set to zero so that event counting by the program-
	 *  mable counters PMEVCNTR<n>_EL0 is not prohibited at EL2. This bit
	 *  is implemented only when FEAT_PMUv3p1 is supported and is RES0
	 *  otherwise.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((MDCR_EL2_RESET_VAL | \
		      MDCR_SPD32(MDCR_SPD32_DISABLE)) \
		      & ~(MDCR_EL2_HPMD_BIT | MDCR_TDOSA_BIT | \
		      MDCR_TDA_BIT | MDCR_TPM_BIT))

	msr	mdcr_el2, x0

	/* ---------------------------------------------------------------------
	 * Initialise PMCR_EL0, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter,
	 *  PMCCNTR_EL0, does not count when event counting is prohibited.
	 *
	 * PMCR_EL0.X: Set to zero to disable export of events.
	 *
	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
	 *  counts on every clock cycle.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_DP_BIT) & \
		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))

	msr	pmcr_el0, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL2, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 *
	 * CPTR_EL2.TCPAC: Set to zero so that any accesses to CPACR_EL1 do
	 * not trap to EL2.
	 *
	 * CPTR_EL2.TTA: Set to zero so that System register accesses to the
	 *  trace registers do not trap to EL2.
	 *
	 * CPTR_EL2.TFP: Set to zero so that accesses to the V- or Z- registers
	 *  by Advanced SIMD, floating-point or SVE instructions (if implemented)
	 *  do not trap to EL2.
	 */

	mov_imm x0, (CPTR_EL2_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
	msr	cptr_el2, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL2
	 */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
	bne	1f
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL2. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some actions to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL2, including configuring
 *      the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL2 register.
 *
 * _pie_fixup_size:
 *	Size of the memory region in which the Global Descriptor Table (GDT)
 *	is fixed up.
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
	.macro el2_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL2 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR_EL2.EE: Set the CPU endianness before doing anything
		 *  that might involve memory reads or writes. Set to zero to
		 *  select Little Endian.
		 *
		 * SCTLR_EL2.WXN: For the EL2 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL2.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL2.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR_EL2.DSSBS: Set to zero to disable speculation store
		 *  bypass safe behaviour upon exception entry to EL2.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
		msr	sctlr_el2, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two. Query the
		 * platform entrypoint address: if it is not zero then this is
		 * a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table (GDT),
		 * only once, during the primary core cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
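		/*
		 * fixup_gdt_reloc() is assumed to take the start of the region
		 * in x0 and its end in x1, matching the values computed above.
		 */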
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el2, x0
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el2_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el2_panic
	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't reach this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
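		/*
		 * zeromem() is assumed to take the base address in x0 and the
		 * length in bytes in x1, as computed above.
		 */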
		bl	zeromem

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && \
	RESET_TO_BL2 && BL2_IN_XIP_MEM)
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
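		/*
		 * memcpy16() is assumed to copy x2 bytes from the source
		 * address in x1 (ROM) to the destination in x0 (RAM).
		 */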
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
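
/* -----------------------------------------------------------------------------
 * Illustrative invocation of el2_entrypoint_common from a BL image entrypoint
 * that runs at EL2. This is a sketch only: the parameter values and the
 * runtime_exceptions vector-table symbol are hypothetical and depend on the
 * image and the platform.
 *
 *	el2_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=runtime_exceptions		\
 *		_pie_fixup_size=0
 * -----------------------------------------------------------------------------
 */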

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * This macro expects that x30 has already been saved. It also saves
	 * x29, which is clobbered by the called function.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed to ensure that the page table walk is not enabled
	 * until all EL1 system registers have been restored. TCR_EL1
	 * is updated last, restoring the previous stage 1 page table
	 * walk setting (the TCR_EL1.EPDx bits). The ISBs ensure that
	 * the CPU performs the steps below in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore SCTLR_EL1.
	 * 3. Ensure SCTLR_EL1 is written, using an ISB.
	 * 4. Restore TCR_EL1.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm
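
/* -----------------------------------------------------------------------------
 * Illustrative pairing of the two macros above (a sketch; the full context
 * save/restore is elided). An EL2 exception handler would apply the
 * workaround on entry, after saving x30 to the context, and restore the EL1
 * page-table-walk registers on the exit path:
 *
 *	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 *	apply_at_speculative_wa
 *	...
 *	restore_ptw_el1_sys_regs
 *	...
 *	eret
 * -----------------------------------------------------------------------------
 */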

#endif /* EL2_COMMON_MACROS_S */