/*
 * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-CPU data pointer for this CPU.
	 * This is done early to give crash reporting access to the crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */
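
	/*
	 * For illustration (a sketch, not part of the boot flow): code that
	 * runs later, including the crash reporting path, locates this CPU's
	 * cpu_data simply by reading the pointer back:
	 *
	 *	mrs	x0, tpidr_el3	// x0 = this CPU's cpu_data pointer
	 *
	 * which is why leaving TPIDR_EL3 NULL here would turn the first crash
	 * report into a recursive exception.
	 */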

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 *  to EL3 when executing at any EL.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, (SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT)
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 *  privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 *  accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 *  debug registers, other than those registers that are controlled by
	 *  MDCR_EL3.TDOSA.
	 */
	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
		      MDCR_SPD32(MDCR_SPD32_DISABLE)) & \
		    ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT))

	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm x0, CPTR_EL3_RESET_VAL
	msr	cptr_el3, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 * First assert that the FEAT_DIT build flag matches the feature id
	 * register value for DIT. A build flag value greater than 1 selects
	 * a runtime check of the ID register instead of an assertion.
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_ASSERTIONS || ENABLE_FEAT_DIT > 1
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
#if ENABLE_FEAT_DIT > 1
	cbz	x0, 1f
#else
	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
	ASM_ASSERT(eq)
#endif

#endif /* ENABLE_ASSERTIONS || ENABLE_FEAT_DIT > 1 */
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
#endif
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 * _pie_fixup_size:
 *	Size of the memory region in which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
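
/* -----------------------------------------------------------------------------
 * Illustrative invocation (a sketch modelled on a typical reset-handling BL31
 * entrypoint; the actual argument values vary per image and platform):
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=runtime_exceptions		\
 *		_pie_fixup_size=BL31_LIMIT - BL31_BASE
 * -----------------------------------------------------------------------------
 */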
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
#if ENABLE_FEAT_RAS
		/* If FEAT_RAS is present assume FEAT_IESB is also present */
		orr	x0, x0, #SCTLR_IESB_BIT
#endif
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code is executed on both warm and cold resets, so now is
		 * the time to distinguish between the two. Query the platform
		 * entrypoint address: if it is non-zero, this is a warm boot,
		 * so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table (GDT),
		 * but only once, during the primary core cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label in the first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

#if !(defined(IMAGE_BL2) && ENABLE_RME)
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler
#endif

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to
	 * this point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((RESET_TO_BL2 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This includes
		 * the data and NOBITS sections. This is done to safeguard
		 * against possible corruption of this memory by dirty cache
		 * lines in a system cache as a result of use by an earlier
		 * boot loader stage. If PIE is enabled, however, RO sections
		 * including the GOT may be modified during the PIE fixup.
		 * Therefore, to be on the safe side, invalidate the entire
		 * image region if PIE is enabled.
		 * -------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		adrp	x0, __TEXT_START__
		add	x0, x0, :lo12:__TEXT_START__
#else
		adrp	x0, __RO_START__
		add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		adrp	x0, __BL2_NOLOAD_START__
		add	x0, x0, :lo12:__BL2_NOLOAD_START__
		adrp	x1, __BL2_NOLOAD_END__
		add	x1, x1, :lo12:__BL2_NOLOAD_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2 && BL2_IN_XIP_MEM)
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * This macro expects that x30 has been saved. Also save x29, which
	 * is used by the called function.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed to ensure that the page table walk is not enabled
	 * until all EL1 system registers have been restored. TCR_EL1
	 * must be updated last, as it restores the previous stage 1
	 * page table walk setting (the TCR_EL1.EPDx bits). The ISBs
	 * ensure that the CPU performs these steps in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore SCTLR_EL1.
	 * 3. Ensure the SCTLR_EL1 write has completed, using an ISB.
	 * 4. Restore TCR_EL1.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm
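
/* -----------------------------------------------------------------
 * Illustrative pairing of the two macros above (a sketch, not code
 * taken from this file): the workaround is applied on entry to EL3,
 * after x30 has been saved to the context, and undone on the exit
 * path once the EL1 system registers have been restored, e.g.
 *
 *	(exception entry, x30 already saved)
 *	apply_at_speculative_wa
 *	...
 *	(exit path, just before returning to the lower EL)
 *	restore_ptw_el1_sys_regs
 *	...
 *	eret
 * -----------------------------------------------------------------
 */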

/* -----------------------------------------------------------------
 * The macro below reads the SCR_EL3 value saved in the context
 * structure to determine the security state of the context upon
 * ERET: it returns 0 (Secure) or 1 (Non-secure) based on SCR_EL3.NS,
 * or 2 (Realm) when SCR_EL3.NSE is set.
 * ------------------------------------------------------------------
 */
	.macro get_security_state _ret:req, _scr_reg:req
		ubfx	\_ret, \_scr_reg, #SCR_NSE_SHIFT, #1
		cmp	\_ret, #1
		beq	realm_state
		bfi	\_ret, \_scr_reg, #0, #1
		b	end
	realm_state:
		mov	\_ret, #2
	end:
	.endm
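
/* -----------------------------------------------------------------
 * Illustrative use (a sketch; the register choices are arbitrary):
 *
 *	ldr	x20, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
 *	get_security_state x19, x20
 *	// x19 now holds 0 (Secure), 1 (Non-secure) or 2 (Realm)
 * -----------------------------------------------------------------
 */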

#endif /* EL3_COMMON_MACROS_S */