/* xref: /arm-trusted-firmware/bl31/aarch64/bl31_entrypoint.S (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7) */
/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/aarch64/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 in callee-saved
	 * registers for later use: they are restored into x0 - x3 below
	 * and forwarded to bl31_setup() after el3_entrypoint_common has
	 * run.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

#if !RESET_TO_BL31
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
#else

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup, passing the stashed bootloader arguments back
	 * in x0 - x3.
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * --------------------------------------------------------------------
	 */
	adrp	x0, __DATA_START__
	add	x0, x0, :lo12:__DATA_START__
	adrp	x1, __DATA_END__
	add	x1, x1, :lo12:__DATA_END__
	sub	x1, x1, x0			/* x1 = size of .data in bytes */
	bl	clean_dcache_range

	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0			/* x1 = size of .bss in bytes */
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off.  The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *    programming the reset address do we need to initialise SCTLR_EL3.
	 *    In other cases, we assume this has been taken care by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=0

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr			/* x0 = 0: enable dcache with MMU */
#else
	mov	x0, #DISABLE_DCACHE	/* keep dcache off until coherency entry */
#endif
	bl	bl31_plat_enable_mmu

#if ENABLE_RME
	/*
	 * At warm boot GPT data structures have already been initialized in RAM
	 * but the sysregs for this CPU need to be initialized. Note that the GPT
	 * accesses are controlled attributes in GPCCR and do not depend on the
	 * SCR_EL3.C bit.
	 */
	bl	gpt_enable
	cbz	x0, 1f			/* x0 == 0 means gpt_enable succeeded */
	no_ret plat_panic_handler
1:
#endif

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0			/* x19 = timestamp slot address */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30		/* preserve LR across the helper call */
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint
230