xref: /arm-trusted-firmware/lib/cpus/aarch32/cpu_helpers.S (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
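	/*
	 * For illustration only: a per-CPU library file (e.g. cortex_a53.S)
	 * typically registers its handlers through the declare_cpu_ops
	 * macro from cpu_macros.S, which emits the cpu_ops entry that this
	 * handler and get_cpu_ops_ptr below look up. A minimal sketch
	 * (handler names are examples, not defined in this file):
	 *
	 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
	 *		cortex_a53_reset_func, \
	 *		cortex_a53_core_pwr_dwn, \
	 *		cortex_a53_cluster_pwr_dwn
	 */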
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* Core and cluster power down is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function
	 * takes a domain level to be powered down as its parameter. After the
	 * cpu_ops pointer is retrieved from cpu_data, the handler for the
	 * requested power level is called.
	 */
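	/*
	 * For example, with the usual cpu_ops layout, power_level 0 selects
	 * the first power down handler (core) at offset CPU_PWR_DWN_OPS in
	 * cpu_ops, and power_level 1 the second (cluster) at
	 * CPU_PWR_DWN_OPS + 4, as computed in the function below.
	 */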
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest supported one
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * highest supported power level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
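	/*
	 * Illustrative summary of the logic below: read the
	 * CPU_DATA_CPU_OPS_PTR field of this core's cpu_data; if it is
	 * still zero, call get_cpu_ops_ptr and cache the result back into
	 * cpu_data, so the cpu_ops list is only searched once per core.
	 */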
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The function below returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number fields are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
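	/*
	 * Worked example (values for illustration): a Cortex-A53 r0p4
	 * reports MIDR 0x410FD034. Masking with CPU_IMPL_PN_MASK retains
	 * the implementer (0x41) and part number (0xD03) fields, giving
	 * 0x4100D030, which is compared against the similarly masked midr
	 * field of each cpu_ops entry.
	 */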
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches the midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
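	/*
	 * For example, an r2p1 part (variant 2, revision 1) yields
	 * r0 = 0x21.
	 */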
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is lower than or the same as
 * the given value, the errata applies; otherwise it does not.
 */
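/*
 * For example, if the running CPU is r1p1 (rev_var 0x11) and the erratum
 * applies up to and including r2p0 (0x20), this returns ERRATA_APPLIES;
 * on an r2p1 part (0x21) it returns ERRATA_NOT_APPLIES.
 */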
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, the errata applies; otherwise it does not.
 */
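/*
 * For example, if the running CPU is r2p1 (rev_var 0x21) and the erratum
 * applies from r2p0 (0x20) onwards, this returns ERRATA_APPLIES.
 */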
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs