/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

 /* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
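	/*
	 * Illustrative sketch of the dispatch below, in rough C terms:
	 *
	 *	plat_reset_handler();
	 *	ops = get_cpu_ops_ptr();
	 *	if (ops->reset_func != NULL)
	 *		ops->reset_func();
	 *
	 * where reset_func is the entry at offset CPU_RESET_FUNC in cpu_ops.
	 */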
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if invalid cpu_ops obtained. If this is not valid, it may
	 * suggest that the proper CPU file hasn't been included.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down handlers are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes a power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
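	/*
	 * The per-level handlers are laid out in cpu_ops as consecutive 64-bit
	 * function pointers starting at offset CPU_PWR_DWN_OPS, so the handler
	 * for level N is loaded from cpu_ops_ptr + CPU_PWR_DWN_OPS + (N * 8).
	 */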
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level is higher than the last implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), clamp it and call the power down handler
	 * for that last level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
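	/*
	 * In rough C terms, the function below does (illustrative sketch):
	 *
	 *	if (cpu_data->cpu_ops_ptr == NULL)
	 *		cpu_data->cpu_ops_ptr = get_cpu_ops_ptr();
	 *
	 * with cpu_data being the per-CPU structure addressed via tpidr_el3.
	 */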
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of registered cpu_ops. Only the implementer and
	 * part number fields are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementer code 0x0 is reserved for software use and therefore
	 * no clashes should happen with that default value).
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on Success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
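	/*
	 * Illustrative C sketch of the search below:
	 *
	 *	midr = read_midr_el1() & CPU_IMPL_PN_MASK;
	 *	for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
	 *		if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *			return ops;
	 *	return NULL;
	 *
	 * With SUPPORT_UNKNOWN_MPID, the loop is re-run once with midr = 0
	 * before giving up.
	 */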
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
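/*
 * Equivalently (illustrative): rev_var = ((midr >> 16) & 0xf0) | (midr & 0xf),
 * i.e. the variant field lands in x0[7:4] and the revision in x0[3:0].
 */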
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. Returns ERRATA_APPLIES if the revision-variant is
 * less than or equal to the given value, ERRATA_NOT_APPLIES otherwise.
 *
 * Shall clobber: x0-x3
 */
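/*
 * For example, an erratum affecting revisions up to and including r1p0 would
 * pass x1 = 0x10 (variant 1, revision 0): r0p0 - r1p0 return ERRATA_APPLIES,
 * r1p1 onwards return ERRATA_NOT_APPLIES (illustrative values).
 */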
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. Returns ERRATA_APPLIES if the revision-variant is
 * greater than or equal to the given value, ERRATA_NOT_APPLIES otherwise.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. Returns ERRATA_APPLIES if the revision-variant
 * lies within the inclusive range [x1, x2], ERRATA_NOT_APPLIES otherwise.
 *
 * Shall clobber: x0-x4
 */
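/*
 * For example, x1 = 0x00 (r0p0) and x2 = 0x21 (r2p1) return ERRATA_APPLIES
 * for any revision-variant from r0p0 up to and including r2p1 (illustrative
 * values).
 */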
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
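/*
 * For instance, the SMCCC_ARCH_FEATURES handler for SMCCC_ARCH_WORKAROUND_1
 * typically consults this result to report whether the firmware workaround
 * must be invoked (illustrative usage note).
 */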
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies