/* plat/arm/common/aarch64/execution_state_switch.c (revision 91f16700) */

/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#include <plat/arm/common/plat_arm.h>
#include <smccc_helpers.h>

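/*
 * Calling-convention sketch (illustrative only; the authoritative argument
 * marshalling is done by the SiP service dispatcher, not this file).
 * Assuming the dispatcher forwards SMC arguments x1-x4 as pc_hi, pc_lo,
 * cookie_hi and cookie_lo respectively, an AArch64 caller at EL2 requesting
 * a switch to AArch32 would issue:
 *
 *	x0 = ARM_SIP_SVC_STATE_SWITCH_64
 *	x1 = 0			(pc_hi: must be zero when targeting AArch32)
 *	x2 = entry_point	(pc_lo: bit[0] set requests Thumb state)
 *	x3 = 0			(cookie_hi: must be zero when targeting AArch32)
 *	x4 = cookie		(cookie_lo: delivered in r1 at entry_point)
 *	smc #0
 */
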
/*
 * Handle an SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; its width depends on
 *	that exception level.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, to be passed back to it
 *	upon re-entry.
 * handle:
 *	Handle to the saved context.
 */
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	bool caller_64, thumb = false, from_el2;
	unsigned int el, endianness;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* Validate the supplied entry point */
	pc = (u_register_t) (((uint64_t) pc_hi << 32) | pc_lo);
	if (arm_validate_ns_entrypoint(pc) != 0)
		goto invalid_param;

	/* The caller has already validated that the SMC originated from NS */

	/*
	 * Disallow state switch if any of the secondaries have been brought up.
	 */
	if (psci_secondaries_brought_up() != 0)
		goto exec_denied;

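	/* SPSR_EL3.M[4] (RW) records the register width of the calling EL */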
	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers
		 * when switching to AArch32.
		 */
		if ((pc_hi != 0U) || (cookie_hi != 0U))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32 */
		thumb = (pc & 1U) != 0U;
	} else {
		/* Construct the AArch64 PC */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}

	/* Make sure the PC is 4-byte aligned, except for Thumb */
	if (((pc & 0x3U) != 0U) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls the register width of the immediate lower EL only.
	 * Expect this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented but disabled, which can be inferred from
	 *   SCR_EL3.HCE.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
		(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
	if (!from_el2) {
		/* The call is from an NS privilege level other than Hyp */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if ((scr & SCR_HCE_BIT) != 0U)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract the
	 * endianness bit from the respective system control register
	 * directly.
	 */
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;

	/* Construct the SPSR for the exception state we're about to switch to */
	if (caller_64) {
		unsigned long long impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU implements
		 * the target EL in AArch32.
		 */
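		/*
		 * Note: el_implemented() reports the register widths
		 * implemented at the given EL (assumed here to be derived from
		 * the ID_AA64PFR0_EL1 EL fields); EL_IMPL_A64_A32 means the EL
		 * supports both AArch64 and AArch32.
		 */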
		impl = from_el2 ? el_implemented(2) : el_implemented(1);
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32((u_register_t) el,
				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since the EL this call
		 * was raised from cannot have been implemented AArch32-only,
		 * it's safe to assume it also implements AArch64.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes an
	 * entry_point_info_t pointer as its argument, construct a dummy one
	 * with the PC, state width, endianness, security etc. appropriately
	 * set. The other entries in the entry point structure are irrelevant
	 * for this purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = (uint32_t) spsr;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
				EP_EE_LITTLE)
			 | NON_SECURE | EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for
	 * the first time. State switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
	cm_prepare_el3_exit_ns();

	/*
	 * State switch success. The caller of the SMC will not see it return;
	 * instead, execution restarts at the supplied entry point, with the
	 * cookie words populated in registers 0 and 1 (see the sketch after
	 * this function).
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}
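
/*
 * Illustrative re-entry sketch (hypothetical caller-side code, not part of
 * this file). After a successful AArch32 -> AArch64 switch, the new AArch64
 * entry point receives cookie_hi in x0 and cookie_lo in x1, so a 64-bit
 * cookie pointer that was split across the SMC arguments can be reassembled
 * as:
 *
 *	void *cookie = (void *) (((uint64_t) cookie_hi << 32) | cookie_lo);
 *
 * where cookie_hi and cookie_lo are the values read from x0/x1 on entry.
 */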