xref: /arm-trusted-firmware/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
/*
 * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	unsigned int tgranx;

	if (size == PAGE_SIZE_4KB) {
		tgranx = read_id_aa64mmfr0_el0_tgran4_field();
		/* MSB of TGRAN4 field will be '1' for unsupported feature */
		return (tgranx < 8U);
	} else if (size == PAGE_SIZE_16KB) {
		tgranx = read_id_aa64mmfr0_el0_tgran16_field();
		return (tgranx >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED);
	} else if (size == PAGE_SIZE_64KB) {
		tgranx = read_id_aa64mmfr0_el0_tgran64_field();
		/* MSB of TGRAN64 field will be '1' for unsupported feature */
		return (tgranx < 8U);
	} else {
		return false;
	}
}

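/*
 * Return the largest translation granule size supported by this CPU, probing
 * 64KB first, then 16KB, and falling back to 4KB (which is expected to always
 * be supported).
 */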
size_t xlat_arch_get_max_supported_granule_size(void)
{
	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
		return PAGE_SIZE_64KB;
	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
		return PAGE_SIZE_16KB;
	} else {
		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
		return PAGE_SIZE_4KB;
	}
}

/*
 * Determine the physical address space encoded in the 'attr' parameter.
 *
 * The physical address will fall into one of four spaces (secure, nonsecure,
 * root or realm) if RME is enabled, or one of two spaces (secure and
 * nonsecure) otherwise.
 */
uint32_t xlat_arch_get_pas(uint32_t attr)
{
	uint32_t pas = MT_PAS(attr);

	switch (pas) {
#if ENABLE_RME
	/* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */
	case MT_REALM:
		return LOWER_ATTRS(EL3_S1_NSE | NS);
	/* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */
	case MT_ROOT:
		return LOWER_ATTRS(EL3_S1_NSE);
#endif
	case MT_NS:
		return LOWER_ATTRS(NS);
	default: /* MT_SECURE */
		return 0U;
	}
}

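/*
 * Return the TCR PS/IPS field encoding for the smallest physical address
 * range that covers 'max_addr'.
 */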
unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is
 * supported in ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

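/*
 * Return the maximum physical address supported by the hardware, as derived
 * from the ID_AA64MMFR0_EL1.PARange field.
 */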
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}

/*
 * Return minimum virtual address space size supported by the architecture
 */
uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_armv8_4_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
#endif /* ENABLE_ASSERTIONS */

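/*
 * Return true if the MMU of the translation regime managed by the given
 * context is enabled, i.e. if the SCTLR_ELx.M bit of the corresponding
 * exception level is set.
 */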
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
	} else if (ctx->xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
	}
}

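/*
 * Return true if the data cache is enabled at the current exception level,
 * i.e. if the SCTLR_ELx.C bit is set.
 */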
bool is_dcache_enabled(void)
{
	unsigned int el = get_current_el_maybe_constant();

	if (el == 1U) {
		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
	} else if (el == 2U) {
		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
	}
}

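/*
 * Return the execute-never descriptor attributes for the given translation
 * regime: UXN and PXN for the EL1&0 regime, XN for the EL2 and EL3 regimes.
 */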
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
	} else {
		assert((xlat_regime == EL2_REGIME) ||
		       (xlat_regime == EL3_REGIME));
		return UPPER_ATTRS(XN);
	}
}

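/*
 * Invalidate, by VA, the TLB entries of the given translation regime on all
 * PEs in the same Inner Shareable domain.
 */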
void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	/*
	 * This function only supports invalidation of TLB entries for the
	 * EL3, EL2 and EL1&0 translation regimes.
	 *
	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
	 */
	if (xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		tlbivaae1is(TLBI_ADDR(va));
	} else if (xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		tlbivae2is(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		tlbivae3is(TLBI_ADDR(va));
	}
}

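/*
 * Wait for pending TLB maintenance operations issued by this PE to complete
 * and make their effects visible to subsequent instructions.
 */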
void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

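/*
 * Return the exception level this code is executing at, as reported by
 * CurrentEL. This library is not expected to run at EL0.
 */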
unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

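/*
 * Compute the MAIR, TCR and TTBR0 values needed to enable the MMU for the
 * given translation regime and store them in the 'params' array, so that the
 * caller can program the system registers of the appropriate exception level.
 */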
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0, tcr;
	uintptr_t virtual_addr_space_size;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < ((uint64_t)UINTPTR_MAX));

	virtual_addr_space_size = (uintptr_t)max_va + 1U;

	assert(virtual_addr_space_size >=
		xlat_get_min_virt_addr_space_size());
	assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(virtual_addr_space_size));

	/*
	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
	 */
	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);

	tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the max physical address accessible by the
	 * hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);

	if (xlat_regime == EL1_EL0_REGIME) {
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	} else if (xlat_regime == EL2_REGIME) {
		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
	} else {
		assert(xlat_regime == EL3_REGIME);
		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	}

	/* Set TTBR bits as well */
	ttbr0 = (uint64_t) base_table;

	if (is_armv8_2_ttcnp_present()) {
		/* Enable CnP bit so as to share page tables with all PEs. */
		ttbr0 |= TTBR_CNP_BIT;
	}

	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = tcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}