xref: /arm-trusted-firmware/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  */
6*91f16700Schasinglulu 
7*91f16700Schasinglulu #include <assert.h>
8*91f16700Schasinglulu #include <stdbool.h>
9*91f16700Schasinglulu 
10*91f16700Schasinglulu #include <platform_def.h>
11*91f16700Schasinglulu 
12*91f16700Schasinglulu #include <arch.h>
13*91f16700Schasinglulu #include <arch_features.h>
14*91f16700Schasinglulu #include <arch_helpers.h>
15*91f16700Schasinglulu #include <lib/cassert.h>
16*91f16700Schasinglulu #include <lib/utils_def.h>
17*91f16700Schasinglulu #include <lib/xlat_tables/xlat_tables_v2.h>
18*91f16700Schasinglulu 
19*91f16700Schasinglulu #include "../xlat_tables_private.h"
20*91f16700Schasinglulu 
21*91f16700Schasinglulu #if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
22*91f16700Schasinglulu #error ARMv7 target does not support LPAE MMU descriptors
23*91f16700Schasinglulu #endif
24*91f16700Schasinglulu 
25*91f16700Schasinglulu /*
26*91f16700Schasinglulu  * Returns true if the provided granule size is supported, false otherwise.
27*91f16700Schasinglulu  */
28*91f16700Schasinglulu bool xlat_arch_is_granule_size_supported(size_t size)
29*91f16700Schasinglulu {
30*91f16700Schasinglulu 	/*
31*91f16700Schasinglulu 	 * The library uses the long descriptor translation table format, which
32*91f16700Schasinglulu 	 * supports 4 KiB pages only.
33*91f16700Schasinglulu 	 */
34*91f16700Schasinglulu 	return size == PAGE_SIZE_4KB;
35*91f16700Schasinglulu }
36*91f16700Schasinglulu 
37*91f16700Schasinglulu size_t xlat_arch_get_max_supported_granule_size(void)
38*91f16700Schasinglulu {
39*91f16700Schasinglulu 	return PAGE_SIZE_4KB;
40*91f16700Schasinglulu }
41*91f16700Schasinglulu 
42*91f16700Schasinglulu /*
43*91f16700Schasinglulu  * Determine the physical address space encoded in the 'attr' parameter.
44*91f16700Schasinglulu  *
45*91f16700Schasinglulu  * The physical address will fall into one of two spaces; secure or
46*91f16700Schasinglulu  * nonsecure.
47*91f16700Schasinglulu  */
48*91f16700Schasinglulu uint32_t xlat_arch_get_pas(uint32_t attr)
49*91f16700Schasinglulu {
50*91f16700Schasinglulu 	uint32_t pas = MT_PAS(attr);
51*91f16700Schasinglulu 
52*91f16700Schasinglulu 	if (pas == MT_NS) {
53*91f16700Schasinglulu 		return LOWER_ATTRS(NS);
54*91f16700Schasinglulu 	} else { /* MT_SECURE */
55*91f16700Schasinglulu 		return 0U;
56*91f16700Schasinglulu 	}
57*91f16700Schasinglulu }
58*91f16700Schasinglulu 
59*91f16700Schasinglulu #if ENABLE_ASSERTIONS
/* Highest physical address reachable with the long descriptor format. */
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	/* 40-bit physical address space: 2^40 - 1. */
	return 0xFFFFFFFFFFULL;
}
65*91f16700Schasinglulu 
66*91f16700Schasinglulu /*
67*91f16700Schasinglulu  * Return minimum virtual address space size supported by the architecture
68*91f16700Schasinglulu  */
69*91f16700Schasinglulu uintptr_t xlat_get_min_virt_addr_space_size(void)
70*91f16700Schasinglulu {
71*91f16700Schasinglulu 	return MIN_VIRT_ADDR_SPACE_SIZE;
72*91f16700Schasinglulu }
#endif /* ENABLE_ASSERTIONS */
74*91f16700Schasinglulu 
75*91f16700Schasinglulu bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
76*91f16700Schasinglulu {
77*91f16700Schasinglulu 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
78*91f16700Schasinglulu 		assert(xlat_arch_current_el() == 1U);
79*91f16700Schasinglulu 		return (read_sctlr() & SCTLR_M_BIT) != 0U;
80*91f16700Schasinglulu 	} else {
81*91f16700Schasinglulu 		assert(ctx->xlat_regime == EL2_REGIME);
82*91f16700Schasinglulu 		assert(xlat_arch_current_el() == 2U);
83*91f16700Schasinglulu 		return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
84*91f16700Schasinglulu 	}
85*91f16700Schasinglulu }
86*91f16700Schasinglulu 
87*91f16700Schasinglulu bool is_dcache_enabled(void)
88*91f16700Schasinglulu {
89*91f16700Schasinglulu 	if (IS_IN_EL2()) {
90*91f16700Schasinglulu 		return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
91*91f16700Schasinglulu 	} else {
92*91f16700Schasinglulu 		return (read_sctlr() & SCTLR_C_BIT) != 0U;
93*91f16700Schasinglulu 	}
94*91f16700Schasinglulu }
95*91f16700Schasinglulu 
96*91f16700Schasinglulu uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
97*91f16700Schasinglulu {
98*91f16700Schasinglulu 	if (xlat_regime == EL1_EL0_REGIME) {
99*91f16700Schasinglulu 		return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
100*91f16700Schasinglulu 	} else {
101*91f16700Schasinglulu 		assert(xlat_regime == EL2_REGIME);
102*91f16700Schasinglulu 		return UPPER_ATTRS(XN);
103*91f16700Schasinglulu 	}
104*91f16700Schasinglulu }
105*91f16700Schasinglulu 
106*91f16700Schasinglulu void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
107*91f16700Schasinglulu {
108*91f16700Schasinglulu 	/*
109*91f16700Schasinglulu 	 * Ensure the translation table write has drained into memory before
110*91f16700Schasinglulu 	 * invalidating the TLB entry.
111*91f16700Schasinglulu 	 */
112*91f16700Schasinglulu 	dsbishst();
113*91f16700Schasinglulu 
114*91f16700Schasinglulu 	if (xlat_regime == EL1_EL0_REGIME) {
115*91f16700Schasinglulu 		tlbimvaais(TLBI_ADDR(va));
116*91f16700Schasinglulu 	} else {
117*91f16700Schasinglulu 		assert(xlat_regime == EL2_REGIME);
118*91f16700Schasinglulu 		tlbimvahis(TLBI_ADDR(va));
119*91f16700Schasinglulu 	}
120*91f16700Schasinglulu }
121*91f16700Schasinglulu 
/* Complete any outstanding TLB invalidations issued by this PE. */
void xlat_arch_tlbi_va_sync(void)
{
	/* Drop all branch predictor entries across the Inner Shareable
	 * domain, since they may be based on stale translations. */
	bpiallis();

	/*
	 * TLB maintenance operations are only guaranteed complete after a
	 * DSB executed by the PE that issued them. Once complete, no
	 * observer in the shareability domain can make new accesses using
	 * the invalidated entries. See the Arm ARM, "Ordering and
	 * completion of TLB maintenance instructions" (section D4.8.2 of
	 * the ARMv8 manual, issue k).
	 */
	dsbish();

	/*
	 * An ISB is then required before the effects of the completed
	 * maintenance are guaranteed visible to instructions executed on
	 * this same PE.
	 */
	isb();
}
147*91f16700Schasinglulu 
/* Return the exception level (as an AArch64-style number) we execute at. */
unsigned int xlat_arch_current_el(void)
{
	if (IS_IN_HYP()) {
		return 2U;
	}

	assert(IS_IN_SVC() || IS_IN_MON());

	/*
	 * With EL3 in AArch32, every secure PL1 mode (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ) executes at EL3.
	 *
	 * The AArch32 PL1&0 translation regime matches the AArch64 EL1&0
	 * regime except for the XN bits, and this library sets/clears
	 * those together, so reporting EL1 here is equivalent in
	 * practice.
	 */
	return 1U;
}
166*91f16700Schasinglulu 
167*91f16700Schasinglulu /*******************************************************************************
168*91f16700Schasinglulu  * Function for enabling the MMU in PL1 or PL2, assuming that the page tables
169*91f16700Schasinglulu  * have already been created.
170*91f16700Schasinglulu  ******************************************************************************/
171*91f16700Schasinglulu void setup_mmu_cfg(uint64_t *params, unsigned int flags,
172*91f16700Schasinglulu 		   const uint64_t *base_table, unsigned long long max_pa,
173*91f16700Schasinglulu 		   uintptr_t max_va, __unused int xlat_regime)
174*91f16700Schasinglulu {
175*91f16700Schasinglulu 	uint64_t mair, ttbr0;
176*91f16700Schasinglulu 	uint32_t ttbcr;
177*91f16700Schasinglulu 
178*91f16700Schasinglulu 	/* Set attributes in the right indices of the MAIR */
179*91f16700Schasinglulu 	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
180*91f16700Schasinglulu 	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
181*91f16700Schasinglulu 			ATTR_IWBWA_OWBWA_NTR_INDEX);
182*91f16700Schasinglulu 	mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
183*91f16700Schasinglulu 			ATTR_NON_CACHEABLE_INDEX);
184*91f16700Schasinglulu 
185*91f16700Schasinglulu 	/*
186*91f16700Schasinglulu 	 * Configure the control register for stage 1 of the PL1&0 or EL2
187*91f16700Schasinglulu 	 * translation regimes.
188*91f16700Schasinglulu 	 */
189*91f16700Schasinglulu 
190*91f16700Schasinglulu 	/* Use the Long-descriptor translation table format. */
191*91f16700Schasinglulu 	ttbcr = TTBCR_EAE_BIT;
192*91f16700Schasinglulu 
193*91f16700Schasinglulu 	if (xlat_regime == EL1_EL0_REGIME) {
194*91f16700Schasinglulu 		assert(IS_IN_SVC() || IS_IN_MON());
195*91f16700Schasinglulu 		/*
196*91f16700Schasinglulu 		 * Disable translation table walk for addresses that are
197*91f16700Schasinglulu 		 * translated using TTBR1. Therefore, only TTBR0 is used.
198*91f16700Schasinglulu 		 */
199*91f16700Schasinglulu 		ttbcr |= TTBCR_EPD1_BIT;
200*91f16700Schasinglulu 	} else {
201*91f16700Schasinglulu 		assert(xlat_regime == EL2_REGIME);
202*91f16700Schasinglulu 		assert(IS_IN_HYP());
203*91f16700Schasinglulu 
204*91f16700Schasinglulu 		/*
205*91f16700Schasinglulu 		 * Set HTCR bits as well. Set HTTBR table properties
206*91f16700Schasinglulu 		 * as Inner & outer WBWA & shareable.
207*91f16700Schasinglulu 		 */
208*91f16700Schasinglulu 		ttbcr |= HTCR_RES1 |
209*91f16700Schasinglulu 			 HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
210*91f16700Schasinglulu 			 HTCR_RGN0_INNER_WBA;
211*91f16700Schasinglulu 	}
212*91f16700Schasinglulu 
213*91f16700Schasinglulu 	/*
214*91f16700Schasinglulu 	 * Limit the input address ranges and memory region sizes translated
215*91f16700Schasinglulu 	 * using TTBR0 to the given virtual address space size, if smaller than
216*91f16700Schasinglulu 	 * 32 bits.
217*91f16700Schasinglulu 	 */
218*91f16700Schasinglulu 	if (max_va != UINT32_MAX) {
219*91f16700Schasinglulu 		uintptr_t virtual_addr_space_size = max_va + 1U;
220*91f16700Schasinglulu 
221*91f16700Schasinglulu 		assert(virtual_addr_space_size >=
222*91f16700Schasinglulu 			xlat_get_min_virt_addr_space_size());
223*91f16700Schasinglulu 		assert(IS_POWER_OF_TWO(virtual_addr_space_size));
224*91f16700Schasinglulu 
225*91f16700Schasinglulu 		/*
226*91f16700Schasinglulu 		 * __builtin_ctzll(0) is undefined but here we are guaranteed
227*91f16700Schasinglulu 		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
228*91f16700Schasinglulu 		 */
229*91f16700Schasinglulu 		int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
230*91f16700Schasinglulu 
231*91f16700Schasinglulu 		ttbcr |= (uint32_t) t0sz;
232*91f16700Schasinglulu 	}
233*91f16700Schasinglulu 
234*91f16700Schasinglulu 	/*
235*91f16700Schasinglulu 	 * Set the cacheability and shareability attributes for memory
236*91f16700Schasinglulu 	 * associated with translation table walks using TTBR0.
237*91f16700Schasinglulu 	 */
238*91f16700Schasinglulu 	if ((flags & XLAT_TABLE_NC) != 0U) {
239*91f16700Schasinglulu 		/* Inner & outer non-cacheable non-shareable. */
240*91f16700Schasinglulu 		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
241*91f16700Schasinglulu 			TTBCR_RGN0_INNER_NC;
242*91f16700Schasinglulu 	} else {
243*91f16700Schasinglulu 		/* Inner & outer WBWA & shareable. */
244*91f16700Schasinglulu 		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
245*91f16700Schasinglulu 			TTBCR_RGN0_INNER_WBA;
246*91f16700Schasinglulu 	}
247*91f16700Schasinglulu 
248*91f16700Schasinglulu 	/* Set TTBR0 bits as well */
249*91f16700Schasinglulu 	ttbr0 = (uint64_t)(uintptr_t) base_table;
250*91f16700Schasinglulu 
251*91f16700Schasinglulu 	if (is_armv8_2_ttcnp_present()) {
252*91f16700Schasinglulu 		/* Enable CnP bit so as to share page tables with all PEs. */
253*91f16700Schasinglulu 		ttbr0 |= TTBR_CNP_BIT;
254*91f16700Schasinglulu 	}
255*91f16700Schasinglulu 
256*91f16700Schasinglulu 	/* Now populate MMU configuration */
257*91f16700Schasinglulu 	params[MMU_CFG_MAIR] = mair;
258*91f16700Schasinglulu 	params[MMU_CFG_TCR] = (uint64_t) ttbcr;
259*91f16700Schasinglulu 	params[MMU_CFG_TTBR0] = ttbr0;
260*91f16700Schasinglulu }
261