/*
 * Copyright (c) 2017-2020, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * by the MMU enable assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

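/*
 * Add a static memory region with the given attributes to the translation
 * context of the currently executing BL image.
 */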
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

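/*
 * Add an array of static memory regions, terminated by a zero-filled entry,
 * to the active translation context.
 */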
void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

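/*
 * Add a static memory region, letting the library choose the virtual address.
 * The allocated VA is returned through base_va.
 */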
void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

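/*
 * Add an array of static memory regions with library-allocated virtual
 * addresses. The array is terminated by a zero-filled entry. Each entry must
 * have base_va set to 0, which the assertion below enforces.
 */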
void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}

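/*
 * Illustrative sketch (not part of this file's API): mapping a device region
 * at a library-chosen VA, where DEV_BASE_PA and DEV_SIZE are hypothetical
 * platform constants:
 *
 *	uintptr_t dev_va;
 *
 *	mmap_add_region_alloc_va(DEV_BASE_PA, &dev_va, DEV_SIZE,
 *				 MT_DEVICE | MT_RW | MT_SECURE);
 *
 * On return, dev_va holds the virtual address chosen for the region.
 */
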
#if PLAT_XLAT_TABLES_DYNAMIC

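/*
 * Map a dynamically allocated region. Unlike static regions, dynamic regions
 * can be added and removed at any time, even with the MMU enabled. Returns 0
 * on success, or an error code otherwise.
 */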
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

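/*
 * Map a dynamically allocated region, letting the library choose the virtual
 * address, which is returned through base_va. Returns 0 on success, or an
 * error code otherwise.
 */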
int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

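/*
 * Unmap a previously added dynamic region. Returns 0 on success, or an error
 * code otherwise.
 */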
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
}

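/*
 * Illustrative sketch (not part of this file's API): a typical map/use/unmap
 * sequence for a dynamic region, where BUF_PA and BUF_SIZE are hypothetical
 * constants:
 *
 *	uintptr_t buf_va;
 *	int rc = mmap_add_dynamic_region_alloc_va(BUF_PA, &buf_va, BUF_SIZE,
 *						  MT_MEMORY | MT_RW | MT_SECURE);
 *	if (rc == 0) {
 *		(use the buffer through buf_va)
 *		rc = mmap_remove_dynamic_region(buf_va, BUF_SIZE);
 *	}
 */
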
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

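/*
 * Initialise the translation tables of the active context, selecting its
 * translation regime from the exception level the image runs at.
 */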
void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}

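/*
 * Illustrative sketch (not part of this file's API): the usual early-boot
 * sequence in a BL image running at EL3, where plat_regions is a hypothetical
 * zero-terminated array of mmap_region_t:
 *
 *	mmap_add(plat_regions);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */

/*
 * Return, through attr, the attributes of the memory page mapped at base_va
 * in the active translation context.
 */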
int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

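/*
 * Change the attributes of the memory pages covering the range [base_va,
 * base_va + size) in the active translation context. base_va and size must be
 * page-aligned and the range must already be mapped.
 */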
int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}

#if PLAT_RO_XLAT_TABLES
/*
 * Change the memory attributes of the descriptors that map the address range
 * occupied by the translation tables themselves. By default, the tables are
 * mapped as part of the read-write data of the BL image.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken-and-egg problem because of the break-before-make sequence.
 * Eventually, the sequence would reach the descriptor that resolves the very
 * table it belongs to, and the invalidation (break step) would cause the
 * subsequent write (make step) to generate an MMU fault. Therefore, the MMU
 * is disabled before making the change.
 *
 * No assumption is made about what data this function needs, so all the
 * caches are flushed to ensure coherency. A future optimization would be to
 * flush only the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
	assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		disable_mmu_el1();
	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
		disable_mmu_el3();
	} else {
		/* Making the tables read-only is not supported at EL2. */
		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
		return -1;
	}

	/* Flush all caches. */
	dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
	assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
	/*
	 * On AArch32, flush the caches before disabling the MMU. The reason
	 * is that the AArch32 dcsw_op_all function pushes some registers onto
	 * the stack under the assumption that it is writing to cache, which
	 * is not true with the MMU off. This would corrupt the stack and
	 * restore a wrong/junk value for the LR at the end of the routine.
	 */
	dcsw_op_all(DC_OP_CISW);
	disable_mmu_secure();
#endif

	int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
				(uintptr_t)tf_xlat_ctx.tables,
				tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
				MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		enable_mmu_el1(0U);
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
		enable_mmu_el3(0U);
	}
#else /* !__aarch64__ */
	enable_mmu_svc_mon(0U);
#endif

	if (rc == 0) {
		tf_xlat_ctx.readonly_tables = true;
	}

	return rc;
}
#endif /* PLAT_RO_XLAT_TABLES */

/*
 * If dynamic allocation of new regions is disabled, then by the time we call
 * the function that enables the MMU, we will have registered all the memory
 * regions to map for the system's lifetime. Therefore, at this point we know
 * the maximum physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled, we can't make any such assumption because
 * the maximum physical address could increase when a new region is added.
 * Therefore, in this case we have to assume that the whole physical address
 * space size might be mapped.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef __aarch64__

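/*
 * The enable_mmu_elx() helpers below fill mmu_cfg_params for the requested
 * translation regime and then enable the MMU through the corresponding
 * enable_mmu_direct_elx() assembly helper, which consumes those values.
 */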
void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

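/* Enable the MMU at the exception level this image is currently running at. */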
void enable_mmu(unsigned int flags)
{
	switch (get_current_el_maybe_constant()) {
	case 1:
		enable_mmu_el1(flags);
		break;
	case 2:
		enable_mmu_el2(flags);
		break;
	case 3:
		enable_mmu_el3(flags);
		break;
	default:
		panic();
	}
}

#else /* !__aarch64__ */

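/*
 * AArch32 equivalents of the helpers above: Secure SVC/Monitor mode uses the
 * EL1&0 translation regime and Hyp mode uses the EL2 regime.
 */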
void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */