xref: /arm-trusted-firmware/lib/xlat_tables_v2/xlat_tables_core.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/* Helper function that cleans the data cache only if it is enabled. */
static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size)
{
	if (is_dcache_enabled())
		clean_dcache_range(addr, size);
}
#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * The following functions assume that they will be called using subtables
 * only. The base table can't be unmapped, so no special handling is needed
 * for it.
 */

/*
 * Returns the index within the context's array of tables that corresponds to
 * the specified translation table.
 */
static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
{
	for (int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables[i] == table)
			return i;

	/*
	 * Maybe we were asked to get the index of the base level table, which
	 * should never happen.
	 */
	assert(false);

	return -1;
}

/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
{
	for (int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables_mapped_regions[i] == 0)
			return ctx->tables[i];

	return NULL;
}

/* Increments the region count for a given table. */
static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
					 const uint64_t *table)
{
	int idx = xlat_table_get_index(ctx, table);

	ctx->tables_mapped_regions[idx]++;
}

/* Decrements the region count for a given table. */
static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
					 const uint64_t *table)
{
	int idx = xlat_table_get_index(ctx, table);

	ctx->tables_mapped_regions[idx]--;
}

/* Returns true if the specified table is empty, false otherwise. */
static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
{
	return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
}

#else /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */


/*
 * Returns a block/page table descriptor for the given level and attributes.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
		   unsigned long long addr_pa, unsigned int level)
{
	uint64_t desc;
	uint32_t mem_type;
	uint32_t shareability_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and
	 * the rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/* Determine the physical address space this region belongs to. */
	desc |= xlat_arch_get_pas(attr);

	/*
	 * Deduce other fields of the descriptor based on the MT_RW memory
	 * region attributes.
	 */
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Do not allow unprivileged access when the mapping is for a
	 * privileged EL. For translation regimes with no mappings for lower
	 * exception levels (EL2 and EL3), AP[1] is RES1 and must be set.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		if ((attr & MT_USER) != 0U) {
			/* EL0 mapping requested, so grant User access. */
			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
		} else {
			/* EL1 mapping requested, no User access granted. */
			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
		}
	} else {
		assert((ctx->xlat_regime == EL2_REGIME) ||
		       (ctx->xlat_regime == EL3_REGIME));
		desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	}

	/*
	 * Deduce the shareability domain and executability of the memory
	 * region from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these two
	 * types of memory, setting the shareability field in the translation
	 * tables is not strictly needed.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative
		 * instruction fetch, which could be an issue if this memory
		 * region corresponds to a read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN
		 * bit, which causes any writable memory region to be treated
		 * as execute-never, regardless of the value of the XN bit in
		 * the translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit. The actual
		 * XN bit(s) to set in the descriptor depend on the context's
		 * translation regime and the policy applied in
		 * xlat_arch_regime_get_xn_desc().
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		shareability_type = MT_SHAREABILITY(attr);
		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX);
			if (shareability_type == MT_SHAREABILITY_NSH) {
				desc |= LOWER_ATTRS(NSH);
			} else if (shareability_type == MT_SHAREABILITY_OSH) {
				desc |= LOWER_ATTRS(OSH);
			} else {
				desc |= LOWER_ATTRS(ISH);
			}

			/* Check if Branch Target Identification is enabled. */
#if ENABLE_BTI
			/*
			 * Set the GP bit for block and page code entries if
			 * the BTI mechanism is implemented.
			 */
			if (is_armv8_5_bti_present() &&
			   ((attr & (MT_TYPE_MASK | MT_RW |
				MT_EXECUTE_NEVER)) == MT_CODE)) {
				desc |= GP;
			}
#endif
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
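
/*
 * Illustrative sketch (not part of the library): for a 2 MiB aligned level 2
 * mapping in the EL3 regime with attributes MT_MEMORY | MT_RW | MT_SECURE,
 * the code above composes a descriptor roughly as:
 *
 *	desc = addr_pa				// output address
 *	     | BLOCK_DESC			// level < 3
 *	     | LOWER_ATTRS(ACCESS_FLAG)		// AF always set
 *	     | xlat_arch_get_pas(attr)		// physical address space
 *	     | LOWER_ATTRS(AP_RW)		// MT_RW
 *	     | LOWER_ATTRS(AP_ONE_VA_RANGE_RES1) // single VA range regime
 *	     | xlat_arch_regime_get_xn_desc(EL3_REGIME)	// R/W => XN
 *	     | LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH); // normal memory
 */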

/*
 * Enumeration of actions that can be taken when mapping table entries,
 * depending on the previous value in that entry and information about the
 * region being mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, at level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;

/*
 * Returns the first VA of the table affected by the specified mmap region.
 */
static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm,
				   const uintptr_t table_base_va,
				   const unsigned int level)
{
	uintptr_t table_idx_va;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
	}

	return table_idx_va;
}

/*
 * Returns the table index for the given VA and level arguments.
 */
static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
						const uintptr_t va,
						const unsigned int level)
{
	return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level));
}
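
/*
 * Worked example (hypothetical values): with a 4 KiB granule, a level 2 entry
 * spans XLAT_BLOCK_SIZE(2) = 2 MiB and XLAT_ADDR_SHIFT(2) = 21. For
 * table_base_va = 0x40000000 and va = 0x40600000, the index is
 * (0x40600000 - 0x40000000) >> 21 = 0x600000 >> 21 = 3.
 */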

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * Decides which action to take when unmapping the specified region, based on
 * the given arguments.
 */
static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
		const uintptr_t table_idx_va, const uintptr_t table_idx_end_va,
		const unsigned int level, const uint64_t desc_type)
{
	action_t action;
	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	if ((mm->base_va <= table_idx_va) &&
	    (region_end_va >= table_idx_end_va)) {
		/* Region covers the whole block. */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors allowed,
			 * erase it.
			 */
			assert(desc_type == PAGE_DESC);

			action = ACTION_WRITE_BLOCK_ENTRY;
		} else {
			/*
			 * Other levels can have table descriptors. If
			 * so, recurse into it and erase descriptors
			 * inside it as needed. If there is a block
			 * descriptor, just erase it. If an invalid
			 * descriptor is found, this table isn't
			 * actually mapped, which shouldn't happen.
			 */
			if (desc_type == TABLE_DESC) {
				action = ACTION_RECURSE_INTO_TABLE;
			} else {
				assert(desc_type == BLOCK_DESC);
				action = ACTION_WRITE_BLOCK_ENTRY;
			}
		}

	} else if ((mm->base_va <= table_idx_end_va) ||
		   (region_end_va >= table_idx_va)) {
		/*
		 * Region partially covers the block.
		 *
		 * This can't happen at level 3.
		 *
		 * There must be a table descriptor here; if not, there
		 * was a problem when mapping the region.
		 */
		assert(level < 3U);
		assert(desc_type == TABLE_DESC);

		action = ACTION_RECURSE_INTO_TABLE;
	} else {
		/* The region doesn't cover the block at all. */
		action = ACTION_NONE;
	}

	return action;
}

/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const unsigned int table_entries,
				     const unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	unsigned int table_idx;

	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = xlat_tables_unmap_region_action(mm,
				table_idx_va, table_idx_end_va, level,
				desc_type);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va,
						  ctx->xlat_regime);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If the end of the region was reached, exit. */
		if (region_end_va <= table_idx_va)
			break;
	}

	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/*
 * Decides which action to take when mapping the specified region, based on
 * the given arguments.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		unsigned int desc_type, unsigned long long dest_pa,
		uintptr_t table_entry_base_va, unsigned int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
				    || (level < MIN_LVL_BLOCK_DESC) ||
				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation; a finer table is needed.
		 *
		 * There cannot be partial block overlaps at level 3. If that
		 * happens, the preliminary checks performed when adding the
		 * mmap region failed to enforce that the PA and VA are at
		 * least aligned to PAGE_SIZE.
		 */
		assert(level < 3U);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't be reached for a
			 * level 3 table because partial overlaps can't occur
			 * at level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	} else {

		/*
		 * This table entry is outside of the region specified in the
		 * arguments, don't write anything to it.
		 */
		return ACTION_NONE;
	}
}
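
/*
 * Hypothetical walkthrough: mapping a single 4 KiB page at VA 0x1000 starts
 * at the base table. A level 1 entry spans 1 GiB, so the region only
 * partially covers it and ACTION_CREATE_NEW_TABLE is returned; the same
 * happens at level 2 (2 MiB entries). At level 3 the entry [0x1000, 0x1FFF]
 * is fully covered and invalid, so ACTION_WRITE_BLOCK_ENTRY finally writes a
 * page descriptor.
 */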

/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				   uintptr_t table_base_va,
				   uint64_t *const table_base,
				   unsigned int table_entries,
				   unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	unsigned int table_idx;

	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);

#if PLAT_XLAT_TABLES_DYNAMIC
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			(uint32_t)(desc & DESC_MASK), table_idx_pa,
			table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
					  level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {
			uintptr_t end_va;

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region. */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] =
				TABLE_DESC | (uintptr_t)subtable;

			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
			uintptr_t end_va;

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If the end of the region was reached, exit. */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1U;
}
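
/*
 * Example of the return value convention (hypothetical): mapping
 * [0x40000000, 0x401FFFFF] successfully returns 0x401FFFFF, the last byte
 * mapped. If the table pool runs out while creating a subtable for the
 * second megabyte, the call returns 0x40100000, the next VA that should
 * have been mapped, which callers compare against the expected end VA to
 * detect the failure.
 */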

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *         0: Success, the mapping is allowed.
 *   -EINVAL: Invalid values were used as arguments.
 *   -ERANGE: The memory limits were surpassed.
 *   -ENOMEM: There is not enough memory in the mmap array.
 *    -EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	size_t granularity = mm->granularity;

	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
		(granularity != XLAT_BLOCK_SIZE(2U)) &&
		(granularity != XLAT_BLOCK_SIZE(3U))) {
		return -EINVAL;
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if (end_va > ctx->va_max_address)
		return -ERANGE;

	if (end_pa > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
		return -ENOMEM;

	/* Check for PA and VA overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
							+ mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||
			((mm_cursor->base_va >= base_va) &&
						(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
							(base_va - base_pa))
				return -EPERM;

			if ((base_va == mm_cursor->base_va) &&
						(size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_cursor_end_pa =
				     mm_cursor->base_pa + mm_cursor->size - 1U;

			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			bool separated_va = (end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!separated_va || !separated_pa)
				return -EPERM;
		}
	}

	return 0;
}
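
/*
 * Example of a rejected mapping (hypothetical regions): if the context
 * already holds { base_pa 0x0, base_va 0x0, size 0x10000 } and the caller
 * adds { base_pa 0x80000000, base_va 0x8000, size 0x10000 }, the VA ranges
 * [0x8000, 0x17FFF] and [0x0, 0xFFFF] overlap without one being fully inside
 * the other, so the check above returns -EPERM.
 */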

void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
	const mmap_region_t *mm_last;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Ignore empty regions */
	if (mm->size == 0U)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(false);
		return;
	}

	/*
	 * Find the correct place in mmap to insert the new region. Sort by:
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/*
	 * Find the last entry marker in the mmap.
	 */
	mm_last = ctx->mmap;
	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
		++mm_last;
	}

	/*
	 * Check if we have enough space in the memory mapping table.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Make room for the new region by moving other regions up by one place. */
	mm_destination = mm_cursor + 1;
	(void)memmove(mm_destination, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check that we haven't lost the empty sentinel from the end of the
	 * array. This shouldn't happen as we have checked in
	 * mmap_add_region_check that there is free space.
	 */
	assert(mm_end->size == 0U);

	*mm_cursor = *mm;

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}

/*
 * Determine the table level closest to the initial lookup level that
 * can describe this translation. Then, align the base VA to the next block
 * at the determined level.
 */
static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	/*
	 * By OR'ing the size and the base PA, the resulting alignment is the
	 * smaller of the two.
	 *
	 * There are three different cases. For example (for 4 KiB page size):
	 *
	 * +--------------+------------------++--------------+
	 * | PA alignment | Size multiple of || VA alignment |
	 * +--------------+------------------++--------------+
	 * |     2 MiB    |       2 MiB      ||     2 MiB    | (1)
	 * |     2 MiB    |       4 KiB      ||     4 KiB    | (2)
	 * |     4 KiB    |       2 MiB      ||     4 KiB    | (3)
	 * +--------------+------------------++--------------+
	 *
	 * - In (1), it is possible to take advantage of the alignment of the PA
	 *   and the size of the region to use a level 2 translation table
	 *   instead of a level 3 one.
	 *
	 * - In (2), the size is smaller than a block entry of level 2, so a
	 *   level 3 table is needed to describe the region, or the library
	 *   will map more memory than desired.
	 *
	 * - In (3), even though the region has the size of one level 2 block
	 *   entry, it isn't possible to describe the translation with a level 2
	 *   block entry because of the alignment of the base PA.
	 *
	 *   Only bits 47:21 of a level 2 block descriptor are used by the MMU;
	 *   bits 20:0 of the resulting address are 0 in this case. Because of
	 *   this, the PA generated as a result of this translation is aligned
	 *   to 2 MiB. The PA that was requested to be mapped is aligned to
	 *   4 KiB, though, which means that the resulting translation is
	 *   incorrect. The only way to prevent this is by using a finer
	 *   granularity.
	 */
	unsigned long long align_check;

	align_check = mm->base_pa | (unsigned long long)mm->size;

	/*
	 * Assume it is always aligned to level 3. There's no need to check that
	 * level because its block size is PAGE_SIZE. The checks to verify that
	 * the addresses and size are aligned to PAGE_SIZE are inside
	 * mmap_add_region.
	 */
	for (unsigned int level = ctx->base_level; level <= 2U; ++level) {

		if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
			continue;

		mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
		return;
	}
}
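
/*
 * Worked example (hypothetical values, 4 KiB granule): for base_pa =
 * 0x80200000 and size = 0x200000, align_check = 0x80200000 | 0x200000 =
 * 0x80200000, whose low 21 bits are zero. This is case (1) above:
 * (align_check & XLAT_BLOCK_MASK(2)) == 0, so base_va is rounded up to the
 * next 2 MiB boundary, allowing level 2 block descriptors.
 */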

void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mm->base_va = ctx->max_va + 1UL;

	assert(mm->size > 0U);

	mmap_alloc_va_align_ctx(ctx, mm);

	/* Detect overflows. More checks are done in mmap_add_region_check(). */
	assert(mm->base_va > ctx->max_va);

	mmap_add_region_ctx(ctx, mm);
}

void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while (mm_cursor->granularity != 0U) {
		mmap_add_region_ctx(ctx, mm_cursor);
		mm_cursor++;
	}
}
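
/*
 * Typical usage sketch (hypothetical platform code and addresses): static
 * regions are described with the MAP_REGION* helpers from xlat_tables_v2.h
 * and added before the tables are initialized, e.g.:
 *
 *	static const mmap_region_t plat_mmap[] = {
 *		MAP_REGION_FLAT(0x00000000, 0x04000000,
 *				MT_MEMORY | MT_RW | MT_SECURE),
 *		MAP_REGION_FLAT(0x10000000, 0x00100000,
 *				MT_DEVICE | MT_RW | MT_SECURE),
 *		{0}
 *	};
 *
 *	mmap_add_ctx(ctx, plat_mmap);
 *
 * The terminating {0} entry has granularity == 0, which ends the loop in
 * mmap_add_ctx().
 */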

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Nothing to do */
	if (mm->size == 0U)
		return 0;

	/* Now this region is a dynamic one */
	mm->attr |= MT_DYNAMIC;

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/* Make room for the new region by moving other regions up by one place. */
	(void)memmove(mm_cursor + 1U, mm_cursor,
		     (uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check that we haven't lost the empty sentinel from the end of the
	 * array. This shouldn't happen as we have checked in
	 * mmap_add_region_check that there is free space.
	 */
	assert(mm_last->size == 0U);

	*mm_cursor = *mm;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		end_va = xlat_tables_map_region(ctx, mm_cursor,
				0U, ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				   ctx->base_table_entries * sizeof(uint64_t));
#endif
		/* Failed to map: remove the mmap entry, unmap and return an error. */
		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
			(void)memmove(mm_cursor, mm_cursor + 1U,
				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
					.base_pa = 0U,
					.base_va = mm->base_va,
					.size = end_va - mm->base_va,
					.attr = 0U
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				ctx->base_table_entries * sizeof(uint64_t));
#endif
			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to memory. There is
		 * no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, which aren't TLB-cached.
		 */
		dsbishst();
	}

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
1059*91f16700Schasinglulu 
1060*91f16700Schasinglulu int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
1061*91f16700Schasinglulu {
1062*91f16700Schasinglulu 	mm->base_va = ctx->max_va + 1UL;
1063*91f16700Schasinglulu 
1064*91f16700Schasinglulu 	if (mm->size == 0U)
1065*91f16700Schasinglulu 		return 0;
1066*91f16700Schasinglulu 
1067*91f16700Schasinglulu 	mmap_alloc_va_align_ctx(ctx, mm);
1068*91f16700Schasinglulu 
1069*91f16700Schasinglulu 	/* Detect overflows. More checks are done in mmap_add_region_check(). */
1070*91f16700Schasinglulu 	if (mm->base_va < ctx->max_va) {
1071*91f16700Schasinglulu 		return -ENOMEM;
1072*91f16700Schasinglulu 	}
1073*91f16700Schasinglulu 
1074*91f16700Schasinglulu 	return mmap_add_dynamic_region_ctx(ctx, mm);
1075*91f16700Schasinglulu }
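/*
 * Illustrative sketch (hypothetical values): letting the library choose the
 * virtual address. Only the PA, size and attributes need to be meaningful;
 * base_va is overwritten with a suitably aligned address above ctx->max_va,
 * and the wrap-around check above catches the case where that allocation
 * runs past the top of the virtual address space:
 *
 *	mmap_region_t dyn = MAP_REGION(0x90000000ULL, 0UL, PAGE_SIZE,
 *				       MT_DEVICE | MT_RW | MT_SECURE);
 *
 *	if (mmap_add_dynamic_region_alloc_va_ctx(&example_ctx, &dyn) == 0) {
 *		uintptr_t va = dyn.base_va;	// VA picked by the library
 *	}
 */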
1076*91f16700Schasinglulu 
1077*91f16700Schasinglulu /*
1078*91f16700Schasinglulu  * Removes the region with the given base virtual address and size from the
1079*91f16700Schasinglulu  * given context.
1080*91f16700Schasinglulu  *
1081*91f16700Schasinglulu  * Returns:
1082*91f16700Schasinglulu  *        0: Success.
1083*91f16700Schasinglulu  *   EINVAL: Invalid values were used as arguments (region not found).
1084*91f16700Schasinglulu  *    EPERM: Tried to remove a static region.
1085*91f16700Schasinglulu  */
1086*91f16700Schasinglulu int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
1087*91f16700Schasinglulu 				   size_t size)
1088*91f16700Schasinglulu {
1089*91f16700Schasinglulu 	mmap_region_t *mm = ctx->mmap;
1090*91f16700Schasinglulu 	const mmap_region_t *mm_last = mm + ctx->mmap_num;
1091*91f16700Schasinglulu 	int update_max_va_needed = 0;
1092*91f16700Schasinglulu 	int update_max_pa_needed = 0;
1093*91f16700Schasinglulu 
1094*91f16700Schasinglulu 	/* Check sanity of mmap array. */
1095*91f16700Schasinglulu 	assert(mm[ctx->mmap_num].size == 0U);
1096*91f16700Schasinglulu 
1097*91f16700Schasinglulu 	while (mm->size != 0U) {
1098*91f16700Schasinglulu 		if ((mm->base_va == base_va) && (mm->size == size))
1099*91f16700Schasinglulu 			break;
1100*91f16700Schasinglulu 		++mm;
1101*91f16700Schasinglulu 	}
1102*91f16700Schasinglulu 
1103*91f16700Schasinglulu 	/* Check that the region was found. */
1104*91f16700Schasinglulu 	if (mm->size == 0U)
1105*91f16700Schasinglulu 		return -EINVAL;
1106*91f16700Schasinglulu 
1107*91f16700Schasinglulu 	/* If the region is static, it can't be removed. */
1108*91f16700Schasinglulu 	if ((mm->attr & MT_DYNAMIC) == 0U)
1109*91f16700Schasinglulu 		return -EPERM;
1110*91f16700Schasinglulu 
1111*91f16700Schasinglulu 	/* Check if this region is using the top VAs or PAs. */
1112*91f16700Schasinglulu 	if ((mm->base_va + mm->size - 1U) == ctx->max_va)
1113*91f16700Schasinglulu 		update_max_va_needed = 1;
1114*91f16700Schasinglulu 	if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
1115*91f16700Schasinglulu 		update_max_pa_needed = 1;
1116*91f16700Schasinglulu 
1117*91f16700Schasinglulu 	/* Update the translation tables if needed */
1118*91f16700Schasinglulu 	if (ctx->initialized) {
1119*91f16700Schasinglulu 		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
1120*91f16700Schasinglulu 					 ctx->base_table_entries,
1121*91f16700Schasinglulu 					 ctx->base_level);
1122*91f16700Schasinglulu #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1123*91f16700Schasinglulu 		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1124*91f16700Schasinglulu 			ctx->base_table_entries * sizeof(uint64_t));
1125*91f16700Schasinglulu #endif
1126*91f16700Schasinglulu 		xlat_arch_tlbi_va_sync();
1127*91f16700Schasinglulu 	}
1128*91f16700Schasinglulu 
1129*91f16700Schasinglulu 	/* Remove this region by moving the rest down by one place. */
1130*91f16700Schasinglulu 	(void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
1131*91f16700Schasinglulu 
1132*91f16700Schasinglulu 	/* Check if we need to update the max VAs and PAs */
1133*91f16700Schasinglulu 	if (update_max_va_needed == 1) {
1134*91f16700Schasinglulu 		ctx->max_va = 0U;
1135*91f16700Schasinglulu 		mm = ctx->mmap;
1136*91f16700Schasinglulu 		while (mm->size != 0U) {
1137*91f16700Schasinglulu 			if ((mm->base_va + mm->size - 1U) > ctx->max_va)
1138*91f16700Schasinglulu 				ctx->max_va = mm->base_va + mm->size - 1U;
1139*91f16700Schasinglulu 			++mm;
1140*91f16700Schasinglulu 		}
1141*91f16700Schasinglulu 	}
1142*91f16700Schasinglulu 
1143*91f16700Schasinglulu 	if (update_max_pa_needed == 1) {
1144*91f16700Schasinglulu 		ctx->max_pa = 0U;
1145*91f16700Schasinglulu 		mm = ctx->mmap;
1146*91f16700Schasinglulu 		while (mm->size != 0U) {
1147*91f16700Schasinglulu 			if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
1148*91f16700Schasinglulu 				ctx->max_pa = mm->base_pa + mm->size - 1U;
1149*91f16700Schasinglulu 			++mm;
1150*91f16700Schasinglulu 		}
1151*91f16700Schasinglulu 	}
1152*91f16700Schasinglulu 
1153*91f16700Schasinglulu 	return 0;
1154*91f16700Schasinglulu }
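/*
 * Illustrative sketch: removal must pass the exact base VA and size the
 * region was created with, since the search above matches on both fields.
 * Continuing the hypothetical example from the add functions:
 *
 *	int rc = mmap_remove_dynamic_region_ctx(&example_ctx,
 *						dyn.base_va, dyn.size);
 *	// rc == -EINVAL: no such region; rc == -EPERM: region is static.
 */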
1155*91f16700Schasinglulu 
1156*91f16700Schasinglulu void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
1157*91f16700Schasinglulu 			    uintptr_t va_max, struct mmap_region *mmap,
1158*91f16700Schasinglulu 			    unsigned int mmap_num, uint64_t **tables,
1159*91f16700Schasinglulu 			    unsigned int tables_num, uint64_t *base_table,
1160*91f16700Schasinglulu 			    int xlat_regime, int *mapped_regions)
1161*91f16700Schasinglulu {
1162*91f16700Schasinglulu 	ctx->xlat_regime = xlat_regime;
1163*91f16700Schasinglulu 
1164*91f16700Schasinglulu 	ctx->pa_max_address = pa_max;
1165*91f16700Schasinglulu 	ctx->va_max_address = va_max;
1166*91f16700Schasinglulu 
1167*91f16700Schasinglulu 	ctx->mmap = mmap;
1168*91f16700Schasinglulu 	ctx->mmap_num = mmap_num;
1169*91f16700Schasinglulu 	(void)memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);
1170*91f16700Schasinglulu 
1171*91f16700Schasinglulu 	ctx->tables = (void *) tables;
1172*91f16700Schasinglulu 	ctx->tables_num = tables_num;
1173*91f16700Schasinglulu 
1174*91f16700Schasinglulu 	uintptr_t va_space_size = va_max + 1U;
1175*91f16700Schasinglulu 	ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
1176*91f16700Schasinglulu 	ctx->base_table = base_table;
1177*91f16700Schasinglulu 	ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);
1178*91f16700Schasinglulu 
1179*91f16700Schasinglulu 	ctx->tables_mapped_regions = mapped_regions;
1180*91f16700Schasinglulu 
1181*91f16700Schasinglulu 	ctx->max_pa = 0U;
1182*91f16700Schasinglulu 	ctx->max_va = 0U;
1183*91f16700Schasinglulu 	ctx->initialized = false;
1184*91f16700Schasinglulu }
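/*
 * Illustrative setup sketch (every size below is hypothetical). The caller
 * owns all the storage; note the extra terminating mmap entry, which the
 * asserts in this file expect to remain zeroed, and that the table pointer
 * array must match the layout this function stores in ctx->tables:
 *
 *	static uint64_t ex_base[GET_NUM_BASE_LEVEL_ENTRIES(1ULL << 32)];
 *	static uint64_t ex_tbl0[XLAT_TABLE_ENTRIES] __aligned(XLAT_TABLE_SIZE);
 *	static uint64_t *ex_tables[1] = { ex_tbl0 };
 *	static mmap_region_t ex_mmap[8 + 1];	// +1 zero terminator
 *	static int ex_mapped[1];
 *	static xlat_ctx_t ex_ctx;
 *
 *	xlat_setup_dynamic_ctx(&ex_ctx, (1ULL << 32) - 1ULL,
 *			       (1UL << 32) - 1UL, ex_mmap, 8U, ex_tables,
 *			       1U, ex_base, EL1_EL0_REGIME, ex_mapped);
 */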
1185*91f16700Schasinglulu 
1186*91f16700Schasinglulu #endif /* PLAT_XLAT_TABLES_DYNAMIC */
1187*91f16700Schasinglulu 
1188*91f16700Schasinglulu void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
1189*91f16700Schasinglulu {
1190*91f16700Schasinglulu 	assert(ctx != NULL);
1191*91f16700Schasinglulu 	assert(!ctx->initialized);
1192*91f16700Schasinglulu 	assert((ctx->xlat_regime == EL3_REGIME) ||
1193*91f16700Schasinglulu 	       (ctx->xlat_regime == EL2_REGIME) ||
1194*91f16700Schasinglulu 	       (ctx->xlat_regime == EL1_EL0_REGIME));
1195*91f16700Schasinglulu 	assert(!is_mmu_enabled_ctx(ctx));
1196*91f16700Schasinglulu 
1197*91f16700Schasinglulu 	mmap_region_t *mm = ctx->mmap;
1198*91f16700Schasinglulu 
1199*91f16700Schasinglulu 	assert(ctx->va_max_address >=
1200*91f16700Schasinglulu 		(xlat_get_min_virt_addr_space_size() - 1U));
1201*91f16700Schasinglulu 	assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
1202*91f16700Schasinglulu 	assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));
1203*91f16700Schasinglulu 
1204*91f16700Schasinglulu 	xlat_mmap_print(mm);
1205*91f16700Schasinglulu 
1206*91f16700Schasinglulu 	/* All tables must be zeroed before mapping any region. */
1207*91f16700Schasinglulu 
1208*91f16700Schasinglulu 	for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
1209*91f16700Schasinglulu 		ctx->base_table[i] = INVALID_DESC;
1210*91f16700Schasinglulu 
1211*91f16700Schasinglulu 	for (int j = 0; j < ctx->tables_num; j++) {
1212*91f16700Schasinglulu #if PLAT_XLAT_TABLES_DYNAMIC
1213*91f16700Schasinglulu 		ctx->tables_mapped_regions[j] = 0;
1214*91f16700Schasinglulu #endif
1215*91f16700Schasinglulu 		for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
1216*91f16700Schasinglulu 			ctx->tables[j][i] = INVALID_DESC;
1217*91f16700Schasinglulu 	}
1218*91f16700Schasinglulu 
1219*91f16700Schasinglulu 	while (mm->size != 0U) {
1220*91f16700Schasinglulu 		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
1221*91f16700Schasinglulu 				ctx->base_table, ctx->base_table_entries,
1222*91f16700Schasinglulu 				ctx->base_level);
1223*91f16700Schasinglulu #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1224*91f16700Schasinglulu 		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1225*91f16700Schasinglulu 				   ctx->base_table_entries * sizeof(uint64_t));
1226*91f16700Schasinglulu #endif
1227*91f16700Schasinglulu 		if (end_va != (mm->base_va + mm->size - 1U)) {
1228*91f16700Schasinglulu 			ERROR("Not enough memory to map region:\n"
1229*91f16700Schasinglulu 			      " VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x\n",
1230*91f16700Schasinglulu 			      mm->base_va, mm->base_pa, mm->size, mm->attr);
1231*91f16700Schasinglulu 			panic();
1232*91f16700Schasinglulu 		}
1233*91f16700Schasinglulu 
1234*91f16700Schasinglulu 		mm++;
1235*91f16700Schasinglulu 	}
1236*91f16700Schasinglulu 
1237*91f16700Schasinglulu 	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
1238*91f16700Schasinglulu 	assert(ctx->max_va <= ctx->va_max_address);
1239*91f16700Schasinglulu 	assert(ctx->max_pa <= ctx->pa_max_address);
1240*91f16700Schasinglulu 
1241*91f16700Schasinglulu 	ctx->initialized = true;
1242*91f16700Schasinglulu 
1243*91f16700Schasinglulu 	xlat_tables_print(ctx);
1244*91f16700Schasinglulu }
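/*
 * Illustrative bring-up order (EL3 shown; enable_mmu_el3() is part of the
 * public xlat_tables_v2 API, and the region is hypothetical): static regions
 * go in first, the tables are then built exactly once, and the MMU is only
 * enabled afterwards, which is why the asserts above require it to be off:
 *
 *	mmap_add_region_ctx(&example_ctx, &some_static_region);
 *	init_xlat_tables_ctx(&example_ctx);
 *	enable_mmu_el3(0U);
 */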
1245