/*
 * Copyright (c) 2016-2018, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/cassert.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <plat/common/common_def.h>

#include "xlat_tables_private.h"

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
#define LVL1_SPACER "  "
#define LVL2_SPACER "    "
#define LVL3_SPACER "      "
#define get_level_spacer(level)		\
		(((level) == U(0)) ? LVL0_SPACER : \
		(((level) == U(1)) ? LVL1_SPACER : \
		(((level) == U(2)) ? LVL2_SPACER : LVL3_SPACER)))
#define debug_print(...) printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif

/* Marker for a translation table entry that has not been decided yet. */
#define UNSET_DESC	~0ULL
/* Returned by mmap_region_attr() when attributes cannot be resolved. */
#define MT_UNKNOWN	~0U

static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
			__aligned(XLAT_TABLE_SIZE) __section(".xlat_table");

static unsigned int next_xlat;
static unsigned long long xlat_max_pa;
static uintptr_t xlat_max_va;

static uint64_t execute_never_mask;
static uint64_t ap1_mask;

/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
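
/*
 * Usage sketch (illustrative only; the region names and sizes below are
 * assumptions, not part of this library): a platform typically registers its
 * regions and then builds and enables the translation tables, e.g.
 *
 *	mmap_add_region(UART0_BASE, UART0_BASE, UART0_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	mmap_add_region(DRAM0_BASE, DRAM0_BASE, DRAM0_SIZE,
 *			MT_MEMORY | MT_RW | MT_NS);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */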

void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	debug_print("mmap:\n");
	mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		debug_print(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
				(void *)mm->base_va, mm->base_pa,
				mm->size, mm->attr);
		++mm;
	}
	debug_print("\n");
#endif
}

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
		     size_t size, unsigned int attr)
{
	mmap_region_t *mm = mmap;
	const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	assert(IS_PAGE_ALIGNED(base_pa));
	assert(IS_PAGE_ALIGNED(base_va));
	assert(IS_PAGE_ALIGNED(size));

	if (size == 0U)
		return;

	assert(base_pa < end_pa); /* Check for overflows */
	assert(base_va < end_va);

	assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
					(PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert((base_pa + (unsigned long long)size - 1ULL) <=
					(PLAT_PHY_ADDR_SPACE_SIZE - 1U));

#if ENABLE_ASSERTIONS

	/* Check for PA and VA overlaps with all other regions */
	for (mm = mmap; mm->size != 0U; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 */
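		/*
		 * Illustrative example (hypothetical addresses): an identity
		 * mapping of [0x0, 64 KiB] may coexist with an identity
		 * mapping of [0x1000, 4 KiB], because the VA to PA offsets
		 * match and the areas are not identical. A region covering
		 * [0xF000, 8 KiB] would trip the assertion below instead, as
		 * it only partially overlaps the first region.
		 */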
		if (fully_overlapped_va) {
			assert((mm->base_va - mm->base_pa) ==
			       (base_va - base_pa));
			assert((base_va != mm->base_va) || (size != mm->size));
		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_end_pa =
						mm->base_pa + mm->size - 1U;

			bool separated_pa = (end_pa < mm->base_pa) ||
				(base_pa > mm_end_pa);
			bool separated_va = (end_va < mm->base_va) ||
				(base_va > mm_end_va);

			assert(separated_va && separated_pa);
		}
	}

	mm = mmap; /* Restore pointer to the start of the array */

#endif /* ENABLE_ASSERTIONS */

	/* Find the correct place in mmap to insert the new region */
	while ((mm->base_va < base_va) && (mm->size != 0U))
		++mm;

	/*
	 * If a section is contained inside another one with the same base
	 * address, it must be placed after the one it is contained in:
	 *
	 * 1st |-----------------------|
	 * 2nd |------------|
	 * 3rd |------|
	 *
	 * This is required for mmap_region_attr() to get the attributes of the
	 * small region correctly.
	 */
	while ((mm->base_va == base_va) && (mm->size > size))
		++mm;

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0U);

	mm->base_pa = base_pa;
	mm->base_va = base_va;
	mm->size = size;
	mm->attr = attr;

	if (end_pa > xlat_max_pa)
		xlat_max_pa = end_pa;
	if (end_va > xlat_max_va)
		xlat_max_va = end_va;
}

void mmap_add(const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while ((mm_cursor->size != 0U) || (mm_cursor->attr != 0U)) {
		mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
				mm_cursor->size, mm_cursor->attr);
		mm_cursor++;
	}
}
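
/*
 * Worked example (illustrative): for attr == (MT_MEMORY | MT_RW | MT_SECURE)
 * at a level that allows block descriptors, mmap_desc() returns a BLOCK_DESC
 * with AP_RW, the access flag and inner-shareable write-back write-allocate
 * attributes set, plus the execute-never bits, because this library never
 * maps read-write memory as executable.
 */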
static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
			  unsigned int level)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and
	 * the rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);
	desc |= ap1_mask;
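	/*
	 * Note: ap1_mask is computed in init_xlation_table() below. AP[1] is
	 * RES1 in translation regimes with a single VA range, such as EL3;
	 * at EL1 it would grant EL0 access, so it is left at zero there.
	 */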

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type.
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these two
	 * types of memory, it is not strictly needed to set the shareability
	 * field in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative
		 * instruction fetch, which could be an issue if this memory
		 * region corresponds to a read-sensitive peripheral.
		 */
		desc |= execute_never_mask;

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN
		 * bit, which makes any writable memory region be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= execute_never_mask;
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	debug_print((mem_type == MT_MEMORY) ? "MEM" :
		((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
	debug_print(((attr & MT_RW) != 0U) ? "-RW" : "-RO");
	debug_print(((attr & MT_NS) != 0U) ? "-NS" : "-S");
	debug_print(((attr & MT_EXECUTE_NEVER) != 0U) ? "-XN" : "-EXEC");

	return desc;
}

/*
 * Look for the innermost region that contains the area at `base_va` with size
 * `size`. Populate *attr with the attributes of this region.
 *
 * On success, this function returns 0.
 * If there are partial overlaps (meaning that a smaller size is needed) or if
 * the region can't be found in the given area, it returns MT_UNKNOWN. In this
 * case the value pointed to by attr should be ignored by the caller.
 */
static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
				     size_t size, unsigned int *attr)
{
	/* Don't assume that the area is contained in the first region */
	unsigned int ret = MT_UNKNOWN;

	/*
	 * Get attributes from the last (innermost) region that contains the
	 * requested area. Don't stop as soon as one region doesn't contain it
	 * because there may be other internal regions that contain this area:
	 *
	 * |-----------------------------1-----------------------------|
	 * |----2----|     |-------3-------|    |----5----|
	 *                   |--4--|
	 *
	 *                    |---| <- Area we want the attributes of.
	 *
	 * In this example, the area is contained in regions 1, 3 and 4 but not
	 * in region 2. The loop shouldn't stop at region 2 as inner regions
	 * have priority over outer regions; it should stop at region 5.
	 */
	for ( ; ; ++mm) {

		if (mm->size == 0U)
			return ret; /* Reached end of list */

		if (mm->base_va > (base_va + size - 1U))
			return ret; /* Next region is after area so end */

		if ((mm->base_va + mm->size - 1U) < base_va)
			continue; /* Next region has already been overtaken */

		if ((ret == 0U) && (mm->attr == *attr))
			continue; /* Region doesn't override attribs so skip */

		if ((mm->base_va > base_va) ||
			((mm->base_va + mm->size - 1U) < (base_va + size - 1U)))
			return MT_UNKNOWN; /* Region doesn't fully cover area */

		*attr = mm->attr;
		ret = 0U;
	}
}
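
/*
 * Illustrative note (assuming a 4 KiB translation granule): each level of the
 * recursion below maps blocks 512 times smaller than the previous one, e.g.
 * 1 GiB blocks at level 1, 2 MiB blocks at level 2 and 4 KiB pages at level 3.
 * An area that no single region covers at the current block size falls
 * through to a finer-grained table one level down.
 */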
static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
					       uintptr_t base_va,
					       uint64_t *table,
					       unsigned int level)
{
	assert((level >= XLAT_TABLE_LEVEL_MIN) &&
	       (level <= XLAT_TABLE_LEVEL_MAX));

	unsigned int level_size_shift =
		       L0_XLAT_ADDRESS_SHIFT - level * XLAT_TABLE_ENTRIES_SHIFT;
	u_register_t level_size = (u_register_t)1 << level_size_shift;
	u_register_t level_index_mask =
		((u_register_t)XLAT_TABLE_ENTRIES_MASK) << level_size_shift;

	debug_print("New xlat table:\n");

	do {
		uint64_t desc = UNSET_DESC;

		if (mm->size == 0U) {
			/* Done mapping regions; finish zeroing the table */
			desc = INVALID_DESC;
		} else if ((mm->base_va + mm->size - 1U) < base_va) {
			/* This area is after the region so get next region */
			++mm;
			continue;
		}

		debug_print("%s VA:%p size:0x%llx ", get_level_spacer(level),
			    (void *)base_va, (unsigned long long)level_size);

		if (mm->base_va > (base_va + level_size - 1U)) {
			/* Next region is after this area. Nothing to map yet */
			desc = INVALID_DESC;
		/* Make sure that the current level allows block descriptors */
		} else if (level >= XLAT_BLOCK_LEVEL_MIN) {
			/*
			 * Try to get attributes of this area. It will fail if
			 * there are partially overlapping regions. On success,
			 * it will return the innermost region's attributes.
			 */
			unsigned int attr;
			unsigned int r = mmap_region_attr(mm, base_va,
							  level_size, &attr);

			if (r == 0U) {
				desc = mmap_desc(attr,
					base_va - mm->base_va + mm->base_pa,
					level);
			}
		}

		if (desc == UNSET_DESC) {
			/* Area not covered by a region so need finer table */
			uint64_t *new_table = xlat_tables[next_xlat];

			next_xlat++;
			assert(next_xlat <= MAX_XLAT_TABLES);
			desc = TABLE_DESC | (uintptr_t)new_table;

			/* Recurse to fill in new table */
			mm = init_xlation_table_inner(mm, base_va,
						      new_table, level + 1U);
		}

		debug_print("\n");

		*table++ = desc;
		base_va += level_size;
	} while ((base_va & level_index_mask) &&
		 ((base_va - 1U) < (PLAT_VIRT_ADDR_SPACE_SIZE - 1U)));

	return mm;
}
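
/*
 * Build the translation tables for the current exception level from the mmap
 * regions, starting from the given root table and initial lookup level, and
 * report the highest mapped VA and PA back to the caller.
 */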
void init_xlation_table(uintptr_t base_va, uint64_t *table,
			unsigned int level, uintptr_t *max_va,
			unsigned long long *max_pa)
{
	unsigned int el = xlat_arch_current_el();

	execute_never_mask = xlat_arch_get_xn_desc(el);

	if (el == 3U) {
		ap1_mask = LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	} else {
		assert(el == 1U);
		ap1_mask = 0ULL;
	}

	init_xlation_table_inner(mmap, base_va, table, level);
	*max_va = xlat_max_va;
	*max_pa = xlat_max_pa;
}