1*91f16700Schasinglulu /* 2*91f16700Schasinglulu * Copyright (c) 2016-2017, Linaro Limited. All rights reserved. 3*91f16700Schasinglulu * Copyright (c) 2014-2020, Arm Limited. All rights reserved. 4*91f16700Schasinglulu * Copyright (c) 2014, STMicroelectronics International N.V. 5*91f16700Schasinglulu * All rights reserved. 6*91f16700Schasinglulu * 7*91f16700Schasinglulu * SPDX-License-Identifier: BSD-3-Clause 8*91f16700Schasinglulu */ 9*91f16700Schasinglulu 10*91f16700Schasinglulu #include <assert.h> 11*91f16700Schasinglulu #include <stdio.h> 12*91f16700Schasinglulu #include <string.h> 13*91f16700Schasinglulu 14*91f16700Schasinglulu #include <platform_def.h> 15*91f16700Schasinglulu 16*91f16700Schasinglulu #include <arch.h> 17*91f16700Schasinglulu #include <arch_helpers.h> 18*91f16700Schasinglulu #include <common/debug.h> 19*91f16700Schasinglulu #include <lib/cassert.h> 20*91f16700Schasinglulu #include <lib/utils.h> 21*91f16700Schasinglulu #include <lib/xlat_tables/xlat_tables.h> 22*91f16700Schasinglulu 23*91f16700Schasinglulu #include "../xlat_tables_private.h" 24*91f16700Schasinglulu 25*91f16700Schasinglulu #ifdef ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING 26*91f16700Schasinglulu #error "ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING flag is set. \ 27*91f16700Schasinglulu This module is to be used when LPAE is not supported" 28*91f16700Schasinglulu #endif 29*91f16700Schasinglulu 30*91f16700Schasinglulu CASSERT(PLAT_VIRT_ADDR_SPACE_SIZE == (1ULL << 32), invalid_vaddr_space_size); 31*91f16700Schasinglulu CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size); 32*91f16700Schasinglulu 33*91f16700Schasinglulu #define MMU32B_UNSET_DESC ~0UL 34*91f16700Schasinglulu #define MMU32B_INVALID_DESC 0UL 35*91f16700Schasinglulu 36*91f16700Schasinglulu #define MT_UNKNOWN ~0U 37*91f16700Schasinglulu 38*91f16700Schasinglulu /* 39*91f16700Schasinglulu * MMU related values 40*91f16700Schasinglulu */ 41*91f16700Schasinglulu 42*91f16700Schasinglulu /* Sharable */ 43*91f16700Schasinglulu #define MMU32B_TTB_S (1U << 1) 44*91f16700Schasinglulu 45*91f16700Schasinglulu /* Not Outer Sharable */ 46*91f16700Schasinglulu #define MMU32B_TTB_NOS (1U << 5) 47*91f16700Schasinglulu 48*91f16700Schasinglulu /* Normal memory, Inner Non-cacheable */ 49*91f16700Schasinglulu #define MMU32B_TTB_IRGN_NC 0U 50*91f16700Schasinglulu 51*91f16700Schasinglulu /* Normal memory, Inner Write-Back Write-Allocate Cacheable */ 52*91f16700Schasinglulu #define MMU32B_TTB_IRGN_WBWA (1U << 6) 53*91f16700Schasinglulu 54*91f16700Schasinglulu /* Normal memory, Inner Write-Through Cacheable */ 55*91f16700Schasinglulu #define MMU32B_TTB_IRGN_WT 1U 56*91f16700Schasinglulu 57*91f16700Schasinglulu /* Normal memory, Inner Write-Back no Write-Allocate Cacheable */ 58*91f16700Schasinglulu #define MMU32B_TTB_IRGN_WB (1U | (1U << 6)) 59*91f16700Schasinglulu 60*91f16700Schasinglulu /* Normal memory, Outer Write-Back Write-Allocate Cacheable */ 61*91f16700Schasinglulu #define MMU32B_TTB_RNG_WBWA (1U << 3) 62*91f16700Schasinglulu 63*91f16700Schasinglulu #define MMU32B_DEFAULT_ATTRS \ 64*91f16700Schasinglulu (MMU32B_TTB_S | MMU32B_TTB_NOS | \ 65*91f16700Schasinglulu MMU32B_TTB_IRGN_WBWA | MMU32B_TTB_RNG_WBWA) 66*91f16700Schasinglulu 67*91f16700Schasinglulu /* armv7 memory mapping attributes: section mapping */ 68*91f16700Schasinglulu #define SECTION_SECURE (0U << 19) 69*91f16700Schasinglulu #define SECTION_NOTSECURE (1U << 19) 70*91f16700Schasinglulu #define SECTION_SHARED (1U << 16) 71*91f16700Schasinglulu #define SECTION_NOTGLOBAL (1U << 17) 72*91f16700Schasinglulu #define SECTION_ACCESS_FLAG (1U << 10) 73*91f16700Schasinglulu #define SECTION_UNPRIV (1U << 11) 74*91f16700Schasinglulu #define SECTION_RO (1U << 15) 75*91f16700Schasinglulu #define SECTION_TEX(tex) ((((tex) >> 2) << 12) | \ 76*91f16700Schasinglulu ((((tex) >> 1) & 0x1) << 3) | \ 77*91f16700Schasinglulu (((tex) & 0x1) << 2)) 78*91f16700Schasinglulu #define SECTION_DEVICE SECTION_TEX(MMU32B_ATTR_DEVICE_INDEX) 79*91f16700Schasinglulu #define SECTION_NORMAL SECTION_TEX(MMU32B_ATTR_DEVICE_INDEX) 80*91f16700Schasinglulu #define SECTION_NORMAL_CACHED \ 81*91f16700Schasinglulu SECTION_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX) 82*91f16700Schasinglulu 83*91f16700Schasinglulu #define SECTION_XN (1U << 4) 84*91f16700Schasinglulu #define SECTION_PXN (1U << 0) 85*91f16700Schasinglulu #define SECTION_SECTION (2U << 0) 86*91f16700Schasinglulu 87*91f16700Schasinglulu #define SECTION_PT_NOTSECURE (1U << 3) 88*91f16700Schasinglulu #define SECTION_PT_PT (1U << 0) 89*91f16700Schasinglulu 90*91f16700Schasinglulu #define SMALL_PAGE_SMALL_PAGE (1U << 1) 91*91f16700Schasinglulu #define SMALL_PAGE_SHARED (1U << 10) 92*91f16700Schasinglulu #define SMALL_PAGE_NOTGLOBAL (1U << 11) 93*91f16700Schasinglulu #define SMALL_PAGE_TEX(tex) ((((tex) >> 2) << 6) | \ 94*91f16700Schasinglulu ((((tex) >> 1) & 0x1) << 3) | \ 95*91f16700Schasinglulu (((tex) & 0x1) << 2)) 96*91f16700Schasinglulu #define SMALL_PAGE_DEVICE \ 97*91f16700Schasinglulu SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX) 98*91f16700Schasinglulu #define SMALL_PAGE_NORMAL \ 99*91f16700Schasinglulu SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX) 100*91f16700Schasinglulu #define SMALL_PAGE_NORMAL_CACHED \ 101*91f16700Schasinglulu SMALL_PAGE_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX) 102*91f16700Schasinglulu #define SMALL_PAGE_ACCESS_FLAG (1U << 4) 103*91f16700Schasinglulu #define SMALL_PAGE_UNPRIV (1U << 5) 104*91f16700Schasinglulu #define SMALL_PAGE_RO (1U << 9) 105*91f16700Schasinglulu #define SMALL_PAGE_XN (1U << 0) 106*91f16700Schasinglulu 107*91f16700Schasinglulu /* The TEX, C and B bits concatenated */ 108*91f16700Schasinglulu #define MMU32B_ATTR_DEVICE_INDEX 0U 109*91f16700Schasinglulu #define MMU32B_ATTR_IWBWA_OWBWA_INDEX 1U 110*91f16700Schasinglulu 111*91f16700Schasinglulu #define MMU32B_PRRR_IDX(idx, tr, nos) (((tr) << (2 * (idx))) | \ 112*91f16700Schasinglulu ((uint32_t)(nos) << ((idx) + 24))) 113*91f16700Schasinglulu #define MMU32B_NMRR_IDX(idx, ir, or) (((ir) << (2 * (idx))) | \ 114*91f16700Schasinglulu ((uint32_t)(or) << (2 * (idx) + 16))) 115*91f16700Schasinglulu #define MMU32B_PRRR_DS0 (1U << 16) 116*91f16700Schasinglulu #define MMU32B_PRRR_DS1 (1U << 17) 117*91f16700Schasinglulu #define MMU32B_PRRR_NS0 (1U << 18) 118*91f16700Schasinglulu #define MMU32B_PRRR_NS1 (1U << 19) 119*91f16700Schasinglulu 120*91f16700Schasinglulu #define DACR_DOMAIN(num, perm) ((perm) << ((num) * 2)) 121*91f16700Schasinglulu #define DACR_DOMAIN_PERM_NO_ACCESS 0U 122*91f16700Schasinglulu #define DACR_DOMAIN_PERM_CLIENT 1U 123*91f16700Schasinglulu #define DACR_DOMAIN_PERM_MANAGER 3U 124*91f16700Schasinglulu 125*91f16700Schasinglulu #define NUM_1MB_IN_4GB (1UL << 12) 126*91f16700Schasinglulu #define NUM_4K_IN_1MB (1UL << 8) 127*91f16700Schasinglulu 128*91f16700Schasinglulu #define ONE_MB_SHIFT 20 129*91f16700Schasinglulu 130*91f16700Schasinglulu /* mmu 32b integration */ 131*91f16700Schasinglulu #define MMU32B_L1_TABLE_SIZE (NUM_1MB_IN_4GB * 4) 132*91f16700Schasinglulu #define MMU32B_L2_TABLE_SIZE (NUM_4K_IN_1MB * 4) 133*91f16700Schasinglulu #define MMU32B_L1_TABLE_ALIGN (1U << 14) 134*91f16700Schasinglulu #define MMU32B_L2_TABLE_ALIGN (1U << 10) 135*91f16700Schasinglulu 136*91f16700Schasinglulu static unsigned int next_xlat; 137*91f16700Schasinglulu static unsigned long long xlat_max_pa; 138*91f16700Schasinglulu static uintptr_t xlat_max_va; 139*91f16700Schasinglulu 140*91f16700Schasinglulu static uint32_t mmu_l1_base[NUM_1MB_IN_4GB] 141*91f16700Schasinglulu __aligned(MMU32B_L1_TABLE_ALIGN) __attribute__((section(".xlat_table"))); 142*91f16700Schasinglulu 143*91f16700Schasinglulu static uint32_t mmu_l2_base[MAX_XLAT_TABLES][NUM_4K_IN_1MB] 144*91f16700Schasinglulu __aligned(MMU32B_L2_TABLE_ALIGN) __attribute__((section(".xlat_table"))); 145*91f16700Schasinglulu 146*91f16700Schasinglulu /* 147*91f16700Schasinglulu * Array of all memory regions stored in order of ascending base address. 148*91f16700Schasinglulu * The list is terminated by the first entry with size == 0. 149*91f16700Schasinglulu */ 150*91f16700Schasinglulu static mmap_region_t mmap[MAX_MMAP_REGIONS + 1]; 151*91f16700Schasinglulu 152*91f16700Schasinglulu void print_mmap(void) 153*91f16700Schasinglulu { 154*91f16700Schasinglulu #if LOG_LEVEL >= LOG_LEVEL_VERBOSE 155*91f16700Schasinglulu mmap_region_t *mm = mmap; 156*91f16700Schasinglulu 157*91f16700Schasinglulu printf("init xlat - l1:%p l2:%p (%d)\n", 158*91f16700Schasinglulu (void *)mmu_l1_base, (void *)mmu_l2_base, MAX_XLAT_TABLES); 159*91f16700Schasinglulu printf("mmap:\n"); 160*91f16700Schasinglulu while (mm->size) { 161*91f16700Schasinglulu printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n", 162*91f16700Schasinglulu (void *)mm->base_va, mm->base_pa, 163*91f16700Schasinglulu mm->size, mm->attr); 164*91f16700Schasinglulu ++mm; 165*91f16700Schasinglulu }; 166*91f16700Schasinglulu printf("\n"); 167*91f16700Schasinglulu #endif 168*91f16700Schasinglulu } 169*91f16700Schasinglulu 170*91f16700Schasinglulu void mmap_add(const mmap_region_t *mm) 171*91f16700Schasinglulu { 172*91f16700Schasinglulu const mmap_region_t *mm_cursor = mm; 173*91f16700Schasinglulu 174*91f16700Schasinglulu while ((mm_cursor->size != 0U) || (mm_cursor->attr != 0U)) { 175*91f16700Schasinglulu mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va, 176*91f16700Schasinglulu mm_cursor->size, mm_cursor->attr); 177*91f16700Schasinglulu mm_cursor++; 178*91f16700Schasinglulu } 179*91f16700Schasinglulu } 180*91f16700Schasinglulu 181*91f16700Schasinglulu void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, 182*91f16700Schasinglulu size_t size, unsigned int attr) 183*91f16700Schasinglulu { 184*91f16700Schasinglulu mmap_region_t *mm = mmap; 185*91f16700Schasinglulu const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U; 186*91f16700Schasinglulu unsigned long long end_pa = base_pa + size - 1U; 187*91f16700Schasinglulu uintptr_t end_va = base_va + size - 1U; 188*91f16700Schasinglulu 189*91f16700Schasinglulu assert(IS_PAGE_ALIGNED(base_pa)); 190*91f16700Schasinglulu assert(IS_PAGE_ALIGNED(base_va)); 191*91f16700Schasinglulu assert(IS_PAGE_ALIGNED(size)); 192*91f16700Schasinglulu 193*91f16700Schasinglulu if (size == 0U) { 194*91f16700Schasinglulu return; 195*91f16700Schasinglulu } 196*91f16700Schasinglulu 197*91f16700Schasinglulu assert(base_pa < end_pa); /* Check for overflows */ 198*91f16700Schasinglulu assert(base_va < end_va); 199*91f16700Schasinglulu 200*91f16700Schasinglulu assert((base_va + (uintptr_t)size - (uintptr_t)1) <= 201*91f16700Schasinglulu (PLAT_VIRT_ADDR_SPACE_SIZE - 1U)); 202*91f16700Schasinglulu assert((base_pa + (unsigned long long)size - 1ULL) <= 203*91f16700Schasinglulu (PLAT_PHY_ADDR_SPACE_SIZE - 1U)); 204*91f16700Schasinglulu 205*91f16700Schasinglulu #if ENABLE_ASSERTIONS 206*91f16700Schasinglulu 207*91f16700Schasinglulu /* Check for PAs and VAs overlaps with all other regions */ 208*91f16700Schasinglulu for (mm = mmap; mm->size; ++mm) { 209*91f16700Schasinglulu 210*91f16700Schasinglulu uintptr_t mm_end_va = mm->base_va + mm->size - 1U; 211*91f16700Schasinglulu 212*91f16700Schasinglulu /* 213*91f16700Schasinglulu * Check if one of the regions is completely inside the other 214*91f16700Schasinglulu * one. 215*91f16700Schasinglulu */ 216*91f16700Schasinglulu bool fully_overlapped_va = 217*91f16700Schasinglulu ((base_va >= mm->base_va) && (end_va <= mm_end_va)) || 218*91f16700Schasinglulu ((mm->base_va >= base_va) && (mm_end_va <= end_va)); 219*91f16700Schasinglulu 220*91f16700Schasinglulu /* 221*91f16700Schasinglulu * Full VA overlaps are only allowed if both regions are 222*91f16700Schasinglulu * identity mapped (zero offset) or have the same VA to PA 223*91f16700Schasinglulu * offset. Also, make sure that it's not the exact same area. 224*91f16700Schasinglulu */ 225*91f16700Schasinglulu if (fully_overlapped_va) { 226*91f16700Schasinglulu assert((mm->base_va - mm->base_pa) == 227*91f16700Schasinglulu (base_va - base_pa)); 228*91f16700Schasinglulu assert((base_va != mm->base_va) || (size != mm->size)); 229*91f16700Schasinglulu } else { 230*91f16700Schasinglulu /* 231*91f16700Schasinglulu * If the regions do not have fully overlapping VAs, 232*91f16700Schasinglulu * then they must have fully separated VAs and PAs. 233*91f16700Schasinglulu * Partial overlaps are not allowed 234*91f16700Schasinglulu */ 235*91f16700Schasinglulu 236*91f16700Schasinglulu unsigned long long mm_end_pa = 237*91f16700Schasinglulu mm->base_pa + mm->size - 1; 238*91f16700Schasinglulu 239*91f16700Schasinglulu bool separated_pa = (end_pa < mm->base_pa) || 240*91f16700Schasinglulu (base_pa > mm_end_pa); 241*91f16700Schasinglulu bool separated_va = (end_va < mm->base_va) || 242*91f16700Schasinglulu (base_va > mm_end_va); 243*91f16700Schasinglulu 244*91f16700Schasinglulu assert(separated_va && separated_pa); 245*91f16700Schasinglulu } 246*91f16700Schasinglulu } 247*91f16700Schasinglulu 248*91f16700Schasinglulu mm = mmap; /* Restore pointer to the start of the array */ 249*91f16700Schasinglulu 250*91f16700Schasinglulu #endif /* ENABLE_ASSERTIONS */ 251*91f16700Schasinglulu 252*91f16700Schasinglulu /* Find correct place in mmap to insert new region */ 253*91f16700Schasinglulu while ((mm->base_va < base_va) && (mm->size != 0U)) { 254*91f16700Schasinglulu ++mm; 255*91f16700Schasinglulu } 256*91f16700Schasinglulu 257*91f16700Schasinglulu /* 258*91f16700Schasinglulu * If a section is contained inside another one with the same base 259*91f16700Schasinglulu * address, it must be placed after the one it is contained in: 260*91f16700Schasinglulu * 261*91f16700Schasinglulu * 1st |-----------------------| 262*91f16700Schasinglulu * 2nd |------------| 263*91f16700Schasinglulu * 3rd |------| 264*91f16700Schasinglulu * 265*91f16700Schasinglulu * This is required for mmap_region_attr() to get the attributes of the 266*91f16700Schasinglulu * small region correctly. 267*91f16700Schasinglulu */ 268*91f16700Schasinglulu while ((mm->base_va == base_va) && (mm->size > size)) { 269*91f16700Schasinglulu ++mm; 270*91f16700Schasinglulu } 271*91f16700Schasinglulu 272*91f16700Schasinglulu /* Make room for new region by moving other regions up by one place */ 273*91f16700Schasinglulu (void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm); 274*91f16700Schasinglulu 275*91f16700Schasinglulu /* Check we haven't lost the empty sentinel from the end of the array */ 276*91f16700Schasinglulu assert(mm_last->size == 0U); 277*91f16700Schasinglulu 278*91f16700Schasinglulu mm->base_pa = base_pa; 279*91f16700Schasinglulu mm->base_va = base_va; 280*91f16700Schasinglulu mm->size = size; 281*91f16700Schasinglulu mm->attr = attr; 282*91f16700Schasinglulu 283*91f16700Schasinglulu if (end_pa > xlat_max_pa) { 284*91f16700Schasinglulu xlat_max_pa = end_pa; 285*91f16700Schasinglulu } 286*91f16700Schasinglulu if (end_va > xlat_max_va) { 287*91f16700Schasinglulu xlat_max_va = end_va; 288*91f16700Schasinglulu } 289*91f16700Schasinglulu } 290*91f16700Schasinglulu 291*91f16700Schasinglulu /* map all memory as shared/global/domain0/no-usr access */ 292*91f16700Schasinglulu static uint32_t mmap_desc(unsigned attr, unsigned int addr_pa, 293*91f16700Schasinglulu unsigned int level) 294*91f16700Schasinglulu { 295*91f16700Schasinglulu uint32_t desc; 296*91f16700Schasinglulu 297*91f16700Schasinglulu switch (level) { 298*91f16700Schasinglulu case 1U: 299*91f16700Schasinglulu assert((addr_pa & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U); 300*91f16700Schasinglulu 301*91f16700Schasinglulu desc = SECTION_SECTION | SECTION_SHARED; 302*91f16700Schasinglulu 303*91f16700Schasinglulu desc |= (attr & MT_NS) != 0U ? SECTION_NOTSECURE : 0U; 304*91f16700Schasinglulu 305*91f16700Schasinglulu desc |= SECTION_ACCESS_FLAG; 306*91f16700Schasinglulu desc |= (attr & MT_RW) != 0U ? 0U : SECTION_RO; 307*91f16700Schasinglulu 308*91f16700Schasinglulu desc |= (attr & MT_MEMORY) != 0U ? 309*91f16700Schasinglulu SECTION_NORMAL_CACHED : SECTION_DEVICE; 310*91f16700Schasinglulu 311*91f16700Schasinglulu if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) { 312*91f16700Schasinglulu desc |= SECTION_XN; 313*91f16700Schasinglulu } 314*91f16700Schasinglulu break; 315*91f16700Schasinglulu case 2U: 316*91f16700Schasinglulu assert((addr_pa & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U); 317*91f16700Schasinglulu 318*91f16700Schasinglulu desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED; 319*91f16700Schasinglulu 320*91f16700Schasinglulu desc |= SMALL_PAGE_ACCESS_FLAG; 321*91f16700Schasinglulu desc |= (attr & MT_RW) != 0U ? 0U : SMALL_PAGE_RO; 322*91f16700Schasinglulu 323*91f16700Schasinglulu desc |= (attr & MT_MEMORY) != 0U ? 324*91f16700Schasinglulu SMALL_PAGE_NORMAL_CACHED : SMALL_PAGE_DEVICE; 325*91f16700Schasinglulu 326*91f16700Schasinglulu if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) { 327*91f16700Schasinglulu desc |= SMALL_PAGE_XN; 328*91f16700Schasinglulu } 329*91f16700Schasinglulu break; 330*91f16700Schasinglulu default: 331*91f16700Schasinglulu panic(); 332*91f16700Schasinglulu } 333*91f16700Schasinglulu #if LOG_LEVEL >= LOG_LEVEL_VERBOSE 334*91f16700Schasinglulu /* dump only the non-lpae level 2 tables */ 335*91f16700Schasinglulu if (level == 2U) { 336*91f16700Schasinglulu printf(attr & MT_MEMORY ? "MEM" : "dev"); 337*91f16700Schasinglulu printf(attr & MT_RW ? "-rw" : "-RO"); 338*91f16700Schasinglulu printf(attr & MT_NS ? "-NS" : "-S"); 339*91f16700Schasinglulu } 340*91f16700Schasinglulu #endif 341*91f16700Schasinglulu return desc | addr_pa; 342*91f16700Schasinglulu } 343*91f16700Schasinglulu 344*91f16700Schasinglulu static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va, 345*91f16700Schasinglulu size_t size, unsigned int *attr) 346*91f16700Schasinglulu { 347*91f16700Schasinglulu /* Don't assume that the area is contained in the first region */ 348*91f16700Schasinglulu unsigned int ret = MT_UNKNOWN; 349*91f16700Schasinglulu 350*91f16700Schasinglulu /* 351*91f16700Schasinglulu * Get attributes from last (innermost) region that contains the 352*91f16700Schasinglulu * requested area. Don't stop as soon as one region doesn't contain it 353*91f16700Schasinglulu * because there may be other internal regions that contain this area: 354*91f16700Schasinglulu * 355*91f16700Schasinglulu * |-----------------------------1-----------------------------| 356*91f16700Schasinglulu * |----2----| |-------3-------| |----5----| 357*91f16700Schasinglulu * |--4--| 358*91f16700Schasinglulu * 359*91f16700Schasinglulu * |---| <- Area we want the attributes of. 360*91f16700Schasinglulu * 361*91f16700Schasinglulu * In this example, the area is contained in regions 1, 3 and 4 but not 362*91f16700Schasinglulu * in region 2. The loop shouldn't stop at region 2 as inner regions 363*91f16700Schasinglulu * have priority over outer regions, it should stop at region 5. 364*91f16700Schasinglulu */ 365*91f16700Schasinglulu for ( ; ; ++mm) { 366*91f16700Schasinglulu 367*91f16700Schasinglulu if (mm->size == 0U) { 368*91f16700Schasinglulu return ret; /* Reached end of list */ 369*91f16700Schasinglulu } 370*91f16700Schasinglulu 371*91f16700Schasinglulu if (mm->base_va > (base_va + size - 1U)) { 372*91f16700Schasinglulu return ret; /* Next region is after area so end */ 373*91f16700Schasinglulu } 374*91f16700Schasinglulu 375*91f16700Schasinglulu if ((mm->base_va + mm->size - 1U) < base_va) { 376*91f16700Schasinglulu continue; /* Next region has already been overtaken */ 377*91f16700Schasinglulu } 378*91f16700Schasinglulu 379*91f16700Schasinglulu if ((ret == 0U) && (mm->attr == *attr)) { 380*91f16700Schasinglulu continue; /* Region doesn't override attribs so skip */ 381*91f16700Schasinglulu } 382*91f16700Schasinglulu 383*91f16700Schasinglulu if ((mm->base_va > base_va) || 384*91f16700Schasinglulu ((mm->base_va + mm->size - 1U) < 385*91f16700Schasinglulu (base_va + size - 1U))) { 386*91f16700Schasinglulu return MT_UNKNOWN; /* Region doesn't fully cover area */ 387*91f16700Schasinglulu } 388*91f16700Schasinglulu 389*91f16700Schasinglulu *attr = mm->attr; 390*91f16700Schasinglulu ret = 0U; 391*91f16700Schasinglulu } 392*91f16700Schasinglulu } 393*91f16700Schasinglulu 394*91f16700Schasinglulu static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm, 395*91f16700Schasinglulu unsigned int base_va, 396*91f16700Schasinglulu uint32_t *table, 397*91f16700Schasinglulu unsigned int level) 398*91f16700Schasinglulu { 399*91f16700Schasinglulu unsigned int level_size_shift = (level == 1U) ? 400*91f16700Schasinglulu ONE_MB_SHIFT : FOUR_KB_SHIFT; 401*91f16700Schasinglulu unsigned int level_size = 1U << level_size_shift; 402*91f16700Schasinglulu unsigned int level_index_mask = (level == 1U) ? 403*91f16700Schasinglulu (NUM_1MB_IN_4GB - 1) << ONE_MB_SHIFT : 404*91f16700Schasinglulu (NUM_4K_IN_1MB - 1) << FOUR_KB_SHIFT; 405*91f16700Schasinglulu 406*91f16700Schasinglulu assert((level == 1U) || (level == 2U)); 407*91f16700Schasinglulu 408*91f16700Schasinglulu VERBOSE("init xlat table at %p (level%1u)\n", (void *)table, level); 409*91f16700Schasinglulu 410*91f16700Schasinglulu do { 411*91f16700Schasinglulu uint32_t desc = MMU32B_UNSET_DESC; 412*91f16700Schasinglulu 413*91f16700Schasinglulu if (mm->base_va + mm->size <= base_va) { 414*91f16700Schasinglulu /* Area now after the region so skip it */ 415*91f16700Schasinglulu ++mm; 416*91f16700Schasinglulu continue; 417*91f16700Schasinglulu } 418*91f16700Schasinglulu #if LOG_LEVEL >= LOG_LEVEL_VERBOSE 419*91f16700Schasinglulu /* dump only non-lpae level 2 tables content */ 420*91f16700Schasinglulu if (level == 2U) { 421*91f16700Schasinglulu printf(" 0x%lx %x " + 6 - 2 * level, 422*91f16700Schasinglulu base_va, level_size); 423*91f16700Schasinglulu } 424*91f16700Schasinglulu #endif 425*91f16700Schasinglulu if (mm->base_va >= base_va + level_size) { 426*91f16700Schasinglulu /* Next region is after area so nothing to map yet */ 427*91f16700Schasinglulu desc = MMU32B_INVALID_DESC; 428*91f16700Schasinglulu } else if ((mm->base_va <= base_va) && 429*91f16700Schasinglulu (mm->base_va + mm->size) >= 430*91f16700Schasinglulu (base_va + level_size)) { 431*91f16700Schasinglulu /* Next region covers all of area */ 432*91f16700Schasinglulu unsigned int attr = mm->attr; 433*91f16700Schasinglulu unsigned int r = mmap_region_attr(mm, base_va, 434*91f16700Schasinglulu level_size, &attr); 435*91f16700Schasinglulu 436*91f16700Schasinglulu if (r == 0U) { 437*91f16700Schasinglulu desc = mmap_desc(attr, 438*91f16700Schasinglulu base_va - mm->base_va + mm->base_pa, 439*91f16700Schasinglulu level); 440*91f16700Schasinglulu } 441*91f16700Schasinglulu } 442*91f16700Schasinglulu 443*91f16700Schasinglulu if (desc == MMU32B_UNSET_DESC) { 444*91f16700Schasinglulu uintptr_t xlat_table; 445*91f16700Schasinglulu 446*91f16700Schasinglulu /* 447*91f16700Schasinglulu * Area not covered by a region so need finer table 448*91f16700Schasinglulu * Reuse next level table if any (assert attrib matching). 449*91f16700Schasinglulu * Otherwise allocate a xlat table. 450*91f16700Schasinglulu */ 451*91f16700Schasinglulu if (*table) { 452*91f16700Schasinglulu assert((*table & 3) == SECTION_PT_PT); 453*91f16700Schasinglulu assert(((*table & SECTION_PT_NOTSECURE) == 0U) 454*91f16700Schasinglulu == ((mm->attr & MT_NS) == 0U)); 455*91f16700Schasinglulu 456*91f16700Schasinglulu xlat_table = (*table) & 457*91f16700Schasinglulu ~(MMU32B_L1_TABLE_ALIGN - 1); 458*91f16700Schasinglulu desc = *table; 459*91f16700Schasinglulu } else { 460*91f16700Schasinglulu xlat_table = (uintptr_t)mmu_l2_base + 461*91f16700Schasinglulu next_xlat * MMU32B_L2_TABLE_SIZE; 462*91f16700Schasinglulu next_xlat++; 463*91f16700Schasinglulu assert(next_xlat <= MAX_XLAT_TABLES); 464*91f16700Schasinglulu (void)memset((char *)xlat_table, 0, 465*91f16700Schasinglulu MMU32B_L2_TABLE_SIZE); 466*91f16700Schasinglulu 467*91f16700Schasinglulu desc = xlat_table | SECTION_PT_PT; 468*91f16700Schasinglulu desc |= (mm->attr & MT_NS) != 0U ? 469*91f16700Schasinglulu SECTION_PT_NOTSECURE : 0; 470*91f16700Schasinglulu } 471*91f16700Schasinglulu /* Recurse to fill in new table */ 472*91f16700Schasinglulu mm = init_xlation_table_inner(mm, base_va, 473*91f16700Schasinglulu (uint32_t *)xlat_table, 474*91f16700Schasinglulu level + 1); 475*91f16700Schasinglulu } 476*91f16700Schasinglulu #if LOG_LEVEL >= LOG_LEVEL_VERBOSE 477*91f16700Schasinglulu /* dump only non-lpae level 2 tables content */ 478*91f16700Schasinglulu if (level == 2U) { 479*91f16700Schasinglulu printf("\n"); 480*91f16700Schasinglulu } 481*91f16700Schasinglulu #endif 482*91f16700Schasinglulu *table++ = desc; 483*91f16700Schasinglulu base_va += level_size; 484*91f16700Schasinglulu } while ((mm->size != 0U) && ((base_va & level_index_mask) != 0U)); 485*91f16700Schasinglulu 486*91f16700Schasinglulu return mm; 487*91f16700Schasinglulu } 488*91f16700Schasinglulu 489*91f16700Schasinglulu void init_xlat_tables(void) 490*91f16700Schasinglulu { 491*91f16700Schasinglulu print_mmap(); 492*91f16700Schasinglulu 493*91f16700Schasinglulu assert(((unsigned int)mmu_l1_base & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U); 494*91f16700Schasinglulu assert(((unsigned int)mmu_l2_base & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U); 495*91f16700Schasinglulu 496*91f16700Schasinglulu (void)memset(mmu_l1_base, 0, MMU32B_L1_TABLE_SIZE); 497*91f16700Schasinglulu 498*91f16700Schasinglulu init_xlation_table_inner(mmap, 0, (uint32_t *)mmu_l1_base, 1); 499*91f16700Schasinglulu 500*91f16700Schasinglulu VERBOSE("init xlat - max_va=%p, max_pa=%llx\n", 501*91f16700Schasinglulu (void *)xlat_max_va, xlat_max_pa); 502*91f16700Schasinglulu assert(xlat_max_pa <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1)); 503*91f16700Schasinglulu } 504*91f16700Schasinglulu 505*91f16700Schasinglulu /******************************************************************************* 506*91f16700Schasinglulu * Function for enabling the MMU in Secure PL1, assuming that the 507*91f16700Schasinglulu * page-tables have already been created. 508*91f16700Schasinglulu ******************************************************************************/ 509*91f16700Schasinglulu void enable_mmu_svc_mon(unsigned int flags) 510*91f16700Schasinglulu { 511*91f16700Schasinglulu unsigned int prrr; 512*91f16700Schasinglulu unsigned int nmrr; 513*91f16700Schasinglulu unsigned int sctlr; 514*91f16700Schasinglulu 515*91f16700Schasinglulu assert(IS_IN_SECURE()); 516*91f16700Schasinglulu assert((read_sctlr() & SCTLR_M_BIT) == 0U); 517*91f16700Schasinglulu 518*91f16700Schasinglulu /* Enable Access flag (simplified access permissions) and TEX remap */ 519*91f16700Schasinglulu write_sctlr(read_sctlr() | SCTLR_AFE_BIT | SCTLR_TRE_BIT); 520*91f16700Schasinglulu 521*91f16700Schasinglulu prrr = MMU32B_PRRR_IDX(MMU32B_ATTR_DEVICE_INDEX, 1, 0) 522*91f16700Schasinglulu | MMU32B_PRRR_IDX(MMU32B_ATTR_IWBWA_OWBWA_INDEX, 2, 1); 523*91f16700Schasinglulu nmrr = MMU32B_NMRR_IDX(MMU32B_ATTR_DEVICE_INDEX, 0, 0) 524*91f16700Schasinglulu | MMU32B_NMRR_IDX(MMU32B_ATTR_IWBWA_OWBWA_INDEX, 1, 1); 525*91f16700Schasinglulu 526*91f16700Schasinglulu prrr |= MMU32B_PRRR_NS1 | MMU32B_PRRR_DS1; 527*91f16700Schasinglulu 528*91f16700Schasinglulu write_prrr(prrr); 529*91f16700Schasinglulu write_nmrr(nmrr); 530*91f16700Schasinglulu 531*91f16700Schasinglulu /* Program Domain access control register: domain 0 only */ 532*91f16700Schasinglulu write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT)); 533*91f16700Schasinglulu 534*91f16700Schasinglulu /* Invalidate TLBs at the current exception level */ 535*91f16700Schasinglulu tlbiall(); 536*91f16700Schasinglulu 537*91f16700Schasinglulu /* set MMU base xlat table entry (use only TTBR0) */ 538*91f16700Schasinglulu write_ttbr0((uint32_t)mmu_l1_base | MMU32B_DEFAULT_ATTRS); 539*91f16700Schasinglulu write_ttbr1(0U); 540*91f16700Schasinglulu 541*91f16700Schasinglulu /* 542*91f16700Schasinglulu * Ensure all translation table writes have drained 543*91f16700Schasinglulu * into memory, the TLB invalidation is complete, 544*91f16700Schasinglulu * and translation register writes are committed 545*91f16700Schasinglulu * before enabling the MMU 546*91f16700Schasinglulu */ 547*91f16700Schasinglulu dsb(); 548*91f16700Schasinglulu isb(); 549*91f16700Schasinglulu 550*91f16700Schasinglulu sctlr = read_sctlr(); 551*91f16700Schasinglulu sctlr |= SCTLR_M_BIT; 552*91f16700Schasinglulu #ifdef ARMV7_SUPPORTS_VIRTUALIZATION 553*91f16700Schasinglulu sctlr |= SCTLR_WXN_BIT; 554*91f16700Schasinglulu #endif 555*91f16700Schasinglulu 556*91f16700Schasinglulu if ((flags & DISABLE_DCACHE) != 0U) { 557*91f16700Schasinglulu sctlr &= ~SCTLR_C_BIT; 558*91f16700Schasinglulu } else { 559*91f16700Schasinglulu sctlr |= SCTLR_C_BIT; 560*91f16700Schasinglulu } 561*91f16700Schasinglulu 562*91f16700Schasinglulu write_sctlr(sctlr); 563*91f16700Schasinglulu 564*91f16700Schasinglulu /* Ensure the MMU enable takes effect immediately */ 565*91f16700Schasinglulu isb(); 566*91f16700Schasinglulu } 567