/*
 * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <common/bl_common.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <lib/xlat_tables/xlat_tables_arch.h>
#include <plat/common/common_def.h>

#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

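/*
 * Base translation table. The architecture requires the table used at the
 * initial lookup level to be aligned to its own size so that its address can
 * be programmed into TTBR0_ELx, hence the alignment on the table size below.
 */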
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
 * is supported from ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}

/*
 * Return the minimum virtual address space size supported by the architecture.
 */
static uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_armv8_4_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
#endif /* ENABLE_ASSERTIONS */

unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
	if (el == 3U) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1U);
		return UPPER_ATTRS(PXN);
	}
}

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	assert(PLAT_VIRT_ADDR_SPACE_SIZE >=
		(xlat_get_min_virt_addr_space_size() - 1U));
	assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));

	print_mmap();
	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

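/*
 * Illustrative usage sketch (not part of this file): a BL image typically
 * registers its memory regions through the common mmap API, builds the
 * tables and then enables the MMU at its exception level, e.g.
 *
 *	mmap_add_region(base_pa, base_va, size, attr);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 *
 * where enable_mmu_el3() is one of the functions generated by the macro
 * below.
 */
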
/*******************************************************************************
 * Macro generating the code for the function that enables the MMU at the given
 * exception level, assuming that the page tables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
									\
		if ((flags & XLAT_TABLE_NC) != 0U) {			\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				((uint64_t)t0sz << TCR_T0SZ_SHIFT);	\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				((uint64_t)t0sz << TCR_T0SZ_SHIFT);	\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if ((flags & DISABLE_DCACHE) != 0U)			\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}								\
									\
	void enable_mmu_direct_el##_el(unsigned int flags)		\
	{								\
		enable_mmu_el##_el(flags);				\
	}

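/*
 * Worked example for the T0SZ value computed in the macro above: a platform
 * that sets PLAT_VIRT_ADDR_SPACE_SIZE to (1ULL << 32) gets
 * t0sz = 64 - 32 = 32, i.e. TTBR0 translations cover a 4GB virtual address
 * range.
 */
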
/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)