/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <lib/xlat_tables/xlat_tables_defs.h>

#define PAGE_START_MASK		~(PAGE_SIZE_MASK)
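
/*
 * For example, with 4KB pages (PAGE_SIZE_MASK == 0xFFF) this evaluates to
 * 0xFFFFF000, so "address & PAGE_START_MASK" rounds an address down to the
 * start of its page. It is used below to derive the base address passed to
 * the PIE fixup.
 */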

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read the current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 * or store one or more registers have an alignment check that the
	 * address being accessed is aligned to the size of the data element(s)
	 * being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 * Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable the Asynchronous data abort now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie	a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 * cp11 field is ignored, but is set to the same value as cp10. The
	 * cp10 field is set to allow access to Advanced SIMD and floating
	 * point features from both Security states.
	 *
	 * NSACR.NSTRCDIS: When System register trace is implemented, set to
	 * one so that Non-secure System register accesses to all implemented
	 * trace registers are disabled.
	 * When System register trace is not implemented, this bit is RES0 and
	 * hence set to zero.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_COPTRC_SHIFT, #ID_DFR0_COPTRC_LENGTH
	cmp	r1, #ID_DFR0_COPTRC_SUPPORTED
	bne	1f
	orr	r0, r0, #NSTRCDIS_BIT
1:
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 * to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 * cp11 field is ignored, but is set to the same value as cp10. The
	 * cp10 field is set to allow full access from PL0 and PL1 to
	 * floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for the field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 * from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by soft-float toolchains.
	 * ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
	 * hard-float toolchain variants; avoid compiling the code below with a
	 * soft-float toolchain, as the "vmsr" instruction would not be
	 * recognised.
	 * ---------------------------------------------------------------------
	 */
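	/*
	 * Note: soft-float toolchains predefine __SOFTFP__ (typically to 1),
	 * while hard-float toolchains leave it undefined; an undefined
	 * identifier evaluates to 0 in an #if expression, so !(__SOFTFP__)
	 * selects hard-float builds only.
	 */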
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 * Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 * in Secure state. This bit is RES0 in versions of the architecture
	 * earlier than ARMv8.5; setting it to 1 has no effect on them.
	 *
	 * SDCR.TTRF: Set to one so that accesses to the trace filter control
	 * registers in non-monitor mode generate a Monitor trap exception,
	 * unless the access generates a higher priority exception, when trace
	 * filter control (FEAT_TRF) is implemented.
	 * When FEAT_TRF is not implemented, this bit is RES0.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | \
		      SDCR_SCCD_BIT) & ~SDCR_TTRF_BIT)
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_TRACEFILT_SHIFT, #ID_DFR0_TRACEFILT_LENGTH
	cmp	r1, #ID_DFR0_TRACEFILT_SUPPORTED
	bne	1f
	orr	r0, r0, #SDCR_TTRF_BIT
1:
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that an event counter overflow, which is
	 * recorded in PMOVSCLR[0-30], occurs on the increment that changes
	 * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is implemented. This
	 * bit is RES0 in versions of the architecture earlier than ARMv8.5;
	 * setting it to 1 has no effect on them.
	 * This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that a cycle counter overflow, which is
	 * recorded in PMOVSCLR[31], occurs on the increment that changes
	 * PMCCNTR[63] from 1 to 0.
	 * This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR
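
	/*
	 * Note: FEAT_DIT, where implemented, requires the timing of the
	 * instructions it covers to be independent of the data values they
	 * process, which helps EL3 code avoid data-dependent timing side
	 * channels.
	 */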
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter
 *	case, this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it:
 *	primary CPU or secondary CPU. The primary CPU will be allowed to carry
 *	on with the platform initialisations, while the secondaries will be
 *	put in a platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 *
 * _pie_fixup_size:
 *	Size of the memory region in which the Global Descriptor Table (GDT)
 *	is to be fixed up.
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 * -----------------------------------------------------------------------------
 */
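
/*
 * For illustration, a BL1-style invocation of this macro might look as
 * follows (the parameter values are image-specific and the vector table
 * symbol is an example):
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=bl1_exceptions		\
 *		_pie_fixup_size=0
 */
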
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 * might involve memory reads or writes. Set to zero to select
		 * Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with the base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculative store bypass
		 * safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr	r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
			SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero,
		 * this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */
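
	/*
	 * A minimal plat_get_my_entrypoint, assuming a hypothetical
	 * PLAT_MAILBOX_BASE location that holds the warm boot entrypoint
	 * (and reads as zero until one is set), might look like:
	 *
	 *	func plat_get_my_entrypoint
	 *		ldr	r0, =PLAT_MAILBOX_BASE
	 *		ldr	r0, [r0]
	 *		bx	lr
	 *	endfunc plat_get_my_entrypoint
	 */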

	.if \_pie_fixup_size
#if ENABLE_PIE
		/* ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	r0, =pie_fixup
		ldr	r1, =PAGE_START_MASK
		and	r0, r0, r1
		mov_imm	r1, \_pie_fixup_size
		add	r1, r1, r0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */
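
	/*
	 * For reference, plat_is_my_cpu_primary usually compares this CPU's
	 * MPIDR affinity fields against the primary CPU's MPID; a sketch,
	 * assuming a hypothetical PLAT_PRIMARY_CPU_MPID constant:
	 *
	 *	func plat_is_my_cpu_primary
	 *		ldcopr	r0, MPIDR
	 *		ldr	r1, =MPIDR_AFFINITY_MASK
	 *		and	r0, r0, r1
	 *		ldr	r1, =PLAT_PRIMARY_CPU_MPID
	 *		cmp	r0, r1
	 *		moveq	r0, #1
	 *		movne	r0, #0
	 *		bx	lr
	 *	endfunc plat_is_my_cpu_primary
	 */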

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to
	 * this point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && RESET_TO_BL2)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This includes the
		 * data and NOBITS sections. This is done to safeguard against
		 * possible corruption of this memory by dirty cache lines in a
		 * system cache, as a result of use by an earlier boot loader
		 * stage. If PIE is enabled, however, RO sections including the
		 * GOT may be modified during the PIE fixup. Therefore, to be on
		 * the safe side, invalidate the entire image region if PIE is
		 * enabled.
		 * -----------------------------------------------------------------
		 */
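		/*
		 * Note: inv_dcache_range expects the region base address in r0
		 * and its size in bytes in r1, hence the "sub" before each call
		 * below.
		 */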
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		ldr	r0, =__TEXT_START__
#else
		ldr	r0, =__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		ldr	r0, =__RW_START__
#endif /* ENABLE_PIE */
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		ldr	r0, =__BL2_NOLOAD_START__
		ldr	r1, =__BL2_NOLOAD_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif
#endif

		/*
		 * zeromem uses r12, which holds the previous BL's arg3, so
		 * save it in r7 across the calls.
		 */
		mov	r7, r12
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_END__
		sub	r1, r1, r0
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_END_UNALIGNED__
		sub	r1, r1, r0
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_RAM_END__
		sub	r2, r2, r0
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

#endif /* EL3_COMMON_MACROS_S */