/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL2 image. Preprocessed with the C preprocessor
 * (SEPARATE_CODE_AND_RODATA, ENABLE_RME, USE_COHERENT_MEM, BL2_BASE and
 * BL2_LIMIT are build-time defines). Places the whole image in a single
 * RAM region and exports the __*_START__/__*_END__ boundary symbols the
 * runtime uses to set up translation-table attributes.
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
}

SECTIONS {
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL2_BASE;

    /* Page alignment is required so code/data pages get distinct attributes. */
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL2_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;

        /* The entry-point object must come first so execution starts there. */
#if ENABLE_RME
        *bl2_rme_entrypoint.o(.text*)
#else /* ENABLE_RME */
        *bl2_entrypoint.o(.text*)
#endif /* ENABLE_RME */

        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        __TEXT_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * executable. Pad to a page boundary so no read-only data from the
         * next section shares the last code page.
         */
        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        __RODATA_END_UNALIGNED__ = .;
        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        __RO_START__ = .;

        *bl2_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

    __RW_START__ = .;

    DATA_SECTION >RAM
    STACK_SECTION >RAM
    BSS_SECTION >RAM
    XLAT_TABLE_SECTION >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(.tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >RAM
#endif /* USE_COHERENT_MEM */

    __RW_END__ = .;
    __BL2_END__ = .;
    RAM_REGION_END = .;

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif /* USE_COHERENT_MEM */

    ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
}