/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/aarch32/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

#define FIXUP_SIZE	((BL32_LIMIT) - (BL32_BASE))

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor; the
	 * Non-secure world is not allowed to mask them.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the local exclusive monitor
	 * when entering Monitor mode.
	 */
	clrex
#endif
	.endm
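	/*
	 * A plausible rationale for the macro above (an assumption based on
	 * the exclusive monitor semantics of the two architecture versions,
	 * not a statement from the original sources): an SMC or FIQ can
	 * interrupt the caller in the middle of a LDREX/STREX sequence, and
	 * on ARMv7 taking an exception is not guaranteed to clear the local
	 * exclusive monitor, so it is cleared explicitly. The
	 * ARM_ARCH_MAJOR == 7 guard reflects that later architecture
	 * versions handle this case in hardware.
	 */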
vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	report_prefetch_abort	/* Prefetch abort */
	b	report_data_abort	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments r0 - r3 for later use.
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_early_platform_setup2
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main
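	/*
	 * For reference, the C prototypes of the hooks called above, as
	 * declared in TF-A's platform_sp_min.h (listed here for convenience;
	 * consult that header for the authoritative declarations):
	 *
	 *   void sp_min_early_platform_setup2(u_register_t arg0,
	 *			u_register_t arg1, u_register_t arg2,
	 *			u_register_t arg3);
	 *   void sp_min_plat_arch_setup(void);
	 *   void sp_min_main(void);
	 */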

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint
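
/*
 * Background for the SMC handler below: while executing in Monitor mode,
 * the banked `sp` holds a pointer to an `smc_ctx_t`, and the SMC_CTX_*
 * offsets used to index it are defined in smccc_helpers.h (included
 * above). Per the SMC Calling Convention, bit[30] of the function ID
 * (the FUNCID_CC field) distinguishes SMC32 (0) from SMC64 (1) calls;
 * a 32-bit monitor can only service the former.
 */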

/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it on top of the C runtime stack.
	 * The value will be saved to the per-cpu data once the C stack is
	 * available, as a valid stack is needed to call _cpu_data().
	 */
	strd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
	ldcopr16	r0, r1, CNTPCT_64
	ldr	lr, [sp, #SMC_CTX_SP_MON]
	strd	r0, r1, [lr, #-8]!
	str	lr, [sp, #SMC_CTX_SP_MON]
	ldrd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
#endif

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save handle to a callee-saved register */
	mov	r6, r2

	/*
	 * Restore the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	ldrd	r4, r5, [sp], #8
	bl	_cpu_data
	strd	r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]

	/* Restore handle */
	mov	r2, r6
#endif

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC64 is detected. Return an error back to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* Load the C runtime stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq
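
/*
 * Note on the warm boot entrypoint below: this is the address that the
 * platform's PSCI glue is expected to hand to the power controller or
 * warm-boot mailbox, so that CPUs resuming from CPU_ON or CPU_SUSPEND
 * re-enter SP_MIN here rather than at the cold boot entrypoint.
 */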

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * This timestamp update happens with the cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to the timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	ldcopr16	r2, r3, CNTPCT_64
	strd	r2, r3, [r0]
#endif
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot: we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs; this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment;
	 *   this has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=0

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
	bl	bl32_plat_enable_mmu
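
	/*
	 * bl32_plat_enable_mmu() is a platform hook taking the flags value
	 * prepared in r0 above; when the DISABLE_DCACHE flag is set, the
	 * platform is expected to enable the MMU while leaving the data
	 * cache disabled, for the coherency reasons described above.
	 */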

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save smc_ctx_t */
	mov	r5, r0

	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	r4, r0

	/*
	 * Invalidate before updating the timestamp to ensure that previous
	 * timestamp updates on the same cache line, made with caches
	 * disabled, are properly seen by the same core. Without the cache
	 * invalidate, the core might write into a stale cache line.
	 */
	mov	r1, #PMF_TS_SIZE
	bl	inv_dcache_range

	ldcopr16	r0, r1, CNTPCT_64
	strd	r0, r1, [r4]

	/* Restore smc_ctx_t */
	mov	r0, r5
#endif

	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode specified in the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit
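
/*
 * monitor_exit is provided by smccc_macros.S (included above, alongside
 * smccc_save_gp_mode_regs); it is expected to restore the general purpose
 * and mode-specific registers from the smc_ctx_t pointed to by r0 and then
 * perform the exception return.
 */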