/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table
#if SPMC_AT_EL3
	.globl	tsp_cpu_on_entry
#endif



	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then issue
	 * the SMC to hand control back to EL3.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #SMC_ARG6]
	ldp	x4, x5, [x0, #SMC_ARG4]
	ldp	x2, x3, [x0, #SMC_ARG2]
	/* x0 is loaded last since it holds the args pointer until here */
	ldp	x0, x1, [x0, #SMC_ARG0]
	smc	#0
	.endm

	/* ---------------------------------------------
	 * Save the minimal S-EL1 exception-return state
	 * (ELR_EL1/SPSR_EL1 via \reg1/\reg2, plus LR and
	 * x18) on the stack. Must be balanced by
	 * restore_eret_context, which pops in the
	 * reverse order.
	 * ---------------------------------------------
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	/* ---------------------------------------------
	 * Undo save_eret_context: pop x30/x18 and the
	 * saved ELR_EL1/SPSR_EL1 (into \reg1/\reg2) and
	 * write them back to the system registers.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

/* Cold-boot entry point of the TSP on the primary CPU. */
func tsp_entrypoint _align=3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled fixup the Global descriptor Table only
	 * once during primary core cold boot path.
	 *
	 * Compile time base address, required for fixup, is calculated
	 * using "pie_fixup" label present within first page.
	 * ------------------------------------------------------------
	 */
	pie_fixup:
	ldr	x0, =pie_fixup
	/* x0 = page-aligned runtime load address of the image */
	and	x0, x0, #~(PAGE_SIZE_MASK)
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)
	add	x1, x1, x0			/* x1 = runtime image end */
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT	/* clear DSSBS: no speculative loads */
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled however, RO sections including the
	 * GOT may be modified during pie fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0		/* x1 = size of the region to invalidate */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0		/* x1 = size of .bss */
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0		/* x1 = size of the coherent memory section */
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0			/* x1 = value returned by tsp_main */
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The TSPD should never return control here after init is done */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE: the order of these branches defines
	 * the vector layout the TSPD expects; do not
	 * reorder them.
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
In the current 228*91f16700Schasinglulu * implementation, the TSPD expects the TSP to 229*91f16700Schasinglulu * re-initialise its state so nothing is done 230*91f16700Schasinglulu * here except for acknowledging the request. 231*91f16700Schasinglulu * --------------------------------------------- 232*91f16700Schasinglulu */ 233*91f16700Schasinglulufunc tsp_cpu_off_entry 234*91f16700Schasinglulu bl tsp_cpu_off_main 235*91f16700Schasinglulu restore_args_call_smc 236*91f16700Schasingluluendfunc tsp_cpu_off_entry 237*91f16700Schasinglulu 238*91f16700Schasinglulu /*--------------------------------------------- 239*91f16700Schasinglulu * This entrypoint is used by the TSPD when the 240*91f16700Schasinglulu * system is about to be switched off (through 241*91f16700Schasinglulu * a SYSTEM_OFF psci call) to ask the TSP to 242*91f16700Schasinglulu * perform any necessary bookkeeping. 243*91f16700Schasinglulu * --------------------------------------------- 244*91f16700Schasinglulu */ 245*91f16700Schasinglulufunc tsp_system_off_entry 246*91f16700Schasinglulu bl tsp_system_off_main 247*91f16700Schasinglulu restore_args_call_smc 248*91f16700Schasingluluendfunc tsp_system_off_entry 249*91f16700Schasinglulu 250*91f16700Schasinglulu /*--------------------------------------------- 251*91f16700Schasinglulu * This entrypoint is used by the TSPD when the 252*91f16700Schasinglulu * system is about to be reset (through a 253*91f16700Schasinglulu * SYSTEM_RESET psci call) to ask the TSP to 254*91f16700Schasinglulu * perform any necessary bookkeeping. 
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0			/* x0 = flags for bl32_plat_enable_mmu */
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	/* In debug builds, sanity-check the magic number passed in x0 */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (handler returned 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	/* Unmask IRQ/FIQ so the yielding SMC can be preempted */
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	/* Re-mask IRQ/FIQ before handing control back to EL3 */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
	 * SMC. It could be on behalf of non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exceptions masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the pre-empted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry