/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
#include <tools_share/uuid.h>

#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot.
38*91f16700Schasinglulu ******************************************************************************/ 39*91f16700Schasinglulu tsp_vectors_t *tsp_vectors; 40*91f16700Schasinglulu 41*91f16700Schasinglulu /******************************************************************************* 42*91f16700Schasinglulu * Array to keep track of per-cpu Secure Payload state 43*91f16700Schasinglulu ******************************************************************************/ 44*91f16700Schasinglulu tsp_context_t tspd_sp_context[TSPD_CORE_COUNT]; 45*91f16700Schasinglulu 46*91f16700Schasinglulu 47*91f16700Schasinglulu /* TSP UID */ 48*91f16700Schasinglulu DEFINE_SVC_UUID2(tsp_uuid, 49*91f16700Schasinglulu 0xa056305b, 0x9132, 0x7b42, 0x98, 0x11, 50*91f16700Schasinglulu 0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa); 51*91f16700Schasinglulu 52*91f16700Schasinglulu int32_t tspd_init(void); 53*91f16700Schasinglulu 54*91f16700Schasinglulu /* 55*91f16700Schasinglulu * This helper function handles Secure EL1 preemption. The preemption could be 56*91f16700Schasinglulu * due Non Secure interrupts or EL3 interrupts. In both the cases we context 57*91f16700Schasinglulu * switch to the normal world and in case of EL3 interrupts, it will again be 58*91f16700Schasinglulu * routed to EL3 which will get handled at the exception vectors. 
59*91f16700Schasinglulu */ 60*91f16700Schasinglulu uint64_t tspd_handle_sp_preemption(void *handle) 61*91f16700Schasinglulu { 62*91f16700Schasinglulu cpu_context_t *ns_cpu_context; 63*91f16700Schasinglulu 64*91f16700Schasinglulu assert(handle == cm_get_context(SECURE)); 65*91f16700Schasinglulu cm_el1_sysregs_context_save(SECURE); 66*91f16700Schasinglulu /* Get a reference to the non-secure context */ 67*91f16700Schasinglulu ns_cpu_context = cm_get_context(NON_SECURE); 68*91f16700Schasinglulu assert(ns_cpu_context); 69*91f16700Schasinglulu 70*91f16700Schasinglulu /* 71*91f16700Schasinglulu * To allow Secure EL1 interrupt handler to re-enter TSP while TSP 72*91f16700Schasinglulu * is preempted, the secure system register context which will get 73*91f16700Schasinglulu * overwritten must be additionally saved. This is currently done 74*91f16700Schasinglulu * by the TSPD S-EL1 interrupt handler. 75*91f16700Schasinglulu */ 76*91f16700Schasinglulu 77*91f16700Schasinglulu /* 78*91f16700Schasinglulu * Restore non-secure state. 79*91f16700Schasinglulu */ 80*91f16700Schasinglulu cm_el1_sysregs_context_restore(NON_SECURE); 81*91f16700Schasinglulu cm_set_next_eret_context(NON_SECURE); 82*91f16700Schasinglulu 83*91f16700Schasinglulu /* 84*91f16700Schasinglulu * The TSP was preempted during execution of a Yielding SMC Call. 85*91f16700Schasinglulu * Return back to the normal world with SMC_PREEMPTED as error 86*91f16700Schasinglulu * code in x0. 87*91f16700Schasinglulu */ 88*91f16700Schasinglulu SMC_RET1(ns_cpu_context, SMC_PREEMPTED); 89*91f16700Schasinglulu } 90*91f16700Schasinglulu 91*91f16700Schasinglulu /******************************************************************************* 92*91f16700Schasinglulu * This function is the handler registered for S-EL1 interrupts by the TSPD. It 93*91f16700Schasinglulu * validates the interrupt and upon success arranges entry into the TSP at 94*91f16700Schasinglulu * 'tsp_sel1_intr_entry()' for handling the interrupt. 
 * Typically, interrupts for a specific security state get handled in the same
 * security exception level if the execution is in the same security state. For
 * example, if a non-secure interrupt gets fired when CPU is executing in NS-EL2
 * it gets handled in the non-secure world.
 * However, interrupts belonging to the opposite security state typically demand
 * a world(context) switch. This is inline with the security principle which
 * states a secure interrupt has to be handled in the secure world.
 * Hence, the TSPD in EL3 expects the context(handle) for a secure interrupt to
 * be non-secure and vice versa.
 * However, a race condition between non-secure and secure interrupts can lead to
 * a scenario where the above assumptions do not hold true. This is demonstrated
 * below through Note 1.
107*91f16700Schasinglulu ******************************************************************************/ 108*91f16700Schasinglulu static uint64_t tspd_sel1_interrupt_handler(uint32_t id, 109*91f16700Schasinglulu uint32_t flags, 110*91f16700Schasinglulu void *handle, 111*91f16700Schasinglulu void *cookie) 112*91f16700Schasinglulu { 113*91f16700Schasinglulu uint32_t linear_id; 114*91f16700Schasinglulu tsp_context_t *tsp_ctx; 115*91f16700Schasinglulu 116*91f16700Schasinglulu /* Get a reference to this cpu's TSP context */ 117*91f16700Schasinglulu linear_id = plat_my_core_pos(); 118*91f16700Schasinglulu tsp_ctx = &tspd_sp_context[linear_id]; 119*91f16700Schasinglulu 120*91f16700Schasinglulu #if TSP_NS_INTR_ASYNC_PREEMPT 121*91f16700Schasinglulu 122*91f16700Schasinglulu /* 123*91f16700Schasinglulu * Note 1: 124*91f16700Schasinglulu * Under the current interrupt routing model, interrupts from other 125*91f16700Schasinglulu * world are routed to EL3 when TSP_NS_INTR_ASYNC_PREEMPT is enabled. 126*91f16700Schasinglulu * Consider the following scenario: 127*91f16700Schasinglulu * 1/ A non-secure payload(like tftf) requests a secure service from 128*91f16700Schasinglulu * TSP by invoking a yielding SMC call. 129*91f16700Schasinglulu * 2/ Later, execution jumps to TSP in S-EL1 with the help of TSP 130*91f16700Schasinglulu * Dispatcher in Secure Monitor(EL3). 131*91f16700Schasinglulu * 3/ While CPU is executing TSP, a Non-secure interrupt gets fired. 132*91f16700Schasinglulu * this demands a context switch to the non-secure world through 133*91f16700Schasinglulu * secure monitor. 134*91f16700Schasinglulu * 4/ Consequently, TSP in S-EL1 get asynchronously pre-empted and 135*91f16700Schasinglulu * execution switches to secure monitor(EL3). 136*91f16700Schasinglulu * 5/ EL3 tries to triage the (Non-secure) interrupt based on the 137*91f16700Schasinglulu * highest pending interrupt. 
138*91f16700Schasinglulu * 6/ However, while the NS Interrupt was pending, secure timer gets 139*91f16700Schasinglulu * fired which makes a S-EL1 interrupt to be pending. 140*91f16700Schasinglulu * 7/ Hence, execution jumps to this companion handler of S-EL1 141*91f16700Schasinglulu * interrupt (i.e., tspd_sel1_interrupt_handler) even though the TSP 142*91f16700Schasinglulu * was pre-empted due to non-secure interrupt. 143*91f16700Schasinglulu * 8/ The above sequence of events explain how TSP was pre-empted by 144*91f16700Schasinglulu * S-EL1 interrupt indirectly in an asynchronous way. 145*91f16700Schasinglulu * 9/ Hence, we track the TSP pre-emption by S-EL1 interrupt using a 146*91f16700Schasinglulu * boolean variable per each core. 147*91f16700Schasinglulu * 10/ This helps us to indicate that SMC call for TSP service was 148*91f16700Schasinglulu * pre-empted when execution resumes in non-secure world. 149*91f16700Schasinglulu */ 150*91f16700Schasinglulu 151*91f16700Schasinglulu /* Check the security state when the exception was generated */ 152*91f16700Schasinglulu if (get_interrupt_src_ss(flags) == NON_SECURE) { 153*91f16700Schasinglulu /* Sanity check the pointer to this cpu's context */ 154*91f16700Schasinglulu assert(handle == cm_get_context(NON_SECURE)); 155*91f16700Schasinglulu 156*91f16700Schasinglulu /* Save the non-secure context before entering the TSP */ 157*91f16700Schasinglulu cm_el1_sysregs_context_save(NON_SECURE); 158*91f16700Schasinglulu tsp_ctx->preempted_by_sel1_intr = false; 159*91f16700Schasinglulu } else { 160*91f16700Schasinglulu /* Sanity check the pointer to this cpu's context */ 161*91f16700Schasinglulu assert(handle == cm_get_context(SECURE)); 162*91f16700Schasinglulu 163*91f16700Schasinglulu /* Save the secure context before entering the TSP for S-EL1 164*91f16700Schasinglulu * interrupt handling 165*91f16700Schasinglulu */ 166*91f16700Schasinglulu cm_el1_sysregs_context_save(SECURE); 167*91f16700Schasinglulu 
tsp_ctx->preempted_by_sel1_intr = true; 168*91f16700Schasinglulu } 169*91f16700Schasinglulu #else 170*91f16700Schasinglulu /* Check the security state when the exception was generated */ 171*91f16700Schasinglulu assert(get_interrupt_src_ss(flags) == NON_SECURE); 172*91f16700Schasinglulu 173*91f16700Schasinglulu /* Sanity check the pointer to this cpu's context */ 174*91f16700Schasinglulu assert(handle == cm_get_context(NON_SECURE)); 175*91f16700Schasinglulu 176*91f16700Schasinglulu /* Save the non-secure context before entering the TSP */ 177*91f16700Schasinglulu cm_el1_sysregs_context_save(NON_SECURE); 178*91f16700Schasinglulu #endif 179*91f16700Schasinglulu 180*91f16700Schasinglulu assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE)); 181*91f16700Schasinglulu 182*91f16700Schasinglulu /* 183*91f16700Schasinglulu * Determine if the TSP was previously preempted. Its last known 184*91f16700Schasinglulu * context has to be preserved in this case. 185*91f16700Schasinglulu * The TSP should return control to the TSPD after handling this 186*91f16700Schasinglulu * S-EL1 interrupt. Preserve essential EL3 context to allow entry into 187*91f16700Schasinglulu * the TSP at the S-EL1 interrupt entry point using the 'cpu_context' 188*91f16700Schasinglulu * structure. There is no need to save the secure system register 189*91f16700Schasinglulu * context since the TSP is supposed to preserve it during S-EL1 190*91f16700Schasinglulu * interrupt handling. 
191*91f16700Schasinglulu */ 192*91f16700Schasinglulu if (get_yield_smc_active_flag(tsp_ctx->state)) { 193*91f16700Schasinglulu tsp_ctx->saved_spsr_el3 = (uint32_t)SMC_GET_EL3(&tsp_ctx->cpu_ctx, 194*91f16700Schasinglulu CTX_SPSR_EL3); 195*91f16700Schasinglulu tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx, 196*91f16700Schasinglulu CTX_ELR_EL3); 197*91f16700Schasinglulu #if TSP_NS_INTR_ASYNC_PREEMPT 198*91f16700Schasinglulu memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE); 199*91f16700Schasinglulu #endif 200*91f16700Schasinglulu } 201*91f16700Schasinglulu 202*91f16700Schasinglulu cm_el1_sysregs_context_restore(SECURE); 203*91f16700Schasinglulu cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry, 204*91f16700Schasinglulu SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)); 205*91f16700Schasinglulu 206*91f16700Schasinglulu cm_set_next_eret_context(SECURE); 207*91f16700Schasinglulu 208*91f16700Schasinglulu /* 209*91f16700Schasinglulu * Tell the TSP that it has to handle a S-EL1 interrupt synchronously. 210*91f16700Schasinglulu * Also the instruction in normal world where the interrupt was 211*91f16700Schasinglulu * generated is passed for debugging purposes. It is safe to retrieve 212*91f16700Schasinglulu * this address from ELR_EL3 as the secure context will not take effect 213*91f16700Schasinglulu * until el3_exit(). 214*91f16700Schasinglulu */ 215*91f16700Schasinglulu SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3()); 216*91f16700Schasinglulu } 217*91f16700Schasinglulu 218*91f16700Schasinglulu #if TSP_NS_INTR_ASYNC_PREEMPT 219*91f16700Schasinglulu /******************************************************************************* 220*91f16700Schasinglulu * This function is the handler registered for Non secure interrupts by the 221*91f16700Schasinglulu * TSPD. It validates the interrupt and upon success arranges entry into the 222*91f16700Schasinglulu * normal world for handling the interrupt. 
223*91f16700Schasinglulu ******************************************************************************/ 224*91f16700Schasinglulu static uint64_t tspd_ns_interrupt_handler(uint32_t id, 225*91f16700Schasinglulu uint32_t flags, 226*91f16700Schasinglulu void *handle, 227*91f16700Schasinglulu void *cookie) 228*91f16700Schasinglulu { 229*91f16700Schasinglulu /* Check the security state when the exception was generated */ 230*91f16700Schasinglulu assert(get_interrupt_src_ss(flags) == SECURE); 231*91f16700Schasinglulu 232*91f16700Schasinglulu /* 233*91f16700Schasinglulu * Disable the routing of NS interrupts from secure world to EL3 while 234*91f16700Schasinglulu * interrupted on this core. 235*91f16700Schasinglulu */ 236*91f16700Schasinglulu disable_intr_rm_local(INTR_TYPE_NS, SECURE); 237*91f16700Schasinglulu 238*91f16700Schasinglulu return tspd_handle_sp_preemption(handle); 239*91f16700Schasinglulu } 240*91f16700Schasinglulu #endif 241*91f16700Schasinglulu 242*91f16700Schasinglulu /******************************************************************************* 243*91f16700Schasinglulu * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type 244*91f16700Schasinglulu * (aarch32/aarch64) if not already known and initialises the context for entry 245*91f16700Schasinglulu * into the SP for its initialisation. 246*91f16700Schasinglulu ******************************************************************************/ 247*91f16700Schasinglulu static int32_t tspd_setup(void) 248*91f16700Schasinglulu { 249*91f16700Schasinglulu entry_point_info_t *tsp_ep_info; 250*91f16700Schasinglulu uint32_t linear_id; 251*91f16700Schasinglulu 252*91f16700Schasinglulu linear_id = plat_my_core_pos(); 253*91f16700Schasinglulu 254*91f16700Schasinglulu /* 255*91f16700Schasinglulu * Get information about the Secure Payload (BL32) image. Its 256*91f16700Schasinglulu * absence is a critical failure. 
TODO: Add support to 257*91f16700Schasinglulu * conditionally include the SPD service 258*91f16700Schasinglulu */ 259*91f16700Schasinglulu tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 260*91f16700Schasinglulu if (!tsp_ep_info) { 261*91f16700Schasinglulu WARN("No TSP provided by BL2 boot loader, Booting device" 262*91f16700Schasinglulu " without TSP initialization. SMC`s destined for TSP" 263*91f16700Schasinglulu " will return SMC_UNK\n"); 264*91f16700Schasinglulu return 1; 265*91f16700Schasinglulu } 266*91f16700Schasinglulu 267*91f16700Schasinglulu /* 268*91f16700Schasinglulu * If there's no valid entry point for SP, we return a non-zero value 269*91f16700Schasinglulu * signalling failure initializing the service. We bail out without 270*91f16700Schasinglulu * registering any handlers 271*91f16700Schasinglulu */ 272*91f16700Schasinglulu if (!tsp_ep_info->pc) 273*91f16700Schasinglulu return 1; 274*91f16700Schasinglulu 275*91f16700Schasinglulu /* 276*91f16700Schasinglulu * We could inspect the SP image and determine its execution 277*91f16700Schasinglulu * state i.e whether AArch32 or AArch64. Assuming it's AArch64 278*91f16700Schasinglulu * for the time being. 279*91f16700Schasinglulu */ 280*91f16700Schasinglulu tspd_init_tsp_ep_state(tsp_ep_info, 281*91f16700Schasinglulu TSP_AARCH64, 282*91f16700Schasinglulu tsp_ep_info->pc, 283*91f16700Schasinglulu &tspd_sp_context[linear_id]); 284*91f16700Schasinglulu 285*91f16700Schasinglulu #if TSP_INIT_ASYNC 286*91f16700Schasinglulu bl31_set_next_image_type(SECURE); 287*91f16700Schasinglulu #else 288*91f16700Schasinglulu /* 289*91f16700Schasinglulu * All TSPD initialization done. 
Now register our init function with 290*91f16700Schasinglulu * BL31 for deferred invocation 291*91f16700Schasinglulu */ 292*91f16700Schasinglulu bl31_register_bl32_init(&tspd_init); 293*91f16700Schasinglulu #endif 294*91f16700Schasinglulu return 0; 295*91f16700Schasinglulu } 296*91f16700Schasinglulu 297*91f16700Schasinglulu /******************************************************************************* 298*91f16700Schasinglulu * This function passes control to the Secure Payload image (BL32) for the first 299*91f16700Schasinglulu * time on the primary cpu after a cold boot. It assumes that a valid secure 300*91f16700Schasinglulu * context has already been created by tspd_setup() which can be directly used. 301*91f16700Schasinglulu * It also assumes that a valid non-secure context has been initialised by PSCI 302*91f16700Schasinglulu * so it does not need to save and restore any non-secure state. This function 303*91f16700Schasinglulu * performs a synchronous entry into the Secure payload. The SP passes control 304*91f16700Schasinglulu * back to this routine through a SMC. 305*91f16700Schasinglulu ******************************************************************************/ 306*91f16700Schasinglulu int32_t tspd_init(void) 307*91f16700Schasinglulu { 308*91f16700Schasinglulu uint32_t linear_id = plat_my_core_pos(); 309*91f16700Schasinglulu tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; 310*91f16700Schasinglulu entry_point_info_t *tsp_entry_point; 311*91f16700Schasinglulu uint64_t rc; 312*91f16700Schasinglulu 313*91f16700Schasinglulu /* 314*91f16700Schasinglulu * Get information about the Secure Payload (BL32) image. Its 315*91f16700Schasinglulu * absence is a critical failure. 
316*91f16700Schasinglulu */ 317*91f16700Schasinglulu tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE); 318*91f16700Schasinglulu assert(tsp_entry_point); 319*91f16700Schasinglulu 320*91f16700Schasinglulu cm_init_my_context(tsp_entry_point); 321*91f16700Schasinglulu 322*91f16700Schasinglulu /* 323*91f16700Schasinglulu * Arrange for an entry into the test secure payload. It will be 324*91f16700Schasinglulu * returned via TSP_ENTRY_DONE case 325*91f16700Schasinglulu */ 326*91f16700Schasinglulu rc = tspd_synchronous_sp_entry(tsp_ctx); 327*91f16700Schasinglulu assert(rc != 0); 328*91f16700Schasinglulu 329*91f16700Schasinglulu return rc; 330*91f16700Schasinglulu } 331*91f16700Schasinglulu 332*91f16700Schasinglulu 333*91f16700Schasinglulu /******************************************************************************* 334*91f16700Schasinglulu * This function is responsible for handling all SMCs in the Trusted OS/App 335*91f16700Schasinglulu * range from the non-secure state as defined in the SMC Calling Convention 336*91f16700Schasinglulu * Document. It is also responsible for communicating with the Secure payload 337*91f16700Schasinglulu * to delegate work and return results back to the non-secure state. Lastly it 338*91f16700Schasinglulu * will also return any information that the secure payload needs to do the 339*91f16700Schasinglulu * work assigned to it. 
340*91f16700Schasinglulu ******************************************************************************/ 341*91f16700Schasinglulu static uintptr_t tspd_smc_handler(uint32_t smc_fid, 342*91f16700Schasinglulu u_register_t x1, 343*91f16700Schasinglulu u_register_t x2, 344*91f16700Schasinglulu u_register_t x3, 345*91f16700Schasinglulu u_register_t x4, 346*91f16700Schasinglulu void *cookie, 347*91f16700Schasinglulu void *handle, 348*91f16700Schasinglulu u_register_t flags) 349*91f16700Schasinglulu { 350*91f16700Schasinglulu cpu_context_t *ns_cpu_context; 351*91f16700Schasinglulu uint32_t linear_id = plat_my_core_pos(), ns; 352*91f16700Schasinglulu tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; 353*91f16700Schasinglulu uint64_t rc; 354*91f16700Schasinglulu #if TSP_INIT_ASYNC 355*91f16700Schasinglulu entry_point_info_t *next_image_info; 356*91f16700Schasinglulu #endif 357*91f16700Schasinglulu 358*91f16700Schasinglulu /* Determine which security state this SMC originated from */ 359*91f16700Schasinglulu ns = is_caller_non_secure(flags); 360*91f16700Schasinglulu 361*91f16700Schasinglulu switch (smc_fid) { 362*91f16700Schasinglulu 363*91f16700Schasinglulu /* 364*91f16700Schasinglulu * This function ID is used by TSP to indicate that it was 365*91f16700Schasinglulu * preempted by a normal world IRQ. 366*91f16700Schasinglulu * 367*91f16700Schasinglulu */ 368*91f16700Schasinglulu case TSP_PREEMPTED: 369*91f16700Schasinglulu if (ns) 370*91f16700Schasinglulu SMC_RET1(handle, SMC_UNK); 371*91f16700Schasinglulu 372*91f16700Schasinglulu return tspd_handle_sp_preemption(handle); 373*91f16700Schasinglulu 374*91f16700Schasinglulu /* 375*91f16700Schasinglulu * This function ID is used only by the TSP to indicate that it has 376*91f16700Schasinglulu * finished handling a S-EL1 interrupt or was preempted by a higher 377*91f16700Schasinglulu * priority pending EL3 interrupt. Execution should resume 378*91f16700Schasinglulu * in the normal world. 
379*91f16700Schasinglulu */ 380*91f16700Schasinglulu case TSP_HANDLED_S_EL1_INTR: 381*91f16700Schasinglulu if (ns) 382*91f16700Schasinglulu SMC_RET1(handle, SMC_UNK); 383*91f16700Schasinglulu 384*91f16700Schasinglulu assert(handle == cm_get_context(SECURE)); 385*91f16700Schasinglulu 386*91f16700Schasinglulu /* 387*91f16700Schasinglulu * Restore the relevant EL3 state which saved to service 388*91f16700Schasinglulu * this SMC. 389*91f16700Schasinglulu */ 390*91f16700Schasinglulu if (get_yield_smc_active_flag(tsp_ctx->state)) { 391*91f16700Schasinglulu SMC_SET_EL3(&tsp_ctx->cpu_ctx, 392*91f16700Schasinglulu CTX_SPSR_EL3, 393*91f16700Schasinglulu tsp_ctx->saved_spsr_el3); 394*91f16700Schasinglulu SMC_SET_EL3(&tsp_ctx->cpu_ctx, 395*91f16700Schasinglulu CTX_ELR_EL3, 396*91f16700Schasinglulu tsp_ctx->saved_elr_el3); 397*91f16700Schasinglulu #if TSP_NS_INTR_ASYNC_PREEMPT 398*91f16700Schasinglulu /* 399*91f16700Schasinglulu * Need to restore the previously interrupted 400*91f16700Schasinglulu * secure context. 401*91f16700Schasinglulu */ 402*91f16700Schasinglulu memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx, 403*91f16700Schasinglulu TSPD_SP_CTX_SIZE); 404*91f16700Schasinglulu #endif 405*91f16700Schasinglulu } 406*91f16700Schasinglulu 407*91f16700Schasinglulu /* Get a reference to the non-secure context */ 408*91f16700Schasinglulu ns_cpu_context = cm_get_context(NON_SECURE); 409*91f16700Schasinglulu assert(ns_cpu_context); 410*91f16700Schasinglulu 411*91f16700Schasinglulu /* 412*91f16700Schasinglulu * Restore non-secure state. There is no need to save the 413*91f16700Schasinglulu * secure system register context since the TSP was supposed 414*91f16700Schasinglulu * to preserve it during S-EL1 interrupt handling. 
415*91f16700Schasinglulu */ 416*91f16700Schasinglulu cm_el1_sysregs_context_restore(NON_SECURE); 417*91f16700Schasinglulu cm_set_next_eret_context(NON_SECURE); 418*91f16700Schasinglulu 419*91f16700Schasinglulu /* Refer to Note 1 in function tspd_sel1_interrupt_handler()*/ 420*91f16700Schasinglulu #if TSP_NS_INTR_ASYNC_PREEMPT 421*91f16700Schasinglulu if (tsp_ctx->preempted_by_sel1_intr) { 422*91f16700Schasinglulu /* Reset the flag */ 423*91f16700Schasinglulu tsp_ctx->preempted_by_sel1_intr = false; 424*91f16700Schasinglulu 425*91f16700Schasinglulu SMC_RET1(ns_cpu_context, SMC_PREEMPTED); 426*91f16700Schasinglulu } else { 427*91f16700Schasinglulu SMC_RET0((uint64_t) ns_cpu_context); 428*91f16700Schasinglulu } 429*91f16700Schasinglulu #else 430*91f16700Schasinglulu SMC_RET0((uint64_t) ns_cpu_context); 431*91f16700Schasinglulu #endif 432*91f16700Schasinglulu 433*91f16700Schasinglulu 434*91f16700Schasinglulu /* 435*91f16700Schasinglulu * This function ID is used only by the SP to indicate it has 436*91f16700Schasinglulu * finished initialising itself after a cold boot 437*91f16700Schasinglulu */ 438*91f16700Schasinglulu case TSP_ENTRY_DONE: 439*91f16700Schasinglulu if (ns) 440*91f16700Schasinglulu SMC_RET1(handle, SMC_UNK); 441*91f16700Schasinglulu 442*91f16700Schasinglulu /* 443*91f16700Schasinglulu * Stash the SP entry points information. This is done 444*91f16700Schasinglulu * only once on the primary cpu 445*91f16700Schasinglulu */ 446*91f16700Schasinglulu assert(tsp_vectors == NULL); 447*91f16700Schasinglulu tsp_vectors = (tsp_vectors_t *) x1; 448*91f16700Schasinglulu 449*91f16700Schasinglulu if (tsp_vectors) { 450*91f16700Schasinglulu set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); 451*91f16700Schasinglulu 452*91f16700Schasinglulu /* 453*91f16700Schasinglulu * TSP has been successfully initialized. 
Register power 454*91f16700Schasinglulu * management hooks with PSCI 455*91f16700Schasinglulu */ 456*91f16700Schasinglulu psci_register_spd_pm_hook(&tspd_pm); 457*91f16700Schasinglulu 458*91f16700Schasinglulu /* 459*91f16700Schasinglulu * Register an interrupt handler for S-EL1 interrupts 460*91f16700Schasinglulu * when generated during code executing in the 461*91f16700Schasinglulu * non-secure state. 462*91f16700Schasinglulu */ 463*91f16700Schasinglulu flags = 0; 464*91f16700Schasinglulu set_interrupt_rm_flag(flags, NON_SECURE); 465*91f16700Schasinglulu rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, 466*91f16700Schasinglulu tspd_sel1_interrupt_handler, 467*91f16700Schasinglulu flags); 468*91f16700Schasinglulu if (rc) 469*91f16700Schasinglulu panic(); 470*91f16700Schasinglulu 471*91f16700Schasinglulu #if TSP_NS_INTR_ASYNC_PREEMPT 472*91f16700Schasinglulu /* 473*91f16700Schasinglulu * Register an interrupt handler for NS interrupts when 474*91f16700Schasinglulu * generated during code executing in secure state are 475*91f16700Schasinglulu * routed to EL3. 476*91f16700Schasinglulu */ 477*91f16700Schasinglulu flags = 0; 478*91f16700Schasinglulu set_interrupt_rm_flag(flags, SECURE); 479*91f16700Schasinglulu 480*91f16700Schasinglulu rc = register_interrupt_type_handler(INTR_TYPE_NS, 481*91f16700Schasinglulu tspd_ns_interrupt_handler, 482*91f16700Schasinglulu flags); 483*91f16700Schasinglulu if (rc) 484*91f16700Schasinglulu panic(); 485*91f16700Schasinglulu 486*91f16700Schasinglulu /* 487*91f16700Schasinglulu * Disable the NS interrupt locally. 
488*91f16700Schasinglulu */ 489*91f16700Schasinglulu disable_intr_rm_local(INTR_TYPE_NS, SECURE); 490*91f16700Schasinglulu #endif 491*91f16700Schasinglulu } 492*91f16700Schasinglulu 493*91f16700Schasinglulu 494*91f16700Schasinglulu #if TSP_INIT_ASYNC 495*91f16700Schasinglulu /* Save the Secure EL1 system register context */ 496*91f16700Schasinglulu assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx); 497*91f16700Schasinglulu cm_el1_sysregs_context_save(SECURE); 498*91f16700Schasinglulu 499*91f16700Schasinglulu /* Program EL3 registers to enable entry into the next EL */ 500*91f16700Schasinglulu next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE); 501*91f16700Schasinglulu assert(next_image_info); 502*91f16700Schasinglulu assert(NON_SECURE == 503*91f16700Schasinglulu GET_SECURITY_STATE(next_image_info->h.attr)); 504*91f16700Schasinglulu 505*91f16700Schasinglulu cm_init_my_context(next_image_info); 506*91f16700Schasinglulu cm_prepare_el3_exit(NON_SECURE); 507*91f16700Schasinglulu SMC_RET0(cm_get_context(NON_SECURE)); 508*91f16700Schasinglulu #else 509*91f16700Schasinglulu /* 510*91f16700Schasinglulu * SP reports completion. The SPD must have initiated 511*91f16700Schasinglulu * the original request through a synchronous entry 512*91f16700Schasinglulu * into the SP. Jump back to the original C runtime 513*91f16700Schasinglulu * context. 514*91f16700Schasinglulu */ 515*91f16700Schasinglulu tspd_synchronous_sp_exit(tsp_ctx, x1); 516*91f16700Schasinglulu break; 517*91f16700Schasinglulu #endif 518*91f16700Schasinglulu /* 519*91f16700Schasinglulu * This function ID is used only by the SP to indicate it has finished 520*91f16700Schasinglulu * aborting a preempted Yielding SMC Call. 521*91f16700Schasinglulu */ 522*91f16700Schasinglulu case TSP_ABORT_DONE: 523*91f16700Schasinglulu 524*91f16700Schasinglulu /* 525*91f16700Schasinglulu * These function IDs are used only by the SP to indicate it has 526*91f16700Schasinglulu * finished: 527*91f16700Schasinglulu * 1. 
	 * turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:
		/* Falls through: completion handling is shared with the
		 * *_DONE cases below. */

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci
	 *    cpu_suspend request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		/* These completions may only originate from the secure side */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;

	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 *
	 * NOTE(review): the SMC_RET* macros appear to return from this
	 * handler directly (see the "Unreachable" assert after the if/else
	 * below), which is why no break follows them — confirm against the
	 * macro definitions in the runtime services headers.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
	/*
	 * Request from non-secure client to perform a check
	 * of the DIT PSTATE bit.
	 */
	case TSP_YIELD_FID(TSP_CHECK_DIT):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/* Set appropriate entry for SMC.
			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case, and supply the preemption return code
				 * for TSP.
				 */
				ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x3. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}
		assert(0); /* Unreachable */

	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

	/*
	 * Request from non secure world to resume the preempted
	 * Yielding SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts. Also, supply the preemption return
		 * code for TSP.
		 */
		ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif

		/* We just need to return to the preempted point in
		 * TSP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		/* Only the secure side may ask for the stashed arguments */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to non-secure
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	/* Unrecognised function ID in the Trusted OS range */
	SMC_RET1(handle, SMC_UNK);
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define a SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tspd_smc_handler
);