/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <asm_macros.S>
#include <assert_macros.S>

#include <lib/psci/psci.h>

#include <bl31_data.h>
#include <plat_psci.h>


#define RESET_RETRY_CNT		800
#define PSCI_ABORT_CNT		100

#if (SOC_CORE_RELEASE)

.global _psci_cpu_on

/*
 * int _psci_cpu_on(u_register_t core_mask)
 * x0 = target cpu core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 */

func _psci_cpu_on
        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!

        mov x6, x0

        /* x0 = core mask (lsb)
         * x6 = core mask (lsb)
         */

        /* check if core disabled */
        bl _soc_ck_disabled		/* 0-2 */
        cbnz w0, psci_disabled

        /* check core data area to see if core cannot be turned on
         * read the core state
         */
        mov x0, x6
        bl _getCoreState		/* 0-5 */
        mov x9, x0

        /* x6 = core mask (lsb)
         * x9 = core state (from data area)
         */

        cmp x9, #CORE_DISABLED
        mov x0, #PSCI_E_DISABLED
        b.eq cpu_on_done

        cmp x9, #CORE_PENDING
        mov x0, #PSCI_E_ON_PENDING
        b.eq cpu_on_done

        cmp x9, #CORE_RELEASED
        mov x0, #PSCI_E_ALREADY_ON
        b.eq cpu_on_done

8:
        /* x6 = core mask (lsb)
         * x9 = core state (from data area)
         */

        cmp x9, #CORE_WFE
        b.eq core_in_wfe
        cmp x9, #CORE_IN_RESET
        b.eq core_in_reset
        cmp x9, #CORE_OFF
        b.eq core_is_off
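
        /* the only state not dispatched above is CORE_OFF_PENDING -
         * the core is part-way through a CPU_OFF, so ask it to abort
         * the power-down and give it a bounded window to comply
         */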
        cmp x9, #CORE_OFF_PENDING

        /* if state == CORE_OFF_PENDING, set abort */
        mov x0, x6
        mov x1, #ABORT_FLAG_DATA
        mov x2, #CORE_ABORT_OP
        bl _setCoreData		/* 0-3, [13-15] */

        ldr x3, =PSCI_ABORT_CNT
7:
        /* watch for abort to take effect */
        mov x0, x6
        bl _getCoreState		/* 0-5 */
        cmp x0, #CORE_OFF
        b.eq core_is_off
        cmp x0, #CORE_PENDING
        mov x0, #PSCI_E_SUCCESS
        b.eq cpu_on_done

        /* loop until finished */
        sub x3, x3, #1
        cbnz x3, 7b

        /* if we didn't see either CORE_OFF or CORE_PENDING, then this
         * core is in CORE_OFF_PENDING - exit with success, as the core will
         * respond to the abort request
         */
        mov x0, #PSCI_E_SUCCESS
        b cpu_on_done

/* this is where we start up a core out of reset */
core_in_reset:
        /* see if the soc-specific module supports this op */
        ldr x7, =SOC_CORE_RELEASE
        cbnz x7, 3f

        mov x0, #PSCI_E_NOT_SUPPORTED
        b cpu_on_done

        /* x6 = core mask (lsb) */
3:
        /* set core state in data area */
        mov x0, x6
        mov x1, #CORE_PENDING
        bl _setCoreState		/* 0-3, [13-15] */

        /* release the core from reset */
        mov x0, x6
        bl _soc_core_release		/* 0-3 */
        mov x0, #PSCI_E_SUCCESS
        b cpu_on_done

        /* Start up the core that has been powered-down via CPU_OFF
         */
core_is_off:
        /* see if the soc-specific module supports this op
         */
        ldr x7, =SOC_CORE_RESTART
        cbnz x7, 2f

        mov x0, #PSCI_E_NOT_SUPPORTED
        b cpu_on_done

        /* x6 = core mask (lsb) */
2:
        /* set core state in data area */
        mov x0, x6
        mov x1, #CORE_WAKEUP
        bl _setCoreState		/* 0-3, [13-15] */

        /* put the core back into service */
        mov x0, x6
#if (SOC_CORE_RESTART)
        bl _soc_core_restart		/* 0-5 */
#endif
        mov x0, #PSCI_E_SUCCESS
        b cpu_on_done
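
/* Note: a core parked in wfe is expected to re-check its state word in
 * the core data area when it wakes; CORE_PENDING is therefore published
 * (with dsb) before sev is issued below.
 */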
/* this is where we release a core
 * that is being held in wfe */
core_in_wfe:
        /* x6 = core mask (lsb) */

        /* set core state in data area */
        mov x0, x6
        mov x1, #CORE_PENDING
        bl _setCoreState		/* 0-3, [13-15] */
        dsb sy
        isb

        /* put the core back into service */
        sev
        sev
        isb
        mov x0, #PSCI_E_SUCCESS

cpu_on_done:
        /* restore the aarch32/64 non-volatile registers */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_cpu_on

#endif


#if (SOC_CORE_OFF)

.global _psci_cpu_prep_off
.global _psci_cpu_off_wfi

/*
 * void _psci_cpu_prep_off(u_register_t core_mask)
 * this function performs the SoC-specific programming prior
 * to shutting the core down
 * x0 = core_mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_cpu_prep_off

        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!
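
        /* sequence below: mask interrupts, save cpuectlr and exit
         * coherency, save scr_el3 and route FIQ to EL3 so the secure
         * wake SGI is taken here, then apply the SoC-specific prep
         */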

        mov x10, x0			/* x10 = core_mask */

        /* the core does not return from cpu_off, so no need
         * to save/restore non-volatile registers
         */

        /* mask interrupts by setting DAIF[7:4] to 'b1111 */
        msr DAIFSet, #0xF

        /* read cpuectlr and save current value */
        mrs x4, CPUECTLR_EL1
        mov x1, #CPUECTLR_DATA
        mov x2, x4
        mov x0, x10
        bl _setCoreData

        /* remove the core from coherency */
        bic x4, x4, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x4

        /* save scr_el3 */
        mov x0, x10
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* x4 = scr_el3 */

        /* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
        orr x4, x4, #SCR_FIQ_MASK
        msr SCR_EL3, x4

        /* x10 = core_mask */

        /* prep the core for shutdown */
        mov x0, x10
        bl _soc_core_prep_off

        /* restore the aarch32/64 non-volatile registers */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_cpu_prep_off

/*
 * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
 * - this function shuts down the core
 * - this function does not return!!
 */
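
/* Note: the resume address is carried in x29, which the SoC-specific
 * _soc_core_entr_off path is expected to preserve across the power-down.
 */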

func _psci_cpu_off_wfi
        /* save the wakeup address */
        mov x29, x1

        /* x0 = core_mask */

        /* shut down the core */
        bl _soc_core_entr_off

        /* branch to resume execution */
        br x29
endfunc _psci_cpu_off_wfi

#endif


#if (SOC_CORE_RESTART)

.global _psci_wakeup

/*
 * void _psci_wakeup(u_register_t core_mask)
 * this function performs the SoC-specific programming
 * after a core wakes up from OFF
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_wakeup

        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!
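
        /* undo the CPU_OFF prep: restore the saved scr_el3, then rejoin
         * the coherency domain (set SMPEN) before the SoC-specific exit
         * programming
         */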

        mov x4, x0			/* x4 = core mask */

        /* restore scr_el3 */
        mov x0, x4
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x4 = core mask */

        /* restore CPUECTLR */
        mov x0, x4
        mov x1, #CPUECTLR_DATA
        bl _getCoreData
        orr x0, x0, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x0

        /* x4 = core mask */

        /* start the core back up */
        mov x0, x4
        bl _soc_core_exit_off

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_wakeup

#endif


#if (SOC_SYSTEM_RESET)

.global _psci_system_reset

func _psci_system_reset

        /* system reset is mandatory
         * system reset is soc-specific
         * Note: under no circumstances do we return from this call
         */
        bl _soc_sys_reset
endfunc _psci_system_reset

#endif


#if (SOC_SYSTEM_OFF)

.global _psci_system_off

func _psci_system_off

        /* system off is mandatory
         * system off is soc-specific
         * Note: under no circumstances do we return from this call */
        b _soc_sys_off
endfunc _psci_system_off

#endif


#if (SOC_CORE_STANDBY)

.global _psci_core_entr_stdby
.global _psci_core_prep_stdby
.global _psci_core_exit_stdby

/*
 * void _psci_core_entr_stdby(u_register_t core_mask) - this
 * is the fast-path for simple core standby
 */

func _psci_core_entr_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!

        mov x5, x0			/* x5 = core mask */

        /* save scr_el3 */
        mov x0, x5
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* x4 = SCR_EL3
         * x5 = core mask
         */

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* x5 = core mask */

        /* put the core into standby */
        mov x0, x5
        bl _soc_core_entr_stdby

        /* restore scr_el3 */
        mov x0, x5
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_core_entr_stdby

/*
 * void _psci_core_prep_stdby(u_register_t core_mask) - this
 * sets up the core to enter standby state thru the normal path
 */

func _psci_core_prep_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!

        mov x5, x0

        /* x5 = core mask */

        /* save scr_el3 */
        mov x0, x5
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* x5 = core mask */

        /* call for any SoC-specific programming */
        mov x0, x5
        bl _soc_core_prep_stdby

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_core_prep_stdby

/*
 * void _psci_core_exit_stdby(u_register_t core_mask) - this
 * exits the core from standby state thru the normal path
 */

func _psci_core_exit_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!
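
        /* restore the scr_el3 value stashed by the matching prep call,
         * then let the SoC hook undo any standby programming
         */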

        mov x5, x0

        /* x5 = core mask */

        /* restore scr_el3 */
        mov x0, x5
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x5 = core mask */

        /* perform any SoC-specific programming after standby state */
        mov x0, x5
        bl _soc_core_exit_stdby

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_core_exit_stdby

#endif


#if (SOC_CORE_PWR_DWN)

.global _psci_core_prep_pwrdn
.global _psci_cpu_pwrdn_wfi
.global _psci_core_exit_pwrdn

/*
 * void _psci_core_prep_pwrdn(u_register_t core_mask)
 * this function prepares the core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_prep_pwrdn
        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!
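
        /* note: PSTATE interrupt masks remain set; routing IRQ/FIQ to
         * EL3 via scr_el3 is sufficient for a pending wake interrupt to
         * bring the core out of wfi without the interrupt being taken
         */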

        mov x6, x0

        /* x6 = core mask */

        /* mask interrupts by setting DAIF[7:4] to 'b1111 */
        msr DAIFSet, #0xF

        /* save scr_el3 */
        mov x0, x6
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* save cpuectlr */
        mov x0, x6
        mov x1, #CPUECTLR_DATA
        mrs x2, CPUECTLR_EL1
        bl _setCoreData

        /* x6 = core mask */

        /* SoC-specific programming for power-down */
        mov x0, x6
        bl _soc_core_prep_pwrdn

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_core_prep_pwrdn

/*
 * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the core
 */

func _psci_cpu_pwrdn_wfi
        /* save the wakeup address */
        mov x29, x1

        /* x0 = core mask */

        /* shut down the core */
        bl _soc_core_entr_pwrdn

        /* branch to resume execution */
        br x29
endfunc _psci_cpu_pwrdn_wfi

/*
 * void _psci_core_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a core power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_exit_pwrdn
        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!

        mov x5, x0			/* x5 = core mask */

        /* restore scr_el3 */
        mov x0, x5
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x5 = core mask */

        /* restore cpuectlr */
        mov x0, x5
        mov x1, #CPUECTLR_DATA
        bl _getCoreData
        /* make sure smp is set */
        orr x0, x0, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x0

        /* x5 = core mask */

        /* SoC-specific cleanup */
        mov x0, x5
        bl _soc_core_exit_pwrdn

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_core_exit_pwrdn

#endif

#if (SOC_CLUSTER_STANDBY)

.global _psci_clstr_prep_stdby
.global _psci_clstr_exit_stdby

/*
 * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
 * sets up the clstr to enter standby state thru the normal path
 */

func _psci_clstr_prep_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!
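
        /* same save/route pattern as the core standby prep; only the
         * SoC-specific hook differs
         */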

        mov x5, x0

        /* x5 = core mask */

        /* save scr_el3 */
        mov x0, x5
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* x5 = core mask */

        /* call for any SoC-specific programming */
        mov x0, x5
        bl _soc_clstr_prep_stdby

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_clstr_prep_stdby

/*
 * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
 * exits the clstr from standby state thru the normal path
 */

func _psci_clstr_exit_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!

        mov x5, x0			/* x5 = core mask */

        /* restore scr_el3 */
        mov x0, x5
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x5 = core mask */

        /* perform any SoC-specific programming after standby state */
        mov x0, x5
        bl _soc_clstr_exit_stdby

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_clstr_exit_stdby

#endif

#if (SOC_CLUSTER_PWR_DWN)

.global _psci_clstr_prep_pwrdn
.global _psci_clstr_exit_pwrdn

/*
 * void _psci_clstr_prep_pwrdn(u_register_t core_mask)
 * this function prepares the cluster+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_prep_pwrdn
        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!

        mov x6, x0			/* x6 = core mask */

        /* mask interrupts by setting DAIF[7:4] to 'b1111 */
        msr DAIFSet, #0xF

        /* save scr_el3 */
        mov x0, x6
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* save cpuectlr */
        mov x0, x6
        mov x1, #CPUECTLR_DATA
        mrs x2, CPUECTLR_EL1
        mov x4, x2
        bl _setCoreData

        /* remove core from coherency */
        bic x4, x4, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x4

        /* x6 = core mask */

        /* SoC-specific programming for power-down */
        mov x0, x6
        bl _soc_clstr_prep_pwrdn

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_clstr_prep_pwrdn

/*
 * void _psci_clstr_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a cluster power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_exit_pwrdn
        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!
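
        /* mirror of the prep path: restore scr_el3, rejoin coherency
         * (set SMPEN), then run the cluster-level SoC cleanup
         */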

        mov x4, x0			/* x4 = core mask */

        /* restore scr_el3 */
        mov x0, x4
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x4 = core mask */

        /* restore cpuectlr */
        mov x0, x4
        mov x1, #CPUECTLR_DATA
        bl _getCoreData
        /* make sure smp is set */
        orr x0, x0, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x0

        /* x4 = core mask */

        /* SoC-specific cleanup */
        mov x0, x4
        bl _soc_clstr_exit_pwrdn

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_clstr_exit_pwrdn

#endif

#if (SOC_SYSTEM_STANDBY)

.global _psci_sys_prep_stdby
.global _psci_sys_exit_stdby

/*
 * void _psci_sys_prep_stdby(u_register_t core_mask) - this
 * sets up the system to enter standby state thru the normal path
 */

func _psci_sys_prep_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!
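
        /* system-level standby follows the same pattern: stash scr_el3,
         * open IRQ/FIQ at EL3, then apply SoC-specific programming
         */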

        mov x5, x0			/* x5 = core mask */

        /* save scr_el3 */
        mov x0, x5
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* x5 = core mask */

        /* call for any SoC-specific programming */
        mov x0, x5
        bl _soc_sys_prep_stdby

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_sys_prep_stdby

/*
 * void _psci_sys_exit_stdby(u_register_t core_mask) - this
 * exits the system from standby state thru the normal path
 */

func _psci_sys_exit_stdby
        stp x4, x5, [sp, #-16]!
        stp x6, x30, [sp, #-16]!

        mov x5, x0

        /* x5 = core mask */

        /* restore scr_el3 */
        mov x0, x5
        mov x1, #SCR_EL3_DATA
        bl _getCoreData
        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x5 = core mask */

        /* perform any SoC-specific programming after standby state */
        mov x0, x5
        bl _soc_sys_exit_stdby

        ldp x6, x30, [sp], #16
        ldp x4, x5, [sp], #16
        isb
        ret
endfunc _psci_sys_exit_stdby

#endif

#if (SOC_SYSTEM_PWR_DWN)

.global _psci_sys_prep_pwrdn
.global _psci_sys_pwrdn_wfi
.global _psci_sys_exit_pwrdn

/*
 * void _psci_sys_prep_pwrdn(u_register_t core_mask)
 * this function prepares the system+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_prep_pwrdn
        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!

        mov x6, x0			/* x6 = core mask */

        /* mask interrupts by setting DAIF[7:4] to 'b1111 */
        msr DAIFSet, #0xF

        /* save scr_el3 */
        mov x0, x6
        mrs x4, SCR_EL3
        mov x2, x4
        mov x1, #SCR_EL3_DATA
        bl _setCoreData

        /* allow interrupts @ EL3 */
        orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
        msr SCR_EL3, x4

        /* save cpuectlr */
        mov x0, x6
        mov x1, #CPUECTLR_DATA
        mrs x2, CPUECTLR_EL1
        mov x4, x2
        bl _setCoreData

        /* remove core from coherency */
        bic x4, x4, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x4

        /* x6 = core mask */

        /* SoC-specific programming for power-down */
        mov x0, x6
        bl _soc_sys_prep_pwrdn

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_sys_prep_pwrdn


/*
 * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the system
 */

func _psci_sys_pwrdn_wfi
        /* save the wakeup address */
        mov x29, x1

        /* x0 = core mask */

        /* shut down the system */
        bl _soc_sys_pwrdn_wfi

        /* branch to resume execution */
        br x29
endfunc _psci_sys_pwrdn_wfi

/*
 * void _psci_sys_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a system power-down
 * x0 = core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_exit_pwrdn

        stp x4, x5, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x14, x15, [sp, #-16]!
        stp x16, x17, [sp, #-16]!
        stp x18, x30, [sp, #-16]!

        mov x4, x0			/* x4 = core mask */

        /* restore scr_el3 */
        mov x0, x4
        mov x1, #SCR_EL3_DATA
        bl _getCoreData

        /* x0 = saved scr_el3 */
        msr SCR_EL3, x0

        /* x4 = core mask */

        /* restore cpuectlr */
        mov x0, x4
        mov x1, #CPUECTLR_DATA
        bl _getCoreData

        /* make sure smp is set */
        orr x0, x0, #CPUECTLR_SMPEN_MASK
        msr CPUECTLR_EL1, x0

        /* x4 = core mask */

        /* SoC-specific cleanup */
        mov x0, x4
        bl _soc_sys_exit_pwrdn

        /* restore the aarch32/64 non-volatile registers
         */
        ldp x18, x30, [sp], #16
        ldp x16, x17, [sp], #16
        ldp x14, x15, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x10, x11, [sp], #16
        ldp x8, x9, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x4, x5, [sp], #16
        b psci_completed
endfunc _psci_sys_exit_pwrdn

#endif


/* psci std returns */
func psci_disabled
        ldr w0, =PSCI_E_DISABLED
        b psci_completed
endfunc psci_disabled


func psci_not_present
        ldr w0, =PSCI_E_NOT_PRESENT
        b psci_completed
endfunc psci_not_present


func psci_on_pending
        ldr w0, =PSCI_E_ON_PENDING
        b psci_completed
endfunc psci_on_pending


func psci_already_on
        ldr w0, =PSCI_E_ALREADY_ON
        b psci_completed
endfunc psci_already_on


func psci_failure
        ldr w0, =PSCI_E_INTERN_FAIL
        b psci_completed
endfunc psci_failure


func psci_unimplemented
        ldr w0, =PSCI_E_NOT_SUPPORTED
        b psci_completed
endfunc psci_unimplemented


func psci_denied
        ldr w0, =PSCI_E_DENIED
        b psci_completed
endfunc psci_denied


func psci_invalid
        ldr w0, =PSCI_E_INVALID_PARAMS
        b psci_completed
endfunc psci_invalid


func psci_success
        mov x0, #PSCI_E_SUCCESS
        /* fall through to psci_completed */
endfunc psci_success


func psci_completed
        /* x0 = status code */
        ret
endfunc psci_completed