/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/cci.h>
#include <drivers/arm/gicv2.h>
#include <drivers/ti/uart/uart_16550.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>

#include <mcucfg.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>
#include <wdt.h>

#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif
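
/*
 * Worked example of the state-id encoding above (assuming, as the
 * state-ids suggest, 4-bit per-level fields with MTK_LOCAL_STATE_RUN,
 * MTK_LOCAL_STATE_RET and MTK_LOCAL_STATE_OFF being 0, 1 and 2):
 * state-id 0x022 decodes as
 *   bits[3:0]  = 0x2 -> level 0 (core)    : MTK_LOCAL_STATE_OFF
 *   bits[7:4]  = 0x2 -> level 1 (cluster) : MTK_LOCAL_STATE_OFF
 *   bits[11:8] = 0x0 -> level 2 (system)  : MTK_LOCAL_STATE_RUN
 * i.e. a cluster power-down request that leaves the system domain on.
 */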

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
					struct system_context *system,
					uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}
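
/*
 * Illustrative decode for the accessors above, with the usual TF-A
 * MPIDR field definitions: for mpidr 0x101 (Aff1 = 1, Aff0 = 1),
 * get_cluster_data() shifts the cluster field down by
 * MPIDR_AFFINITY_BITS to obtain clusterid 1, get_core_data() masks
 * with MPIDR_CPU_MASK to obtain cpuid 1, and the result is
 * &dormant_data[0].cluster[1].core[1].
 */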

static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
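
/*
 * Layout of core_context.timer_data as used by the save/restore pair
 * above (byte offsets into the unsigned long[8] container):
 *   [#0]  cntkctl_el1,   [#8]  cntp_cval_el0
 *   [#16] cntp_tval_el0, [#24] cntp_ctl_el0
 *   [#32] cntv_tval_el0, [#40] cntv_ctl_el0
 * The last two slots of the container are currently unused.
 */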

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs %x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* disable timer irq, and upper layer should enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();
	write_scr_el3(scr);
}
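
/*
 * Note on plat_cpu_standby() (the usual TF-A standby pattern): setting
 * SCR_EL3.IRQ makes a pending physical IRQ a wake event for this core
 * while it waits in wfi. Interrupts remain masked at EL3, so the core
 * wakes without taking the exception here; restoring the saved SCR_EL3
 * value then leaves the interrupt pending for the normal world to
 * handle. The dsb() ensures outstanding memory accesses complete
 * before entering standby.
 */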

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static uintptr_t secure_entrypoint;

static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);
	return rc;
}
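
/*
 * plat_power_domain_on() programs what is evidently the target core's
 * reset vector ("rv_addr") register in mcucfg with the BL31 warm entry
 * point before asking the SPM to power the core up, so the core begins
 * executing at secure_entrypoint when it leaves reset. A non-zero
 * cluster_id selects the mp1 register bank, cluster 0 the mp0 bank.
 */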

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It will be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It will be
 * wise to flush a write to the global to prevent unpredictable results.
 ******************************************************************************/
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		wdt_suspend();
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
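
/*
 * Summary of the suspend path above: for a core- or cluster-level
 * suspend (the system domain stays on), the SPM MCDI engine is prepared
 * before the per-cpu context save; for a system-level suspend, the
 * watchdog is quiesced, the SCU disabled, the generic timer backed up
 * and the SPM suspend sequence started, with the GIC cpu interface
 * disabled last so no interrupt can spuriously wake the core
 * mid-sequence.
 */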

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
void mtk_system_pwr_domain_resume(void);

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
	    (state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
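
/*
 * Note: on the resume-from-system-suspend path,
 * mtk_system_pwr_domain_resume() has already re-initialised the GIC via
 * plat_arm_gic_init(), so the early return above skips the per-cpu
 * gicv2_cpuif_enable()/gicv2_pcpu_distif_init() pair that is only
 * required on a plain cpu or cluster power-on.
 */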

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
		wdt_resume();
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK handlers to shut down/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	wdt_trigger_reset();

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
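
/*
 * Reminder on the power_state argument validated below. Per the PSCI
 * spec's original state format (which the !PSCI_EXTENDED_STATE_ID
 * variant assumes), bits[15:0] carry the state-id, bit[16] the state
 * type (standby vs. powerdown) and bits[25:24] the target power level.
 * For example, 0x01010000 requests a powerdown of power level 1 with a
 * zero state-id, which the handler maps to MTK_LOCAL_STATE_OFF at
 * levels 0 and 1. With PSCI_EXTENDED_STATE_ID, the state-id itself
 * encodes the per-level states and is matched against
 * mtk_pm_idle_states.
 */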

#if !PSCI_EXTENDED_STATE_ID
static int plat_validate_power_state(unsigned int power_state,
				     psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
#else
int plat_validate_power_state(unsigned int power_state,
			      psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 * Currently we are using a linear search to find the matching
	 * entry in the idle power state array. This can be made a binary
	 * search if the number of entries justifies the additional
	 * complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
#endif
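
/*
 * Worked example for the PSCI_EXTENDED_STATE_ID parse loop above,
 * assuming the 4-bit MTK_LOCAL_PSTATE_WIDTH/MASK implied by the
 * state-ids in mtk_pm_idle_states: for state-id 0x222 the loop stores
 * 0x2 (MTK_LOCAL_STATE_OFF) into pwr_domain_state[0], [1] and [2] in
 * three iterations, then stops once the shifted state_id reaches zero.
 */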

void mtk_system_pwr_domain_resume(void)
{
	console_switch_state(CONSOLE_FLAG_BOOT);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();

	console_switch_state(CONSOLE_FLAG_RUNTIME);
}

static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}

/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state, i.e. for two power states X & Y, if
 * X < Y then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}
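
/*
 * Coordination example: if two cpus in a cluster request
 * MTK_LOCAL_STATE_OFF and MTK_LOCAL_STATE_RUN respectively for the
 * cluster domain, the minimum (RUN) wins and the cluster stays on;
 * the cluster only powers down once every cpu in it requests OFF.
 */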