/*
 * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#include <services/std_svc.h>

#include <gpc.h>
#include <platform_def.h>

#define FSL_SIP_CONFIG_GPC_MASK		U(0x00)
#define FSL_SIP_CONFIG_GPC_UNMASK	U(0x01)
#define FSL_SIP_CONFIG_GPC_SET_WAKE	U(0x02)
#define FSL_SIP_CONFIG_GPC_PM_DOMAIN	U(0x03)
#define FSL_SIP_CONFIG_GPC_SET_AFF	U(0x04)
#define FSL_SIP_CONFIG_GPC_CORE_WAKE	U(0x05)

#define MAX_HW_IRQ_NUM		U(128)
#define MAX_IMR_NUM		U(4)

static uint32_t gpc_saved_imrs[16];
static uint32_t gpc_wake_irqs[4];
static uint32_t gpc_imr_offset[] = {
	IMX_GPC_BASE + IMR1_CORE0_A53,
	IMX_GPC_BASE + IMR1_CORE1_A53,
	IMX_GPC_BASE + IMR1_CORE2_A53,
	IMX_GPC_BASE + IMR1_CORE3_A53,
	IMX_GPC_BASE + IMR1_CORE0_M4,
};

spinlock_t gpc_imr_lock[4];

static void gpc_imr_core_spin_lock(unsigned int core_id)
{
	spin_lock(&gpc_imr_lock[core_id]);
}

static void gpc_imr_core_spin_unlock(unsigned int core_id)
{
	spin_unlock(&gpc_imr_lock[core_id]);
}

static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
{
	uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;

	gpc_imr_core_spin_lock(core_id);

	gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
	mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);

	gpc_imr_core_spin_unlock(core_id);
}

static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
{
	uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
	uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];

	gpc_imr_core_spin_lock(core_id);

	mmio_write_32(reg, val);

	gpc_imr_core_spin_unlock(core_id);
}

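/*
 * Illustrative example of the indexing above (not functional code): the
 * layout assumed by this arithmetic is MAX_IMR_NUM consecutive 32-bit IMR
 * words per core, 4 bytes apart. For core_id = 2 and imr_idx = 1, the
 * register accessed is gpc_imr_offset[2] + 4, i.e. the second IMR word of
 * core 2, and its value is parked in gpc_saved_imrs[2 + 1 * 4], i.e.
 * gpc_saved_imrs[6].
 */
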
/*
 * On i.MX8MQ, the A53 cluster can only enter LPM mode and shut down the
 * A53 PLAT power domain in system suspend, so LPM wakeup is only used for
 * system suspend. When the system enters suspend, any A53 core can be the
 * last core to suspend the system, but LPM wakeup can only use C0's IMRs
 * to wake the A53 cluster from LPM. Therefore the IMRs are saved and
 * programmed with the recorded wake sources before suspend, and restored
 * after resume.
 */
void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
{
	unsigned int imr, core;

	if (pdn) {
		for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
			for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
				gpc_save_imr_lpm(core, imr);
			}
		}
	} else {
		for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
			for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
				gpc_restore_imr_lpm(core, imr);
			}
		}
	}
}

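/*
 * Usage sketch (illustrative, not part of this driver): the platform
 * suspend path is expected to call imx_set_sys_wakeup(last_core, true)
 * before the last core executes wfi, so that only the IRQs recorded in
 * gpc_wake_irqs remain unmasked as LPM wake sources, and to call
 * imx_set_sys_wakeup(last_core, false) on resume to put the original IMR
 * values back.
 */
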
static void imx_gpc_hwirq_mask(unsigned int hwirq)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	gpc_imr_core_spin_lock(0);
	reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val |= 1 << hwirq % 32;
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(0);
}

static void imx_gpc_hwirq_unmask(unsigned int hwirq)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	gpc_imr_core_spin_lock(0);
	reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val &= ~(1 << hwirq % 32);
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(0);
}

static void imx_gpc_set_wake(uint32_t hwirq, bool on)
{
	uint32_t mask, idx;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	mask = 1 << hwirq % 32;
	idx = hwirq / 32;

	gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
				  gpc_wake_irqs[idx] & ~mask;
}

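/*
 * Worked example (illustrative): for hwirq = 67, idx = 67 / 32 = 2 and
 * mask = 1 << (67 % 32) = 1 << 3, so bit 3 of gpc_wake_irqs[2] (and of the
 * third IMR word) represents that interrupt. gpc_save_imr_lpm() later
 * writes ~gpc_wake_irqs[idx] to the IMRs, leaving only the recorded wake
 * sources unmasked while in LPM.
 */
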
static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
{
	gpc_imr_core_spin_lock(core_id);
	if (mask) {
		/* this core will not be woken up by IRQ0 */
		mmio_setbits_32(gpc_imr_offset[core_id], 1);
	} else {
		/* this core can be woken up by IRQ0 */
		mmio_clrbits_32(gpc_imr_offset[core_id], 1);
	}

	dsb();
	gpc_imr_core_spin_unlock(core_id);
}

void imx_gpc_core_wake(uint32_t cpumask)
{
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if (cpumask & (1 << i)) {
			imx_gpc_mask_irq0(i, false);
		}
	}
}

void imx_gpc_set_a53_core_awake(uint32_t core_id)
{
	imx_gpc_mask_irq0(core_id, true);
}

static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= 4) {
		return;
	}

	/*
	 * Use the IMR mask/unmask bits as the affinity function: unmask
	 * the IMR bit to enable IRQ wakeup for this core.
	 */
	gpc_imr_core_spin_lock(cpu_idx);
	reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val &= ~(1 << hwirq % 32);
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(cpu_idx);

	/* clear the affinity of the other cores */
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if (cpu_idx != i) {
			gpc_imr_core_spin_lock(i);
			reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
			val = mmio_read_32(reg);
			val |= (1 << hwirq % 32);
			mmio_write_32(reg, val);
			gpc_imr_core_spin_unlock(i);
		}
	}
}

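/*
 * Illustrative outcome: imx_gpc_set_affinity(42, 2) clears bit 10 of the
 * second IMR word of core 2 (42 / 32 = 1, 42 % 32 = 10) and sets the same
 * bit for cores 0, 1 and 3, so only core 2 can be woken from LPM by that
 * interrupt.
 */
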
/* use wfi to power down the core */
void imx_set_cpu_pwr_off(unsigned int core_id)
{
	bakery_lock_get(&gpc_lock);

	/* enable the wfi power down of the core */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
			(1 << (core_id + 20)));

	bakery_lock_release(&gpc_lock);

	/* assert the pcg pcr bit of the core */
	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
}

/* when coming out of LPM, the reverse steps are applied */
void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
{
	bakery_lock_get(&gpc_lock);

	if (pdn) {
		/* enable the core WFI PDN & IRQ PUP */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				(1 << (core_id + 20)) | COREx_IRQ_WUP(core_id));
		/* assert the pcg pcr bit of the core */
		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	} else {
		/* disable the core WFI PDN & IRQ PUP */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				COREx_IRQ_WUP(core_id));
		/* deassert the pcg pcr bit of the core */
		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	}

	bakery_lock_release(&gpc_lock);
}

void imx_pup_pdn_slot_config(int last_core, bool pdn)
{
	if (pdn) {
		/* SLOT0 for A53 PLAT power down */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), SLT_PLAT_PDN);
		/* SLOT1 for A53 PLAT power up */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(1), SLT_PLAT_PUP);
		/* SLOT2 for A53 primary core power up */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(2), SLT_COREx_PUP(last_core));
		/* ACK setting: PLAT ACK for PDN, CORE ACK for PUP */
		mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
				   A53_PLAT_PDN_ACK | SLT_COREx_PUP_ACK(last_core));
	} else {
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), 0xFFFFFFFF);
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(1), 0xFFFFFFFF);
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(2), 0xFFFFFFFF);
		mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
				   A53_DUMMY_PDN_ACK | A53_DUMMY_PUP_ACK);
	}
}

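/*
 * Illustrative sequencing (assumed GPC slot-controller behaviour): with
 * the configuration above, SLOT0 powers down the A53 PLAT domain on LPM
 * entry, SLOT1 powers it back up on wakeup, and SLOT2 then powers up the
 * last core. PGC_ACK_SEL_A53 selects which acknowledge signals terminate
 * the power-down and power-up sequences (PLAT ack for down, core ack for
 * up).
 */
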
void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
{
	uint32_t val;

	if (is_local_state_off(power_state)) {
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val |= A53_LPM_STOP; /* enable C0-C1's STOP mode */
		val &= ~CPU_CLOCK_ON_LPM; /* disable the CPU clock in LPM mode */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* enable C2-C3's STOP mode */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_STOP);

		/* enable PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val &= ~EN_L2_WFI_PDN;
		val |= L2PGE | EN_PLAT_PDN;
		val &= ~COREx_IRQ_WUP(last_core); /* disable IRQ PUP for the last core */
		val |= COREx_LPM_PUP(last_core); /* enable LPM PUP for the last core */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);

		imx_pup_pdn_slot_config(last_core, true);

		/* enable PLAT PGC */
		mmio_setbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);
	} else {
		/* clear PLAT PGC */
		mmio_clrbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);

		/* clear the slot and ack for cluster power down */
		imx_pup_pdn_slot_config(last_core, false);

		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val &= ~A53_LPM_MASK; /* clear C0-C1's LPM mode */
		val |= CPU_CLOCK_ON_LPM; /* keep the CPU clock on in LPM mode */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* set C2-C3's LPM back to RUN mode */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_MASK);

		/* clear PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val |= EN_L2_WFI_PDN;
		val &= ~(L2PGE | EN_PLAT_PDN);
		val &= ~COREx_LPM_PUP(last_core); /* disable the last core's LPM PUP */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);
	}
}

#define MAX_PLL_NUM	U(12)

static const struct pll_override imx8mq_pll[MAX_PLL_NUM] = {
	{.reg = 0x0, .override_mask = 0x140000, },
	{.reg = 0x8, .override_mask = 0x140000, },
	{.reg = 0x10, .override_mask = 0x140000, },
	{.reg = 0x18, .override_mask = 0x140000, },
	{.reg = 0x20, .override_mask = 0x140000, },
	{.reg = 0x28, .override_mask = 0x140000, },
	{.reg = 0x30, .override_mask = 0x1555540, },
	{.reg = 0x3c, .override_mask = 0x1555540, },
	{.reg = 0x48, .override_mask = 0x140, },
	{.reg = 0x54, .override_mask = 0x140, },
	{.reg = 0x60, .override_mask = 0x140, },
	{.reg = 0x70, .override_mask = 0xa, },
};

void imx_anamix_override(bool enter)
{
	unsigned int i;

	/* set the PLL override bits before entering DSM mode, clear them on exit */
	for (i = 0; i < MAX_PLL_NUM; i++) {
		if (enter) {
			mmio_setbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
					imx8mq_pll[i].override_mask);
		} else {
			mmio_clrbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
					imx8mq_pll[i].override_mask);
		}
	}
}

int imx_gpc_handler(uint32_t smc_fid,
		    u_register_t x1,
		    u_register_t x2,
		    u_register_t x3)
{
	switch (x1) {
	case FSL_SIP_CONFIG_GPC_CORE_WAKE:
		imx_gpc_core_wake(x2);
		break;
	case FSL_SIP_CONFIG_GPC_SET_WAKE:
		imx_gpc_set_wake(x2, x3);
		break;
	case FSL_SIP_CONFIG_GPC_MASK:
		imx_gpc_hwirq_mask(x2);
		break;
	case FSL_SIP_CONFIG_GPC_UNMASK:
		imx_gpc_hwirq_unmask(x2);
		break;
	case FSL_SIP_CONFIG_GPC_SET_AFF:
		imx_gpc_set_affinity(x2, x3);
		break;
	default:
		return SMC_UNK;
	}

	return 0;
}

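/*
 * Usage sketch (illustrative): this handler is reached through the i.MX
 * SiP service dispatcher, with x1 selecting the sub-function and x2/x3
 * carrying its arguments. For example, the normal world can request that
 * hardware interrupt 55 be masked as a GPC wakeup source by issuing the
 * GPC SiP call with x1 = FSL_SIP_CONFIG_GPC_MASK and x2 = 55; the actual
 * SMC function ID is defined by the platform's SiP service, not here.
 */
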
void imx_gpc_init(void)
{
	uint32_t val;
	unsigned int i, j;

	/* mask all the interrupts by default */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) {
			mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0);
		}
	}

	/*
	 * Due to a hardware design requirement, the GPR interrupt (#32)
	 * must be kept unmasked during RUN mode to avoid entering DSM
	 * mode by mistake.
	 */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		mmio_write_32(gpc_imr_offset[i], ~0x1);
	}

	/* leave the IOMUX_GPC bit 12 on for core wakeup */
	mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);

	/* use external IRQs to wake up C0-C3 from LPM */
	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	val |= IRQ_SRC_A53_WUP;
	/* clear the MASTER0 LPM handshake */
	val &= ~MASTER0_LPM_HSK;
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* mask the M4 DSM trigger since the M4 is NOT enabled */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_M4, DSM_MODE_MASK);

	/* map all mix/PU power domains to the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xfffd);

	/* set the SCU timing */
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* use the DUMMY PDN/PUP ACK by default for the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK |
		      A53_DUMMY_PDN_ACK);

	/* disable DSM mode by default */
	mmio_clrbits_32(IMX_GPC_BASE + SLPCR, DSM_MODE_MASK);

	/*
	 * USB PHY power up requires the RESET bit in SRC to be cleared;
	 * otherwise the PU power up bit in the GPC will NOT self-clear.
	 * This only needs to be done once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);

	/*
	 * For USB OTG the limitations are:
	 * 1. before the system clock is configured, the IPG clock runs at
	 *    12.5MHz, so the delay should be longer than 82us.
	 * 2. after the system clock is configured, the IPG clock runs at
	 *    66.5MHz, so the delay should be longer than 15.3us.
	 * Delay 100us to make sure the USB OTG SRC bits are cleared safely.
	 */
	udelay(100);
}