/*
 * Copyright (c) 2016 - 2020, Broadcom
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <stdbool.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>

#include <paxb.h>
#include <sr_def.h>
#include <sr_utils.h>

/* PCIe core/PAXB memory power-switch enable and acknowledge bits */
#define PCIE_CORE_PWR_ARR_POWERON	0x8
#define PCIE_CORE_PWR_ARR_POWEROK	0x4
#define PCIE_CORE_PWR_POWERON		0x2
#define PCIE_CORE_PWR_POWEROK		0x1

#define PCIE_CORE_USER_CFG		(PCIE_CORE_BASE + 0x38)
#define PCIE_PAXB_SMMU_SID_CFG		(PCIE_CORE_BASE + 0x60)
/* SMMU stream-ID field widths (bus/device/function) within the SID config */
#ifdef SID_B8_D1_F1
#define PAXB_SMMU_SID_CFG_BUS_WIDTH	(0x8 << 8)
#define PAXB_SMMU_SID_CFG_DEV_WIDTH	(0x1 << 12)
#define PAXB_SMMU_SID_CFG_FUN_WIDTH	(0x1 << 16)
#else
#define PAXB_SMMU_SID_CFG_BUS_WIDTH	(0x2 << 8)
#define PAXB_SMMU_SID_CFG_DEV_WIDTH	(0x5 << 12)
#define PAXB_SMMU_SID_CFG_FUN_WIDTH	(0x3 << 16)
#endif

#define PAXB_APB_TIMEOUT_COUNT_OFFSET	0x034

/* allow up to 5 ms for each power switch to stabilize */
#define PCIE_CORE_PWR_TIMEOUT_MS	5

/* wait 1 microsecond for PCIe core soft reset */
#define PCIE_CORE_SOFT_RST_DELAY_US	1

/*
 * List of PAXB APB registers
 */
#define PAXB_BASE			0x48000000
#define PAXB_BASE_OFFSET		0x4000
#define PAXB_OFFSET(core)		(PAXB_BASE + \
					 (core) * PAXB_BASE_OFFSET)

#define PAXB_CLK_CTRL_OFFSET		0x000
#define PAXB_EP_PERST_SRC_SEL_MASK	(1 << 2)
#define PAXB_EP_MODE_PERST_MASK		(1 << 1)
#define PAXB_RC_PCIE_RST_OUT_MASK	(1 << 0)

#define PAXB_MAX_IMAP_WINDOWS		8
#define PAXB_IMAP_REG_WIDTH		8
#define PAXB_IMAP0_REG_WIDTH		4
#define PAXB_AXUSER_REG_WIDTH		4

/* indirect access window into the RC configuration space */
#define PAXB_CFG_IND_ADDR_OFFSET	0x120
#define PAXB_CFG_IND_DATA_OFFSET	0x124
#define PAXB_CFG_IND_ADDR_MASK		0x1ffc
#define PAXB_CFG_CFG_TYPE_MASK		0x1

#define PAXB_EP_CFG_ADDR_OFFSET		0x1f8
#define PAXB_EP_CFG_DATA_OFFSET		0x1fc
#define PAXB_EP_CFG_ADDR_MASK		0xffc
#define PAXB_EP_CFG_TYPE_MASK		0x1

#define PAXB_0_DEFAULT_IMAP		0xed0
#define DEFAULT_ADDR_INVALID		BIT(0)
#define PAXB_0_DEFAULT_IMAP_AXUSER	0xed8
#define PAXB_0_DEFAULT_IMAP_AXCACHE	0xedc
#define IMAP_AXCACHE			0xff
#define OARR_VALID			BIT(0)
#define IMAP_VALID			BIT(0)

#define PAXB_IMAP0_BASE_OFFSET		0xc00
#define PAXB_IARR0_BASE_OFFSET		0xd00
#define PAXB_IMAP0_OFFSET(idx)		(PAXB_IMAP0_BASE_OFFSET + \
					 (idx) * PAXB_IMAP0_REG_WIDTH)
#define PAXB_IMAP0_WINDOW_SIZE		0x1000

#define PAXB_IMAP2_OFFSET		0xcc0
#define PAXB_IMAP0_REGS_TYPE_OFFSET	0xcd0
#define PAXB_IARR2_LOWER_OFFSET		0xd10

#define PAXB_IMAP3_BASE_OFFSET		0xe08
#define PAXB_IMAP3_OFFSET(idx)		(PAXB_IMAP3_BASE_OFFSET + \
					 (idx) * PAXB_IMAP_REG_WIDTH)

#define PAXB_IMAP3_0_AXUSER_B_OFFSET	0xe48
#define PAXB_IMAP3_0_AXUSER_OFFSET(idx)	(PAXB_IMAP3_0_AXUSER_B_OFFSET + \
					 (idx) * PAXB_AXUSER_REG_WIDTH)

#define PAXB_IMAP4_BASE_OFFSET		0xe70
#define PAXB_IMAP4_OFFSET(idx)		(PAXB_IMAP4_BASE_OFFSET + \
					 (idx) * PAXB_IMAP_REG_WIDTH)

#define PAXB_IMAP4_0_AXUSER_B_OFFSET	0xeb0
#define PAXB_IMAP4_0_AXUSER_OFFSET(idx)	(PAXB_IMAP4_0_AXUSER_B_OFFSET + \
					 (idx) * PAXB_AXUSER_REG_WIDTH)

#define PAXB_CFG_LINK_STATUS_OFFSET	0xf0c
#define PAXB_CFG_PHYLINKUP_MASK		(1 << 3)
#define PAXB_CFG_DL_ACTIVE_MASK		(1 << 2)

#define PAXB_IMAP0_0_AXUSER_OFFSET	0xf60
#define PAXB_IMAP2_AXUSER_OFFSET	0xfe0

/* cacheable write-back, allocate on both reads and writes */
#define IMAP_ARCACHE			0x0f0
#define IMAP_AWCACHE			0xf00
/* normal access, nonsecure access, and data access */
/* AWQOS:0xe and ARQOS:0xa */
/* AWPROT:0x2 and ARPROT:0x1 */
#define IMAP_AXUSER			0x002e002a

/*
 * List of NIC security and PIPEMUX related registers
 */
#define SR_PCIE_NIC_SECURITY_BASE	0x58100000
#define NS3Z_PCIE_NIC_SECURITY_BASE	0x48100000

/* GIC ITS MSI doorbell address (used as the inbound MSI window target) */
#define GITS_TRANSLATER			0x63c30000

#define VENDOR_ID			0x14e4
#define CFG_RC_DEV_ID			0x434
#define CFG_RC_DEV_SUBID		0x438
#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
#define PCI_CLASS_BRIDGE_MASK		0xffff00
#define PCI_CLASS_BRIDGE_SHIFT		8
#define PCI_CLASS_BRIDGE_PCI		0x0604

/*
 * List of PAXB RC configuration space registers
 */

/* first capability list entry */
#define PCI_CAPABILITY_LIST_OFFSET	0x34
#define PCI_CAPABILITY_SPEED_OFFSET	0xc
#define PCI_EP_CAPABILITY_OFFSET	0x10

#define CFG_RC_LINK_STATUS_CTRL_2	0x0dc
#define CFG_RC_LINK_SPEED_SHIFT		0
#define CFG_RC_LINK_SPEED_MASK		(0xf << CFG_RC_LINK_SPEED_SHIFT)

#define CFG_RC_DEVICE_CAP		0x4d4
#define CFG_RC_DEVICE_CAP_MPS_SHIFT	0
#define CFG_RC_DEVICE_CAP_MPS_MASK	(0x7 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
/* MPS 256 bytes */
#define CFG_RC_DEVICE_CAP_MPS_256B	(0x1 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
/* MPS 512 bytes */
#define CFG_RC_DEVICE_CAP_MPS_512B	(0x2 << CFG_RC_DEVICE_CAP_MPS_SHIFT)

#define CFG_RC_TL_FCIMM_NP_LIMIT	0xa10
#define CFG_RC_TL_FCIMM_NP_VAL		0x01500000
#define CFG_RC_TL_FCIMM_P_LIMIT		0xa14
#define CFG_RC_TL_FCIMM_P_VAL		0x03408080

#define CFG_RC_LINK_CAP			0x4dc
#define CFG_RC_LINK_CAP_SPEED_SHIFT	0
#define CFG_RC_LINK_CAP_SPEED_MASK	(0xf << CFG_RC_LINK_CAP_SPEED_SHIFT)
#define CFG_RC_LINK_CAP_WIDTH_SHIFT	4
#define CFG_RC_LINK_CAP_WIDTH_MASK	(0x1f << CFG_RC_LINK_CAP_WIDTH_SHIFT)

#define CFG_LINK_CAP_RC			0x4f0
#define CFG_RC_DL_ACTIVE_SHIFT		0
#define CFG_RC_DL_ACTIVE_MASK		(0x1 << CFG_RC_DL_ACTIVE_SHIFT)
#define CFG_RC_SLOT_CLK_SHIFT		1
#define CFG_RC_SLOT_CLK_MASK		(0x1 << CFG_RC_SLOT_CLK_SHIFT)

#define CFG_ROOT_CAP_RC			0x4f8
#define CFG_ROOT_CAP_LTR_SHIFT		1
#define CFG_ROOT_CAP_LTR_MASK		(0x1 << CFG_ROOT_CAP_LTR_SHIFT)

#define CFG_RC_CLKREQ_ENABLED		0x4fc
#define CFG_RC_CLKREQ_ENABLED_SHIFT	0
#define CFG_RC_CLKREQ_ENABLED_MASK	(0x1 << CFG_RC_CLKREQ_ENABLED_SHIFT)

#define CFG_RC_COEFF_ADDR		0x638

#define CFG_RC_TL_CTRL_0		0x800
#define RC_MEM_DW_CHK_MASK		0x03fe

#define CFG_RC_PDL_CTRL_4		0x1010
#define NPH_FC_INIT_SHIFT		24
#define NPH_FC_INIT_MASK		(U(0xff) << NPH_FC_INIT_SHIFT)
#define PD_FC_INIT_SHIFT		12
#define PD_FC_INIT_MASK			(0xffff << PD_FC_INIT_SHIFT)

#define CFG_RC_PDL_CTRL_5		0x1014
#define PH_INIT_SHIFT			0
#define PH_INIT_MASK			(0xff << PH_INIT_SHIFT)

#define DL_STATUS_OFFSET		0x1048
#define PHYLINKUP			BIT(13)

/* default flow-control credit initial values */
#define PH_INIT				0x10
#define PD_FC_INIT			0x100
#define NPH_FC_INIT			0x8

/* credit initial values used for cores 0, 1, 6, 7 (see paxb_cfg_pdl_ctrl) */
#define SRP_PH_INIT			0x7F
#define SRP_PD_FC_INIT			0x200
#define SRP_NPH_FC_INIT			0x7F

/* bit-fields of an ECAM-style configuration space address */
#define CFG_ADDR_BUS_NUM_SHIFT		20
#define CFG_ADDR_DEV_NUM_SHIFT		15
#define CFG_ADDR_FUNC_NUM_SHIFT		12
#define CFG_ADDR_REG_NUM_SHIFT		2
#define CFG_ADDR_REG_NUM_MASK		0x00000ffc
#define CFG_ADDR_CFG_TYPE_MASK		0x00000003

#define DL_LINK_UP_TIMEOUT_MS		1000

#define CFG_RETRY_STATUS		0xffff0001
#define CRS_TIMEOUT_MS			5000

/* create EP config data to write */
#define DEF_BUS_NO			1 /* default bus 1 */
#define DEF_SLOT_NO			0 /* default slot 0 */
#define DEF_FN_NO			0 /* default fn 0 */

#define EP_CONFIG_VAL(bus_no, slot, fn, where) \
	(((bus_no) << CFG_ADDR_BUS_NUM_SHIFT) | \
	((slot) << CFG_ADDR_DEV_NUM_SHIFT) | \
	((fn) << CFG_ADDR_FUNC_NUM_SHIFT) | \
	((where) & CFG_ADDR_REG_NUM_MASK) | \
	(1 & CFG_ADDR_CFG_TYPE_MASK))

/* PAXB security offset */
#define PAXB_SECURITY_IDM_OFFSET	0x1c
#define PAXB_SECURITY_APB_OFFSET	0x24
#define PAXB_SECURITY_ECAM_OFFSET	0x3c

#define paxb_get_config(type) paxb_get_##type##_config()

/* per-core AXI security register offsets; the final entry is the APB port */
static unsigned int paxb_sec_reg_offset[] = {
	0x0c, /* PAXB0 AXI */
	0x10, /* PAXB1 AXI */
	0x14, /* PAXB2 AXI */
	0x18, /* PAXB3 AXI */
	0x20, /* PAXB4 AXI */
	0x28, /* PAXB5 AXI */
	0x2c, /* PAXB6 AXI */
	0x30, /* PAXB7 AXI */
	0x24, /* PAXB APB */
};

/* active platform PAXB configuration (declared in paxb.h, assigned elsewhere) */
const paxb_cfg *paxb;

/*
 * Given a PIPEMUX strap and PCIe core index, this function returns 1 if a
 * PCIe core needs to be enabled
 */
int pcie_core_needs_enable(unsigned int core_idx)
{
	if (paxb->core_needs_enable)
		return paxb->core_needs_enable(core_idx);

	return 0;
}

/*
 * Program the default Tx coefficient (value 7 in the low nibble of each
 * byte) into the per-lane coefficient registers of the given core, one
 * 32-bit register per two lanes starting at CFG_RC_COEFF_ADDR.
 */
static void pcie_set_default_tx_coeff(uint32_t core_idx, uint32_t link_width)
{
	unsigned int lanes = 0;
	uint32_t data, addr;

	addr = CFG_RC_COEFF_ADDR;
	for (lanes = 0; lanes < link_width; lanes = lanes + 2) {
		data = paxb_rc_cfg_read(core_idx, addr);
		/* keep the high nibble of each byte, force the low nibble to 7 */
		data &= 0xf0f0f0f0;
		data |= (7 & 0xf);
		data |= (7 & 0xf) << 8;
		data |= (7 & 0xf) << 16;
		data |= (7 & 0xf) << 24;

		paxb_rc_cfg_write(core_idx, addr, data);
		addr += 4;
	}
}

/*
 * For each enabled core, advertise the PIPEMUX-derived link width and link
 * speed in the RC link capability and link status/control 2 registers, then
 * program the default Tx coefficients.
 *
 * Returns 0 on success, -EOPNOTSUPP when the PIPEMUX strap yields no valid
 * link width.
 */
static int paxb_rc_link_init(void)
{
	uint32_t val, link_speed;
	unsigned int link_width;
	uint32_t core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		link_width = paxb->get_link_width(core_idx);
		if (!link_width) {
			ERROR("Unsupported PIPEMUX\n");
			return -EOPNOTSUPP;
		}

		link_speed = paxb->get_link_speed();
		/* program RC's link cap reg to advertise proper link width */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
		val &= ~CFG_RC_LINK_CAP_WIDTH_MASK;
		val |= (link_width << CFG_RC_LINK_CAP_WIDTH_SHIFT);
		paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);

		/* program RC's link cap reg to advertise proper link speed */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
		val &= ~CFG_RC_LINK_CAP_SPEED_MASK;
		val |= link_speed << CFG_RC_LINK_CAP_SPEED_SHIFT;
		paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);

		/* also need to program RC's link status control register */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_STATUS_CTRL_2);
		val &= ~(CFG_RC_LINK_SPEED_MASK);
		val |= link_speed << CFG_RC_LINK_SPEED_SHIFT;
		paxb_rc_cfg_write(core_idx, CFG_RC_LINK_STATUS_CTRL_2, val);

#ifdef WAR_PLX_PRESET_PARITY_FAIL
		/* WAR to avoid crash with PLX switch in GEN3*/
		/* While PRESET, PLX switch is not fixing parity so disabled */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_REG_PHY_CTL_10);
		val &= ~(PHY_CTL_10_GEN3_MATCH_PARITY);
		paxb_rc_cfg_write(core_idx, CFG_RC_REG_PHY_CTL_10, val);
#endif
		pcie_set_default_tx_coeff(core_idx, link_width);
	}
	return 0;
}

#ifdef PAXB_LINKUP
/*
 * Assert (assert == true) or de-assert PERST# for the given core via the
 * PAXB clock control register, with the settle delays used by the bring-up
 * sequence.
 */
static void paxb_perst_ctrl(unsigned int core_idx, bool assert)
{
	uint32_t clk_ctrl = PAXB_OFFSET(core_idx) + PAXB_CLK_CTRL_OFFSET;

	if (assert) {
		mmio_clrbits_32(clk_ctrl, PAXB_EP_PERST_SRC_SEL_MASK |
				PAXB_EP_MODE_PERST_MASK |
				PAXB_RC_PCIE_RST_OUT_MASK);
		udelay(250);
	} else {
		mmio_setbits_32(clk_ctrl, PAXB_RC_PCIE_RST_OUT_MASK);
		mdelay(100);
	}
}

/*
 * Toggle PERST# on every enabled core and poll the link status register for
 * data-link-layer active, for up to DL_LINK_UP_TIMEOUT_MS per core. A core
 * whose link stays down is only logged; bring-up continues with the rest.
 */
static void paxb_start_link_up(void)
{
	unsigned int core_idx;
	uint32_t val, timeout;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		/* toggle PERST */
		paxb_perst_ctrl(core_idx, true);
		paxb_perst_ctrl(core_idx, false);

		timeout = DL_LINK_UP_TIMEOUT_MS;
		/* wait for Link up */
		do {
			val = mmio_read_32(PAXB_OFFSET(core_idx) +
					   PAXB_CFG_LINK_STATUS_OFFSET);
			if (val & PAXB_CFG_DL_ACTIVE_MASK)
				break;

			mdelay(1);
		} while (--timeout);

		if (!timeout)
			ERROR("PAXB core %u link is down\n", core_idx);
	}
}
#endif

/*
 * Pulse the soft reset of one PCIe core: clear the reset bit, hold for
 * PCIE_CORE_SOFT_RST_DELAY_US, then set it again.
 */
static void pcie_core_soft_reset(unsigned int core_idx)
{
	uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
	uintptr_t ctrl = (uintptr_t)(PCIE_CORE_SOFT_RST_CFG_BASE + offset);

	/* Put PCIe core in soft reset */
	mmio_clrbits_32(ctrl, PCIE_CORE_SOFT_RST);

	/* Wait for 1 us before pulling PCIe core out of soft reset */
	udelay(PCIE_CORE_SOFT_RST_DELAY_US);

	mmio_setbits_32(ctrl, PCIE_CORE_SOFT_RST);
}

/*
 * Enable one power switch (set 'mask' in 'ctrl') and poll 'status' until the
 * same mask bits acknowledge, for up to PCIE_CORE_PWR_TIMEOUT_MS.
 *
 * Returns 0 on success, -EIO on timeout.
 */
static int pcie_core_pwron_switch(uintptr_t ctrl, uintptr_t status,
				  uint32_t mask)
{
	uint32_t val;
	unsigned int timeout = PCIE_CORE_PWR_TIMEOUT_MS;

	/* enable switch */
	mmio_setbits_32(ctrl, mask);

	/* now wait for it to stabilize */
	do {
		val = mmio_read_32(status);
		if ((val & mask) == mask)
			return 0;
		mdelay(1);
	} while (--timeout);

	return -EIO;
}

/*
 * Run the four-step power-switch sequence on a ctrl/status register pair.
 * Returns 0 on success or the first failing step's error code.
 */
static int pcie_core_pwr_seq(uintptr_t ctrl, uintptr_t status)
{
	int ret;

	/*
	 * Enable the switch with the following sequence:
	 * 1. Array weak switch output switch
	 * 2. Array strong switch
	 * 3. Weak switch output acknowledge
	 * 4. Strong switch output acknowledge
	 */
	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWERON);
	if (ret)
		return ret;

	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWEROK);
	if (ret)
		return ret;

	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWERON);
	if (ret)
		return ret;

	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWEROK);
	if (ret)
		return ret;

	return 0;
}

/*
 * This function enables PCIe core and PAXB memory buffer power, and then
 * remove the PCIe core from isolation
 */
static int pcie_core_pwr_init(unsigned int core_idx)
{
	int ret;
	uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
	uintptr_t ctrl, status;

	/* enable mem power to PCIe core */
	ctrl = (uintptr_t)(PCIE_CORE_MEM_PWR_BASE + offset);
	status = (uintptr_t)(PCIE_CORE_MEM_PWR_STATUS_BASE + offset);
	ret = pcie_core_pwr_seq(ctrl, status);
	if (ret) {
		ERROR("PCIe core mem power failed\n");
		return ret;
	}

	/* now enable mem power to PAXB wrapper */
	ctrl = (uintptr_t)(PCIE_PAXB_MEM_PWR_BASE + offset);
	status = (uintptr_t)(PCIE_PAXB_MEM_PWR_STATUS_BASE + offset);
	ret = pcie_core_pwr_seq(ctrl, status);
	if (ret) {
		ERROR("PAXB mem power failed\n");
		return ret;
	}

	/* now remove power isolation */
	ctrl = (uintptr_t)(PCIE_CORE_ISO_CFG_BASE + offset);
	mmio_clrbits_32(ctrl, PCIE_CORE_ISO | PCIE_CORE_MEM_ISO);

	return 0;
}

/* Release the PCIe subsystem reset via the CDRU misc reset control register */
static void pcie_ss_reset(void)
{
	mmio_setbits_32(CDRU_MISC_RESET_CONTROL,
			1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R);
}

/*
 * This function reads the PIPEMUX strap, figures out all the PCIe cores that
 * need to be enabled and enable the mem power for those cores
 */
static int pcie_cores_init(void)
{
	int ret = 0;
	uint32_t core_idx;

	if (paxb->pipemux_init) {
		ret = paxb->pipemux_init();
		if (ret)
			return ret;
	}

	/* bring PCIe subsystem out of reset */
	pcie_ss_reset();

	/* power up all PCIe cores that will be used as RC */
	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		ret = pcie_core_pwr_init(core_idx);
		if (ret) {
			ERROR("PCIe core %u power up failed\n", core_idx);
			return ret;
		}

		pcie_core_soft_reset(core_idx);

		VERBOSE("PCIe core %u is powered up\n", core_idx);
	}

	return ret;
}

/*
 * Write 'val' to RC configuration space register 'where' of the given core
 * through the PAXB indirect address/data register pair.
 */
void paxb_rc_cfg_write(unsigned int core_idx, unsigned int where,
		       uint32_t val)
{
	mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
		      (where & PAXB_CFG_IND_ADDR_MASK) |
		      PAXB_CFG_CFG_TYPE_MASK);
	mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET, val);
}

/*
 * Read RC configuration space register 'where' of the given core through the
 * PAXB indirect address/data register pair.
 */
unsigned int paxb_rc_cfg_read(unsigned int core_idx, unsigned int where)
{
	unsigned int val;

	mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
		      (where & PAXB_CFG_IND_ADDR_MASK) |
		      PAXB_CFG_CFG_TYPE_MASK);
	val = mmio_read_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET);

	return val;
}

/*
 * Advertise the max payload size in each enabled RC's device capability
 * register: 512 bytes for cores 0, 1, 6 and 7, 256 bytes for the others.
 */
static void paxb_cfg_mps(void)
{
	uint32_t val, core_idx, mps;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_RC_DEVICE_CAP);
		val &= ~CFG_RC_DEVICE_CAP_MPS_MASK;
		mps = CFG_RC_DEVICE_CAP_MPS_256B;
		if (core_idx == 0 || core_idx == 1 ||
		    core_idx == 6 || core_idx == 7) {
			mps = CFG_RC_DEVICE_CAP_MPS_512B;
		}
		val |= mps;
		paxb_rc_cfg_write(core_idx, CFG_RC_DEVICE_CAP, val);
	}
}

/*
 * Put each enabled core in RC mode, force its class code to PCI-to-PCI
 * bridge, and program the vendor/device and subsystem ID registers from
 * the platform configuration.
 */
static void paxb_cfg_dev_id(void)
{
	uint32_t val, core_idx;
	uint32_t device_id;

	device_id = paxb->device_id;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		/* Set Core in RC mode */
		mmio_setbits_32(PCIE_CORE_USER_CFG +
				(core_idx * PCIE_CORE_PWR_OFFSET), 1);

		/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
		val = paxb_rc_cfg_read(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET);
		val &= ~PCI_CLASS_BRIDGE_MASK;
		val |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
		paxb_rc_cfg_write(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET, val);

		val = (VENDOR_ID << 16) | device_id;
		paxb_rc_cfg_write(core_idx, CFG_RC_DEV_ID, val);

		val = (device_id << 16) | VENDOR_ID;
		paxb_rc_cfg_write(core_idx, CFG_RC_DEV_SUBID, val);
	}
}

static void paxb_cfg_tgt_trn(void)
{
	uint32_t val, core_idx;

	/*
	 * Disable all mem Rd/Wr size check so it allows target read/write
	 * transactions to be more than stipulated DW. As a result, PAXB root
	 * complex will not abort these read/write transactions beyond
	 * stipulated limit
	 */
	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_RC_TL_CTRL_0);
		val &= ~(RC_MEM_DW_CHK_MASK);
		paxb_rc_cfg_write(core_idx, CFG_RC_TL_CTRL_0, val);
	}
}

/*
 * Program the initial flow-control credits (posted/non-posted header and
 * posted data) for each enabled RC; cores 0, 1, 6 and 7 get the larger
 * SRP_* values. Also sets the FCIMM limits pending a tuned value from ASIC.
 */
static void paxb_cfg_pdl_ctrl(void)
{
	uint32_t val, core_idx;
	uint32_t nph, ph, pd;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		nph = NPH_FC_INIT;
		ph = PH_INIT;
		pd = PD_FC_INIT;

		if (core_idx == 0 || core_idx == 1 ||
		    core_idx == 6 || core_idx == 7) {
			nph = SRP_NPH_FC_INIT;
			ph = SRP_PH_INIT;
			pd = SRP_PD_FC_INIT;
		}
		val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_4);
		val &= ~NPH_FC_INIT_MASK;
		val &= ~PD_FC_INIT_MASK;
		val = val | (nph << NPH_FC_INIT_SHIFT);
		val = val | (pd << PD_FC_INIT_SHIFT);
		paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_4, val);

		val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_5);
		val &= ~PH_INIT_MASK;
		val = val | (ph << PH_INIT_SHIFT);
		paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_5, val);

		/*
		 * ASIC to give more optimized value after further investigation.
		 * till then this is important to have to get similar
		 * performance on all the slots.
		 */
		paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_NP_LIMIT,
				  CFG_RC_TL_FCIMM_NP_VAL);

		paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_P_LIMIT,
				  CFG_RC_TL_FCIMM_P_VAL);
	}
}

/* Clear the CLKREQ-enabled bit in each enabled RC's configuration */
static void paxb_cfg_clkreq(void)
{
	uint32_t val, core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_RC_CLKREQ_ENABLED);
		val &= ~CFG_RC_CLKREQ_ENABLED_MASK;
		paxb_rc_cfg_write(core_idx, CFG_RC_CLKREQ_ENABLED, val);
	}
}

/* Set or clear the data-link-layer active bit in each enabled RC's link cap */
static void paxb_cfg_dl_active(bool enable)
{
	uint32_t val, core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_LINK_CAP_RC);
		if (enable)
			val |= CFG_RC_DL_ACTIVE_MASK;
		else
			val &= ~CFG_RC_DL_ACTIVE_MASK;
		paxb_rc_cfg_write(core_idx, CFG_LINK_CAP_RC, val);
	}
}

/* Set or clear the LTR (latency tolerance reporting) bit in each RC root cap */
static void paxb_cfg_LTR(int enable)
{
	uint32_t val, core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_ROOT_CAP_RC);
		if (enable)
			val |= CFG_ROOT_CAP_LTR_MASK;
		else
			val &= ~CFG_ROOT_CAP_LTR_MASK;
		paxb_rc_cfg_write(core_idx, CFG_ROOT_CAP_RC, val);
	}
}

static void paxb_ib_regs_bypass(void)
{
	unsigned int i, j;

	for (i = 0; i < paxb->num_cores; i++) {
		if (!pcie_core_needs_enable(i))
			continue;

		/* Configure Default IMAP window */
		mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP,
			      DEFAULT_ADDR_INVALID);
		mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXUSER,
			      IMAP_AXUSER);
		mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXCACHE,
			      IMAP_AXCACHE);

		/* Configure MSI IMAP window */
		mmio_setbits_32(PAXB_OFFSET(i) +
				PAXB_IMAP0_REGS_TYPE_OFFSET,
				0x1);
		mmio_write_32(PAXB_OFFSET(i) + PAXB_IARR0_BASE_OFFSET,
			      GITS_TRANSLATER | OARR_VALID);
		for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
			mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
				      (GITS_TRANSLATER +
(j * PAXB_IMAP0_WINDOW_SIZE)) | 728*91f16700Schasinglulu IMAP_VALID); 729*91f16700Schasinglulu } 730*91f16700Schasinglulu } 731*91f16700Schasinglulu } 732*91f16700Schasinglulu 733*91f16700Schasinglulu static void paxb_ib_regs_init(void) 734*91f16700Schasinglulu { 735*91f16700Schasinglulu unsigned int core_idx; 736*91f16700Schasinglulu 737*91f16700Schasinglulu for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) { 738*91f16700Schasinglulu if (!pcie_core_needs_enable(core_idx)) 739*91f16700Schasinglulu continue; 740*91f16700Schasinglulu 741*91f16700Schasinglulu /* initialize IARR2 to zero */ 742*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_IARR2_LOWER_OFFSET, 743*91f16700Schasinglulu 0x0); 744*91f16700Schasinglulu mmio_setbits_32(PAXB_OFFSET(core_idx) + 745*91f16700Schasinglulu PAXB_IMAP0_REGS_TYPE_OFFSET, 746*91f16700Schasinglulu 0x1); 747*91f16700Schasinglulu } 748*91f16700Schasinglulu } 749*91f16700Schasinglulu 750*91f16700Schasinglulu static void paxb_cfg_apb_timeout(void) 751*91f16700Schasinglulu { 752*91f16700Schasinglulu unsigned int core_idx; 753*91f16700Schasinglulu 754*91f16700Schasinglulu for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) { 755*91f16700Schasinglulu if (!pcie_core_needs_enable(core_idx)) 756*91f16700Schasinglulu continue; 757*91f16700Schasinglulu 758*91f16700Schasinglulu /* allow unlimited timeout */ 759*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(core_idx) + 760*91f16700Schasinglulu PAXB_APB_TIMEOUT_COUNT_OFFSET, 761*91f16700Schasinglulu 0xFFFFFFFF); 762*91f16700Schasinglulu } 763*91f16700Schasinglulu } 764*91f16700Schasinglulu 765*91f16700Schasinglulu static void paxb_smmu_cfg(void) 766*91f16700Schasinglulu { 767*91f16700Schasinglulu unsigned int core_idx; 768*91f16700Schasinglulu uint32_t offset; 769*91f16700Schasinglulu uint32_t val; 770*91f16700Schasinglulu 771*91f16700Schasinglulu for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) { 772*91f16700Schasinglulu if 
(!pcie_core_needs_enable(core_idx)) 773*91f16700Schasinglulu continue; 774*91f16700Schasinglulu 775*91f16700Schasinglulu offset = core_idx * PCIE_CORE_PWR_OFFSET; 776*91f16700Schasinglulu val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset); 777*91f16700Schasinglulu val &= ~(0xFFF00); 778*91f16700Schasinglulu val |= (PAXB_SMMU_SID_CFG_FUN_WIDTH | 779*91f16700Schasinglulu PAXB_SMMU_SID_CFG_DEV_WIDTH | 780*91f16700Schasinglulu PAXB_SMMU_SID_CFG_BUS_WIDTH); 781*91f16700Schasinglulu mmio_write_32(PCIE_PAXB_SMMU_SID_CFG + offset, val); 782*91f16700Schasinglulu val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset); 783*91f16700Schasinglulu VERBOSE("smmu cfg reg 0x%x\n", val); 784*91f16700Schasinglulu } 785*91f16700Schasinglulu } 786*91f16700Schasinglulu 787*91f16700Schasinglulu static void paxb_cfg_coherency(void) 788*91f16700Schasinglulu { 789*91f16700Schasinglulu unsigned int i, j; 790*91f16700Schasinglulu 791*91f16700Schasinglulu for (i = 0; i < paxb->num_cores; i++) { 792*91f16700Schasinglulu if (!pcie_core_needs_enable(i)) 793*91f16700Schasinglulu continue; 794*91f16700Schasinglulu 795*91f16700Schasinglulu #ifdef USE_DDR 796*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_OFFSET, 797*91f16700Schasinglulu IMAP_ARCACHE | IMAP_AWCACHE); 798*91f16700Schasinglulu #endif 799*91f16700Schasinglulu 800*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_0_AXUSER_OFFSET, 801*91f16700Schasinglulu IMAP_AXUSER); 802*91f16700Schasinglulu 803*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_AXUSER_OFFSET, 804*91f16700Schasinglulu IMAP_AXUSER); 805*91f16700Schasinglulu 806*91f16700Schasinglulu for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) { 807*91f16700Schasinglulu #ifdef USE_DDR 808*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP3_OFFSET(j), 809*91f16700Schasinglulu IMAP_ARCACHE | IMAP_AWCACHE); 810*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP4_OFFSET(j), 811*91f16700Schasinglulu IMAP_ARCACHE | IMAP_AWCACHE); 
812*91f16700Schasinglulu #endif 813*91f16700Schasinglulu /* zero out IMAP0 mapping windows for MSI/MSI-X */ 814*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j), 815*91f16700Schasinglulu 0x0); 816*91f16700Schasinglulu 817*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + 818*91f16700Schasinglulu PAXB_IMAP3_0_AXUSER_OFFSET(j), 819*91f16700Schasinglulu IMAP_AXUSER); 820*91f16700Schasinglulu mmio_write_32(PAXB_OFFSET(i) + 821*91f16700Schasinglulu PAXB_IMAP4_0_AXUSER_OFFSET(j), 822*91f16700Schasinglulu IMAP_AXUSER); 823*91f16700Schasinglulu } 824*91f16700Schasinglulu } 825*91f16700Schasinglulu } 826*91f16700Schasinglulu 827*91f16700Schasinglulu /* 828*91f16700Schasinglulu * This function configures all PAXB related blocks to allow non-secure access 829*91f16700Schasinglulu */ 830*91f16700Schasinglulu void paxb_ns_init(enum paxb_type type) 831*91f16700Schasinglulu { 832*91f16700Schasinglulu unsigned int reg; 833*91f16700Schasinglulu 834*91f16700Schasinglulu switch (type) { 835*91f16700Schasinglulu case PAXB_SR: 836*91f16700Schasinglulu for (reg = 0; reg < ARRAY_SIZE(paxb_sec_reg_offset); reg++) { 837*91f16700Schasinglulu 838*91f16700Schasinglulu mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE + 839*91f16700Schasinglulu paxb_sec_reg_offset[reg], 0x1); 840*91f16700Schasinglulu } 841*91f16700Schasinglulu /* Enabled all PAXB's relevant IDM blocks access in non-secure mode */ 842*91f16700Schasinglulu mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE + PAXB_SECURITY_IDM_OFFSET, 843*91f16700Schasinglulu 0xffff); 844*91f16700Schasinglulu break; 845*91f16700Schasinglulu case PAXB_NS3Z: 846*91f16700Schasinglulu mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE + 847*91f16700Schasinglulu paxb_sec_reg_offset[0], 0x1); 848*91f16700Schasinglulu mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE + 849*91f16700Schasinglulu PAXB_SECURITY_IDM_OFFSET, 0xffff); 850*91f16700Schasinglulu mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE + 851*91f16700Schasinglulu PAXB_SECURITY_APB_OFFSET, 0x7); 
852*91f16700Schasinglulu mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE + 853*91f16700Schasinglulu PAXB_SECURITY_ECAM_OFFSET, 0x1); 854*91f16700Schasinglulu break; 855*91f16700Schasinglulu } 856*91f16700Schasinglulu } 857*91f16700Schasinglulu 858*91f16700Schasinglulu static int paxb_set_config(void) 859*91f16700Schasinglulu { 860*91f16700Schasinglulu paxb = paxb_get_config(sr); 861*91f16700Schasinglulu if (paxb) 862*91f16700Schasinglulu return 0; 863*91f16700Schasinglulu 864*91f16700Schasinglulu return -ENODEV; 865*91f16700Schasinglulu } 866*91f16700Schasinglulu 867*91f16700Schasinglulu void paxb_init(void) 868*91f16700Schasinglulu { 869*91f16700Schasinglulu int ret; 870*91f16700Schasinglulu 871*91f16700Schasinglulu ret = paxb_set_config(); 872*91f16700Schasinglulu if (ret) 873*91f16700Schasinglulu return; 874*91f16700Schasinglulu 875*91f16700Schasinglulu paxb_ns_init(paxb->type); 876*91f16700Schasinglulu 877*91f16700Schasinglulu ret = pcie_cores_init(); 878*91f16700Schasinglulu if (ret) 879*91f16700Schasinglulu return; 880*91f16700Schasinglulu 881*91f16700Schasinglulu if (paxb->phy_init) { 882*91f16700Schasinglulu ret = paxb->phy_init(); 883*91f16700Schasinglulu if (ret) 884*91f16700Schasinglulu return; 885*91f16700Schasinglulu } 886*91f16700Schasinglulu 887*91f16700Schasinglulu paxb_cfg_dev_id(); 888*91f16700Schasinglulu paxb_cfg_tgt_trn(); 889*91f16700Schasinglulu paxb_cfg_pdl_ctrl(); 890*91f16700Schasinglulu if (paxb->type == PAXB_SR) { 891*91f16700Schasinglulu paxb_ib_regs_init(); 892*91f16700Schasinglulu paxb_cfg_coherency(); 893*91f16700Schasinglulu } else 894*91f16700Schasinglulu paxb_ib_regs_bypass(); 895*91f16700Schasinglulu 896*91f16700Schasinglulu paxb_cfg_apb_timeout(); 897*91f16700Schasinglulu paxb_smmu_cfg(); 898*91f16700Schasinglulu paxb_cfg_clkreq(); 899*91f16700Schasinglulu paxb_rc_link_init(); 900*91f16700Schasinglulu 901*91f16700Schasinglulu /* Stingray Doesn't support LTR */ 902*91f16700Schasinglulu paxb_cfg_LTR(false); 903*91f16700Schasinglulu 
paxb_cfg_dl_active(true); 904*91f16700Schasinglulu 905*91f16700Schasinglulu paxb_cfg_mps(); 906*91f16700Schasinglulu 907*91f16700Schasinglulu #ifdef PAXB_LINKUP 908*91f16700Schasinglulu paxb_start_link_up(); 909*91f16700Schasinglulu #endif 910*91f16700Schasinglulu INFO("PAXB init done\n"); 911*91f16700Schasinglulu } 912