/* cvmx-pcie.c revision 216476 */
/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
27353358Sdim 28311116Sdim * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29341825Sdim * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR 30353358Sdim * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31341825Sdim * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32311116Sdim * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33327952Sdim * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34311116Sdim * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35311116Sdim * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36311116Sdim * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37311116Sdim * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 38311116Sdim ***********************license end**************************************/ 39311116Sdim 40311116Sdim 41327952Sdim 42341825Sdim 43341825Sdim 44341825Sdim 45341825Sdim 46341825Sdim/** 47341825Sdim * @file 48341825Sdim * 49341825Sdim * Interface to PCIe as a host(RC) or target(EP) 50311116Sdim * 51311116Sdim * <hr>$Revision: 52004 $<hr> 52311116Sdim */ 53353358Sdim#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 54311116Sdim#include <asm/octeon/cvmx.h> 55360784Sdim#include <asm/octeon/cvmx-config.h> 56360784Sdim#include <asm/octeon/cvmx-clock.h> 57360784Sdim#include <asm/octeon/cvmx-ciu-defs.h> 58360784Sdim#include <asm/octeon/cvmx-dpi-defs.h> 59360784Sdim#include <asm/octeon/cvmx-npi-defs.h> 60360784Sdim#include <asm/octeon/cvmx-npei-defs.h> 61341825Sdim#include <asm/octeon/cvmx-pci-defs.h> 62360784Sdim#include <asm/octeon/cvmx-pcieepx-defs.h> 63344779Sdim#include <asm/octeon/cvmx-pciercx-defs.h> 64344779Sdim#include <asm/octeon/cvmx-pemx-defs.h> 65344779Sdim#include <asm/octeon/cvmx-pexp-defs.h> 66344779Sdim#include <asm/octeon/cvmx-pescx-defs.h> 67344779Sdim#include <asm/octeon/cvmx-sli-defs.h> 
68344779Sdim#include <asm/octeon/cvmx-sriox-defs.h> 69344779Sdim 70344779Sdim#ifdef CONFIG_CAVIUM_DECODE_RSL 71344779Sdim#include <asm/octeon/cvmx-error.h> 72344779Sdim#endif 73344779Sdim#include <asm/octeon/cvmx-helper.h> 74344779Sdim#include <asm/octeon/cvmx-helper-board.h> 75344779Sdim#include <asm/octeon/cvmx-helper-errata.h> 76344779Sdim#include <asm/octeon/cvmx-pcie.h> 77321369Sdim#include <asm/octeon/cvmx-sysinfo.h> 78341825Sdim#include <asm/octeon/cvmx-swap.h> 79353358Sdim#include <asm/octeon/cvmx-wqe.h> 80341825Sdim#else 81341825Sdim#include "cvmx.h" 82327952Sdim#include "cvmx-csr-db.h" 83341825Sdim#include "cvmx-pcie.h" 84353358Sdim#include "cvmx-sysinfo.h" 85360784Sdim#include "cvmx-swap.h" 86341825Sdim#include "cvmx-wqe.h" 87327952Sdim#include "cvmx-error.h" 88353358Sdim#include "cvmx-helper-errata.h" 89353358Sdim#endif 90341825Sdim 91341825Sdim#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */ 92341825Sdim#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */ 93341825Sdim#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */ 94341825Sdim#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */ 95327952Sdim 96353358Sdim/** 97353358Sdim * Return the Core virtual base address for PCIe IO access. IOs are 98353358Sdim * read/written as an offset from this address. 
99353358Sdim * 100353358Sdim * @param pcie_port PCIe port the IO is for 101353358Sdim * 102353358Sdim * @return 64bit Octeon IO base address for read/write 103353358Sdim */ 104353358Sdimuint64_t cvmx_pcie_get_io_base_address(int pcie_port) 105353358Sdim{ 106353358Sdim cvmx_pcie_address_t pcie_addr; 107360784Sdim pcie_addr.u64 = 0; 108341825Sdim pcie_addr.io.upper = 0; 109341825Sdim pcie_addr.io.io = 1; 110321369Sdim pcie_addr.io.did = 3; 111341825Sdim pcie_addr.io.subdid = 2; 112311116Sdim pcie_addr.io.es = 1; 113353358Sdim pcie_addr.io.port = pcie_port; 114341825Sdim return pcie_addr.u64; 115360784Sdim} 116341825Sdim 117353358Sdim 118353358Sdim/** 119311116Sdim * Size of the IO address region returned at address 120353358Sdim * cvmx_pcie_get_io_base_address() 121353358Sdim * 122353358Sdim * @param pcie_port PCIe port the IO is for 123353358Sdim * 124353358Sdim * @return Size of the IO window 125353358Sdim */ 126353358Sdimuint64_t cvmx_pcie_get_io_size(int pcie_port) 127360784Sdim{ 128360784Sdim return 1ull<<32; 129360784Sdim} 130360784Sdim 131360784Sdim 132360784Sdim/** 133353358Sdim * Return the Core virtual base address for PCIe MEM access. Memory is 134353358Sdim * read/written as an offset from this address. 
135353358Sdim * 136353358Sdim * @param pcie_port PCIe port the IO is for 137341825Sdim * 138341825Sdim * @return 64bit Octeon IO base address for read/write 139311116Sdim */ 140341825Sdimuint64_t cvmx_pcie_get_mem_base_address(int pcie_port) 141341825Sdim{ 142321369Sdim cvmx_pcie_address_t pcie_addr; 143341825Sdim pcie_addr.u64 = 0; 144311116Sdim pcie_addr.mem.upper = 0; 145353358Sdim pcie_addr.mem.io = 1; 146360784Sdim pcie_addr.mem.did = 3; 147360784Sdim pcie_addr.mem.subdid = 3 + pcie_port; 148311116Sdim return pcie_addr.u64; 149353358Sdim} 150353358Sdim 151311116Sdim 152353358Sdim/** 153321369Sdim * Size of the Mem address region returned at address 154353358Sdim * cvmx_pcie_get_mem_base_address() 155353358Sdim * 156353358Sdim * @param pcie_port PCIe port the IO is for 157353358Sdim * 158353358Sdim * @return Size of the Mem window 159353358Sdim */ 160353358Sdimuint64_t cvmx_pcie_get_mem_size(int pcie_port) 161353358Sdim{ 162353358Sdim return 1ull<<36; 163353358Sdim} 164353358Sdim 165353358Sdim 166344779Sdim/** 167344779Sdim * @INTERNAL 168344779Sdim * Initialize the RC config space CSRs 169344779Sdim * 170344779Sdim * @param pcie_port PCIe port to initialize 171344779Sdim */ 172344779Sdimstatic void __cvmx_pcie_rc_initialize_config_space(int pcie_port) 173353358Sdim{ 174344779Sdim /* Max Payload Size (PCIE*_CFG030[MPS]) */ 175353358Sdim /* Max Read Request Size (PCIE*_CFG030[MRRS]) */ 176353358Sdim /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */ 177353358Sdim /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */ 178353358Sdim { 179353358Sdim cvmx_pciercx_cfg030_t pciercx_cfg030; 180353358Sdim pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port)); 181353358Sdim if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) 182353358Sdim { 183341825Sdim pciercx_cfg030.s.mps = MPS_CN5XXX; 184341825Sdim pciercx_cfg030.s.mrrs = MRRS_CN5XXX; 185341825Sdim } 186341825Sdim else 187341825Sdim { 188341825Sdim pciercx_cfg030.s.mps = 
MPS_CN6XXX; 189341825Sdim pciercx_cfg030.s.mrrs = MRRS_CN6XXX; 190341825Sdim } 191341825Sdim pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */ 192341825Sdim pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */ 193341825Sdim pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */ 194341825Sdim pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */ 195341825Sdim pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */ 196341825Sdim pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */ 197341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32); 198341825Sdim } 199341825Sdim 200311116Sdim if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 201341825Sdim { 202341825Sdim /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */ 203341825Sdim /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */ 204341825Sdim cvmx_npei_ctl_status2_t npei_ctl_status2; 205341825Sdim npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2); 206341825Sdim npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */ 207341825Sdim npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */ 208360784Sdim if (pcie_port) 209341825Sdim npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */ 210341825Sdim else 211341825Sdim npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */ 212341825Sdim 213341825Sdim cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64); 214341825Sdim } 215360784Sdim else 216341825Sdim { 217341825Sdim /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */ 218341825Sdim /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */ 219341825Sdim cvmx_dpi_sli_prtx_cfg_t prt_cfg; 
220311116Sdim cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl; 221341825Sdim prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port)); 222353358Sdim prt_cfg.s.mps = MPS_CN6XXX; 223353358Sdim prt_cfg.s.mrrs = MRRS_CN6XXX; 224353358Sdim cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64); 225353358Sdim 226353358Sdim sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port)); 227353358Sdim sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX; 228353358Sdim cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64); 229353358Sdim } 230353358Sdim 231341825Sdim /* ECRC Generation (PCIE*_CFG070[GE,CE]) */ 232341825Sdim { 233341825Sdim cvmx_pciercx_cfg070_t pciercx_cfg070; 234341825Sdim pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port)); 235341825Sdim pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */ 236341825Sdim pciercx_cfg070.s.ce = 1; /* ECRC check enable. */ 237341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32); 238311116Sdim } 239353358Sdim 240353358Sdim /* Access Enables (PCIE*_CFG001[MSAE,ME]) */ 241353358Sdim /* ME and MSAE should always be set. */ 242353358Sdim /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */ 243353358Sdim /* System Error Message Enable (PCIE*_CFG001[SEE]) */ 244353358Sdim { 245353358Sdim cvmx_pciercx_cfg001_t pciercx_cfg001; 246353358Sdim pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port)); 247341825Sdim pciercx_cfg001.s.msae = 1; /* Memory space enable. */ 248353358Sdim pciercx_cfg001.s.me = 1; /* Bus master enable. */ 249353358Sdim pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. 
*/ 250353358Sdim pciercx_cfg001.s.see = 1; /* SERR# enable */ 251353358Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32); 252353358Sdim } 253360784Sdim 254353358Sdim 255353358Sdim /* Advanced Error Recovery Message Enables */ 256353358Sdim /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */ 257353358Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0); 258353358Sdim /* Use CVMX_PCIERCX_CFG067 hardware default */ 259353358Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0); 260353358Sdim 261341825Sdim 262353358Sdim /* Active State Power Management (PCIE*_CFG032[ASLPC]) */ 263353358Sdim { 264341825Sdim cvmx_pciercx_cfg032_t pciercx_cfg032; 265360784Sdim pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); 266341825Sdim pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */ 267341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32); 268344779Sdim } 269341825Sdim 270360784Sdim /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */ 271353358Sdim /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */ 272353358Sdim { 273353358Sdim /* We set the primary bus number to 1 so IDT bridges are happy. 
They don't like zero */ 274311116Sdim cvmx_pciercx_cfg006_t pciercx_cfg006; 275341825Sdim pciercx_cfg006.u32 = 0; 276353358Sdim pciercx_cfg006.s.pbnum = 1; 277353358Sdim pciercx_cfg006.s.sbnum = 1; 278360784Sdim pciercx_cfg006.s.subbnum = 1; 279360784Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32); 280353358Sdim } 281353358Sdim 282353358Sdim /* Memory-mapped I/O BAR (PCIERCn_CFG008) */ 283360784Sdim /* Most applications should disable the memory-mapped I/O BAR by */ 284353358Sdim /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */ 285353358Sdim { 286353358Sdim cvmx_pciercx_cfg008_t pciercx_cfg008; 287353358Sdim pciercx_cfg008.u32 = 0; 288353358Sdim pciercx_cfg008.s.mb_addr = 0x100; 289353358Sdim pciercx_cfg008.s.ml_addr = 0; 290341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32); 291360784Sdim } 292341825Sdim 293341825Sdim /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */ 294344779Sdim /* Most applications should disable the prefetchable BAR by setting */ 295341825Sdim /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */ 296353358Sdim /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */ 297353358Sdim { 298353358Sdim cvmx_pciercx_cfg009_t pciercx_cfg009; 299321369Sdim cvmx_pciercx_cfg010_t pciercx_cfg010; 300311116Sdim cvmx_pciercx_cfg011_t pciercx_cfg011; 301341825Sdim pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port)); 302353358Sdim pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port)); 303353358Sdim pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port)); 304341825Sdim pciercx_cfg009.s.lmem_base = 0x100; 305341825Sdim pciercx_cfg009.s.lmem_limit = 0; 306341825Sdim pciercx_cfg010.s.umem_base = 0x100; 307341825Sdim pciercx_cfg011.s.umem_limit = 0; 308311116Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32); 
309341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32); 310353358Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32); 311353358Sdim } 312353358Sdim 313353358Sdim /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */ 314353358Sdim /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */ 315353358Sdim { 316353358Sdim cvmx_pciercx_cfg035_t pciercx_cfg035; 317353358Sdim pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port)); 318353358Sdim pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */ 319353358Sdim pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */ 320353358Sdim pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */ 321360784Sdim pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */ 322341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32); 323353358Sdim } 324353358Sdim 325353358Sdim /* Advanced Error Recovery Interrupt Enables */ 326353358Sdim /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */ 327353358Sdim { 328353358Sdim cvmx_pciercx_cfg075_t pciercx_cfg075; 329353358Sdim pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port)); 330353358Sdim pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */ 331353358Sdim pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */ 332353358Sdim pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. 
*/ 333353358Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32); 334353358Sdim } 335353358Sdim 336353358Sdim /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */ 337353358Sdim /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */ 338311116Sdim { 339341825Sdim cvmx_pciercx_cfg034_t pciercx_cfg034; 340341825Sdim pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port)); 341341825Sdim pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */ 342341825Sdim pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */ 343341825Sdim pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */ 344311116Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32); 345311116Sdim } 346360784Sdim} 347360784Sdim 348311116Sdim/** 349360784Sdim * @INTERNAL 350360784Sdim * Initialize a host mode PCIe gen 1 link. This function takes a PCIe 351360784Sdim * port from reset to a link up state. Software can then begin 352360784Sdim * configuring the rest of the link. 
353360784Sdim * 354360784Sdim * @param pcie_port PCIe port to initialize 355360784Sdim * 356353358Sdim * @return Zero on success 357360784Sdim */ 358353358Sdimstatic int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port) 359360784Sdim{ 360360784Sdim uint64_t start_cycle; 361360784Sdim cvmx_pescx_ctl_status_t pescx_ctl_status; 362353358Sdim cvmx_pciercx_cfg452_t pciercx_cfg452; 363360784Sdim cvmx_pciercx_cfg032_t pciercx_cfg032; 364360784Sdim cvmx_pciercx_cfg448_t pciercx_cfg448; 365360784Sdim 366360784Sdim /* Set the lane width */ 367360784Sdim pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port)); 368360784Sdim pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); 369353358Sdim if (pescx_ctl_status.s.qlm_cfg == 0) 370360784Sdim { 371360784Sdim /* We're in 8 lane (56XX) or 4 lane (54XX) mode */ 372360784Sdim pciercx_cfg452.s.lme = 0xf; 373360784Sdim } 374360784Sdim else 375353358Sdim { 376353358Sdim /* We're in 4 lane (56XX) or 2 lane (52XX) mode */ 377353358Sdim pciercx_cfg452.s.lme = 0x7; 378360784Sdim } 379360784Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32); 380327952Sdim 381341825Sdim /* CN52XX pass 1.x has an errata where length mismatches on UR responses can 382353358Sdim cause bus errors on 64bit memory reads. 
Turning off length error 383341825Sdim checking fixes this */ 384353358Sdim if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) 385311116Sdim { 386311116Sdim cvmx_pciercx_cfg455_t pciercx_cfg455; 387341825Sdim pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port)); 388353358Sdim pciercx_cfg455.s.m_cpl_len_err = 1; 389341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32); 390341825Sdim } 391341825Sdim 392341825Sdim /* Lane swap needs to be manually enabled for CN52XX */ 393311116Sdim if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) 394341825Sdim { 395353358Sdim switch (cvmx_sysinfo_get()->board_type) 396341825Sdim { 397341825Sdim#if defined(OCTEON_VENDOR_LANNER) 398341825Sdim case CVMX_BOARD_TYPE_CUST_LANNER_MR730: 399341825Sdim break; 400311116Sdim#endif 401311116Sdim default: 402341825Sdim pescx_ctl_status.s.lane_swp = 1; 403341825Sdim break; 404311116Sdim } 405311116Sdim cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64); 406353358Sdim } 407353358Sdim 408341825Sdim /* Bring up the link */ 409341825Sdim pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); 410341825Sdim pescx_ctl_status.s.lnk_enb = 1; 411353358Sdim cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64); 412353358Sdim 413321369Sdim /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */ 414311116Sdim if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0)) 415341825Sdim __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0); 416341825Sdim 417311116Sdim /* Wait for the link to come up */ 418341825Sdim start_cycle = cvmx_get_cycle(); 419341825Sdim do 420341825Sdim { 421341825Sdim if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE)) 422311116Sdim { 423341825Sdim cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port); 424341825Sdim return -1; 425341825Sdim } 426341825Sdim cvmx_wait(10000); 427341825Sdim pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, 
CVMX_PCIERCX_CFG032(pcie_port)); 428311116Sdim } while (pciercx_cfg032.s.dlla == 0); 429311116Sdim 430327952Sdim /* Clear all pending errors */ 431341825Sdim cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM)); 432341825Sdim 433341825Sdim /* Update the Replay Time Limit. Empirically, some PCIe devices take a 434341825Sdim little longer to respond than expected under load. As a workaround for 435341825Sdim this we configure the Replay Time Limit to the value expected for a 512 436353358Sdim byte MPS instead of our actual 256 byte MPS. The numbers below are 437353358Sdim directly from the PCIe spec table 3-4 */ 438311116Sdim pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); 439341825Sdim switch (pciercx_cfg032.s.nlw) 440311116Sdim { 441321369Sdim case 1: /* 1 lane */ 442321369Sdim pciercx_cfg448.s.rtl = 1677; 443341825Sdim break; 444341825Sdim case 2: /* 2 lanes */ 445341825Sdim pciercx_cfg448.s.rtl = 867; 446341825Sdim break; 447321369Sdim case 4: /* 4 lanes */ 448327952Sdim pciercx_cfg448.s.rtl = 462; 449341825Sdim break; 450341825Sdim case 8: /* 8 lanes */ 451341825Sdim pciercx_cfg448.s.rtl = 258; 452341825Sdim break; 453327952Sdim } 454341825Sdim cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32); 455341825Sdim 456341825Sdim return 0; 457341825Sdim} 458341825Sdim 459341825Sdim 460341825Sdim/** 461327952Sdim * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate 462327952Sdim * the bus. 
463341825Sdim * 464341825Sdim * @param pcie_port PCIe port to initialize 465327952Sdim * 466341825Sdim * @return Zero on success 467341825Sdim */ 468341825Sdimstatic int __cvmx_pcie_rc_initialize_gen1(int pcie_port) 469341825Sdim{ 470341825Sdim int i; 471341825Sdim int base; 472341825Sdim uint64_t addr_swizzle; 473341825Sdim cvmx_ciu_soft_prst_t ciu_soft_prst; 474341825Sdim cvmx_pescx_bist_status_t pescx_bist_status; 475341825Sdim cvmx_pescx_bist_status2_t pescx_bist_status2; 476341825Sdim cvmx_npei_ctl_status_t npei_ctl_status; 477341825Sdim cvmx_npei_mem_access_ctl_t npei_mem_access_ctl; 478327952Sdim cvmx_npei_mem_access_subidx_t mem_access_subid; 479341825Sdim cvmx_npei_dbg_data_t npei_dbg_data; 480341825Sdim cvmx_pescx_ctl_status2_t pescx_ctl_status2; 481327952Sdim cvmx_pciercx_cfg032_t pciercx_cfg032; 482341825Sdim cvmx_npei_bar1_indexx_t bar1_index; 483341825Sdim 484341825Sdimretry: 485341825Sdim /* Make sure we aren't trying to setup a target mode interface in host mode */ 486341825Sdim npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); 487341825Sdim if ((pcie_port==0) && !npei_ctl_status.s.host_mode) 488341825Sdim { 489341825Sdim cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port); 490341825Sdim return -1; 491341825Sdim } 492341825Sdim 493341825Sdim /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */ 494353358Sdim if (OCTEON_IS_MODEL(OCTEON_CN52XX)) 495341825Sdim { 496341825Sdim npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); 497353358Sdim if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width) 498341825Sdim { 499341825Sdim cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n"); 500341825Sdim return -1; 501341825Sdim } 502341825Sdim } 503341825Sdim 504341825Sdim /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. 
*/ 505341825Sdim npei_ctl_status.s.arb = 1; 506341825Sdim /* Allow up to 0x20 config retries */ 507341825Sdim npei_ctl_status.s.cfg_rtry = 0x20; 508341825Sdim /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */ 509341825Sdim if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) 510341825Sdim { 511341825Sdim npei_ctl_status.s.p0_ntags = 0x20; 512341825Sdim npei_ctl_status.s.p1_ntags = 0x20; 513341825Sdim } 514341825Sdim cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64); 515341825Sdim 516341825Sdim /* Bring the PCIe out of reset */ 517341825Sdim if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) 518341825Sdim { 519341825Sdim /* The EBH5200 board swapped the PCIe reset lines on the board. As a 520341825Sdim workaround for this bug, we bring both PCIe ports out of reset at 521341825Sdim the same time instead of on separate calls. So for port 0, we bring 522341825Sdim both out of reset and do nothing on port 1 */ 523341825Sdim if (pcie_port == 0) 524341825Sdim { 525341825Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 526341825Sdim /* After a chip reset the PCIe will also be in reset. If it isn't, 527341825Sdim most likely someone is trying to init it again without a proper 528341825Sdim PCIe reset */ 529341825Sdim if (ciu_soft_prst.s.soft_prst == 0) 530341825Sdim { 531341825Sdim /* Reset the ports */ 532341825Sdim ciu_soft_prst.s.soft_prst = 1; 533341825Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 534341825Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 535341825Sdim ciu_soft_prst.s.soft_prst = 1; 536341825Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 537341825Sdim /* Wait until pcie resets the ports. 
*/ 538353358Sdim cvmx_wait_usec(2000); 539353358Sdim } 540341825Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 541341825Sdim ciu_soft_prst.s.soft_prst = 0; 542344779Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 543344779Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 544344779Sdim ciu_soft_prst.s.soft_prst = 0; 545344779Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 546344779Sdim } 547344779Sdim } 548344779Sdim else 549344779Sdim { 550353358Sdim /* The normal case: The PCIe ports are completely separate and can be 551353358Sdim brought out of reset independently */ 552344779Sdim if (pcie_port) 553344779Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 554353358Sdim else 555353358Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 556353358Sdim /* After a chip reset the PCIe will also be in reset. If it isn't, 557353358Sdim most likely someone is trying to init it again without a proper 558353358Sdim PCIe reset */ 559353358Sdim if (ciu_soft_prst.s.soft_prst == 0) 560353358Sdim { 561344779Sdim /* Reset the port */ 562353358Sdim ciu_soft_prst.s.soft_prst = 1; 563353358Sdim if (pcie_port) 564353358Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 565353358Sdim else 566353358Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 567353358Sdim /* Wait until pcie resets the ports. */ 568344779Sdim cvmx_wait_usec(2000); 569344779Sdim } 570344779Sdim if (pcie_port) 571344779Sdim { 572344779Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 573344779Sdim ciu_soft_prst.s.soft_prst = 0; 574344779Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 575344779Sdim } 576344779Sdim else 577344779Sdim { 578353358Sdim ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 579353358Sdim ciu_soft_prst.s.soft_prst = 0; 580353358Sdim cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 581353358Sdim } 582353358Sdim } 583353358Sdim 584353358Sdim /* Wait for PCIe reset to complete. 
Due to errata PCIE-700, we don't poll 585353358Sdim PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */ 586353358Sdim cvmx_wait(400000); 587353358Sdim 588353358Sdim /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and 589353358Sdim CN52XX, so we only probe it on newer chips */ 590353358Sdim if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) 591353358Sdim { 592353358Sdim /* Clear PCLK_RUN so we can check if the clock is running */ 593353358Sdim pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); 594353358Sdim pescx_ctl_status2.s.pclk_run = 1; 595353358Sdim cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64); 596353358Sdim /* Now that we cleared PCLK_RUN, wait for it to be set again telling 597353358Sdim us the clock is running */ 598353358Sdim if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port), 599353358Sdim cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000)) 600353358Sdim { 601353358Sdim cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port); 602353358Sdim return -1; 603353358Sdim } 604353358Sdim } 605353358Sdim 606353358Sdim /* Check and make sure PCIe came out of reset. If it doesn't the board 607353358Sdim probably hasn't wired the clocks up and the interface should be 608353358Sdim skipped */ 609353358Sdim pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); 610353358Sdim if (pescx_ctl_status2.s.pcierst) 611353358Sdim { 612353358Sdim cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port); 613353358Sdim return -1; 614360784Sdim } 615360784Sdim 616311116Sdim /* Check BIST2 status. If any bits are set skip this interface. This 617341825Sdim is an attempt to catch PCIE-813 on pass 1 parts */ 618311116Sdim pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port)); 619321369Sdim if (pescx_bist_status2.u64) 620321369Sdim { 621321369Sdim cvmx_dprintf("PCIe: Port %d BIST2 failed. 
Most likely this port isn't hooked up, skipping.\n", pcie_port); 622344779Sdim return -1; 623344779Sdim } 624321369Sdim 625321369Sdim /* Check BIST status */ 626321369Sdim pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port)); 627321369Sdim if (pescx_bist_status.u64) 628321369Sdim cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64)); 629321369Sdim 630353358Sdim /* Initialize the config space CSRs */ 631353358Sdim __cvmx_pcie_rc_initialize_config_space(pcie_port); 632353358Sdim 633353358Sdim /* Bring the link up */ 634353358Sdim if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) 635353358Sdim { 636353358Sdim cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port); 637321369Sdim return -1; 638321369Sdim } 639321369Sdim 640321369Sdim /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ 641321369Sdim npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL); 642360784Sdim npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */ 643360784Sdim npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */ 644360784Sdim cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64); 645360784Sdim 646360784Sdim /* Setup Mem access SubDIDs */ 647360784Sdim mem_access_subid.u64 = 0; 648360784Sdim mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */ 649360784Sdim mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */ 650360784Sdim mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */ 651360784Sdim mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */ 652360784Sdim mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */ 653360784Sdim mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */ 654360784Sdim mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. 
*/ 655360784Sdim mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */ 656360784Sdim mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */ 657360784Sdim 658360784Sdim /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */ 659360784Sdim for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++) 660353358Sdim { 661353358Sdim cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64); 662353358Sdim mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */ 663353358Sdim } 664353358Sdim 665353358Sdim /* Disable the peer to peer forwarding register. This must be setup 666353358Sdim by the OS after it enumerates the bus and assigns addresses to the 667353358Sdim PCIe busses */ 668353358Sdim for (i=0; i<4; i++) 669353358Sdim { 670353358Sdim cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1); 671353358Sdim cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1); 672353358Sdim } 673353358Sdim 674353358Sdim /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ 675353358Sdim cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0); 676353358Sdim 677353358Sdim /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */ 678353358Sdim cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE); 679353358Sdim 680353358Sdim bar1_index.u32 = 0; 681353358Sdim bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22); 682353358Sdim bar1_index.s.ca = 1; /* Not Cached */ 683353358Sdim bar1_index.s.end_swp = 1; /* Endian Swap mode */ 684353358Sdim bar1_index.s.addr_v = 1; /* Valid entry */ 685353358Sdim 686353358Sdim base = pcie_port ? 16 : 0; 687353358Sdim 688353358Sdim /* Big endian swizzle for 32-bit PEXP_NCB register. 
*/ 689353358Sdim#ifdef __MIPSEB__ 690353358Sdim addr_swizzle = 4; 691353358Sdim#else 692353358Sdim addr_swizzle = 0; 693353358Sdim#endif 694353358Sdim for (i = 0; i < 16; i++) { 695353358Sdim cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32); 696353358Sdim base++; 697353358Sdim /* 256MB / 16 >> 22 == 4 */ 698360784Sdim bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22); 699353358Sdim } 700353358Sdim 701353358Sdim /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence 702353358Sdim where they overlap. It also overlaps with the device addresses, so 703353358Sdim make sure the peer to peer forwarding is set right */ 704353358Sdim cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0); 705353358Sdim 706353358Sdim /* Setup BAR2 attributes */ 707353358Sdim /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */ 708353358Sdim /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */ 709353358Sdim /* � WAIT_COM=0 will likely work for all applications. 
*/ 710353358Sdim /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */ 711353358Sdim if (pcie_port) 712353358Sdim { 713353358Sdim cvmx_npei_ctl_port1_t npei_ctl_port; 714353358Sdim npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1); 715360784Sdim npei_ctl_port.s.bar2_enb = 1; 716353358Sdim npei_ctl_port.s.bar2_esx = 1; 717353358Sdim npei_ctl_port.s.bar2_cax = 0; 718353358Sdim npei_ctl_port.s.ptlp_ro = 1; 719353358Sdim npei_ctl_port.s.ctlp_ro = 1; 720353358Sdim npei_ctl_port.s.wait_com = 0; 721353358Sdim npei_ctl_port.s.waitl_com = 0; 722353358Sdim cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64); 723321369Sdim } 724321369Sdim else 725321369Sdim { 726321369Sdim cvmx_npei_ctl_port0_t npei_ctl_port; 727321369Sdim npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0); 728321369Sdim npei_ctl_port.s.bar2_enb = 1; 729353358Sdim npei_ctl_port.s.bar2_esx = 1; 730353358Sdim npei_ctl_port.s.bar2_cax = 0; 731321369Sdim npei_ctl_port.s.ptlp_ro = 1; 732321369Sdim npei_ctl_port.s.ctlp_ro = 1; 733321369Sdim npei_ctl_port.s.wait_com = 0; 734321369Sdim npei_ctl_port.s.waitl_com = 0; 735321369Sdim cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64); 736353358Sdim } 737321369Sdim 738321369Sdim /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes 739321369Sdim TLP ordering to not be preserved after multiple PCIe port resets. This 740321369Sdim code detects this fault and corrects it by aligning the TLP counters 741321369Sdim properly. Another link reset is then performed. 
See PCIE-13340 */ 742353358Sdim if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || 743321369Sdim OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) 744321369Sdim { 745321369Sdim cvmx_npei_dbg_data_t dbg_data; 746321369Sdim int old_in_fif_p_count; 747360784Sdim int in_fif_p_count; 748321369Sdim int out_p_count; 749321369Sdim int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1; 750321369Sdim int i; 751321369Sdim 752321369Sdim /* Choose a write address of 1MB. It should be harmless as all bars 753321369Sdim haven't been setup */ 754321369Sdim uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63); 755321369Sdim 756321369Sdim /* Make sure at least in_p_offset have been executed before we try and 757321369Sdim read in_fif_p_count */ 758321369Sdim i = in_p_offset; 759321369Sdim while (i--) 760353358Sdim { 761321369Sdim cvmx_write64_uint32(write_address, 0); 762360784Sdim cvmx_wait(10000); 763321369Sdim } 764321369Sdim 765321369Sdim /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be 766321369Sdim unstable sometimes so read it twice with a write between the reads. 767321369Sdim This way we can tell the value is good as it will increment by one 768321369Sdim due to the write */ 769321369Sdim cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 
0xd7fc : 0xcffc); 770321369Sdim cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT); 771321369Sdim do 772 { 773 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); 774 old_in_fif_p_count = dbg_data.s.data & 0xff; 775 cvmx_write64_uint32(write_address, 0); 776 cvmx_wait(10000); 777 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); 778 in_fif_p_count = dbg_data.s.data & 0xff; 779 } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff)); 780 781 /* Update in_fif_p_count for it's offset with respect to out_p_count */ 782 in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff; 783 784 /* Read the OUT_P_COUNT from the debug select */ 785 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f); 786 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT); 787 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); 788 out_p_count = (dbg_data.s.data>>1) & 0xff; 789 790 /* Check that the two counters are aligned */ 791 if (out_p_count != in_fif_p_count) 792 { 793 cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port); 794 while (in_fif_p_count != 0) 795 { 796 cvmx_write64_uint32(write_address, 0); 797 cvmx_wait(10000); 798 in_fif_p_count = (in_fif_p_count + 1) & 0xff; 799 } 800 /* The EBH5200 board swapped the PCIe reset lines on the board. This 801 means we must bring both links down and up, which will cause the 802 PCIe0 to need alignment again. Lots of messages will be displayed, 803 but everything should work */ 804 if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) && 805 (pcie_port == 1)) 806 cvmx_pcie_rc_initialize(0); 807 /* Rety bringing this port up */ 808 goto retry; 809 } 810 } 811 812 /* Display the link status */ 813 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); 814 cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw); 815 816 return 0; 817} 818 819 820/** 821 * @INTERNAL 822 * Initialize a host mode PCIe gen 2 link. 
This function takes a PCIe 823 * port from reset to a link up state. Software can then begin 824 * configuring the rest of the link. 825 * 826 * @param pcie_port PCIe port to initialize 827 * 828 * @return Zero on success 829 */ 830static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port) 831{ 832 uint64_t start_cycle; 833 cvmx_pemx_ctl_status_t pem_ctl_status; 834 cvmx_pciercx_cfg032_t pciercx_cfg032; 835 cvmx_pciercx_cfg448_t pciercx_cfg448; 836 837 /* Bring up the link */ 838 pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port)); 839 pem_ctl_status.s.lnk_enb = 1; 840 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64); 841 842 /* Wait for the link to come up */ 843 start_cycle = cvmx_get_cycle(); 844 do 845 { 846 if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE)) 847 return -1; 848 cvmx_wait(10000); 849 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); 850 } while (pciercx_cfg032.s.dlla == 0); 851 852 /* Update the Replay Time Limit. Empirically, some PCIe devices take a 853 little longer to respond than expected under load. As a workaround for 854 this we configure the Replay Time Limit to the value expected for a 512 855 byte MPS instead of our actual 256 byte MPS. The numbers below are 856 directly from the PCIe spec table 3-4 */ 857 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); 858 switch (pciercx_cfg032.s.nlw) 859 { 860 case 1: /* 1 lane */ 861 pciercx_cfg448.s.rtl = 1677; 862 break; 863 case 2: /* 2 lanes */ 864 pciercx_cfg448.s.rtl = 867; 865 break; 866 case 4: /* 4 lanes */ 867 pciercx_cfg448.s.rtl = 462; 868 break; 869 case 8: /* 8 lanes */ 870 pciercx_cfg448.s.rtl = 258; 871 break; 872 } 873 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32); 874 875 return 0; 876} 877 878 879/** 880 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate 881 * the bus. 
882 * 883 * @param pcie_port PCIe port to initialize 884 * 885 * @return Zero on success 886 */ 887static int __cvmx_pcie_rc_initialize_gen2(int pcie_port) 888{ 889 int i; 890 cvmx_ciu_soft_prst_t ciu_soft_prst; 891 cvmx_mio_rst_ctlx_t mio_rst_ctl; 892 cvmx_pemx_bar_ctl_t pemx_bar_ctl; 893 cvmx_pemx_ctl_status_t pemx_ctl_status; 894 cvmx_pemx_bist_status_t pemx_bist_status; 895 cvmx_pemx_bist_status2_t pemx_bist_status2; 896 cvmx_pciercx_cfg032_t pciercx_cfg032; 897 cvmx_pciercx_cfg515_t pciercx_cfg515; 898 cvmx_sli_ctl_portx_t sli_ctl_portx; 899 cvmx_sli_mem_access_ctl_t sli_mem_access_ctl; 900 cvmx_sli_mem_access_subidx_t mem_access_subid; 901 cvmx_mio_rst_ctlx_t mio_rst_ctlx; 902 cvmx_sriox_status_reg_t sriox_status_reg; 903 cvmx_pemx_bar1_indexx_t bar1_index; 904 905 /* Make sure this interface isn't SRIO */ 906 sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port)); 907 if (sriox_status_reg.s.srio) 908 { 909 cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port); 910 return -1; 911 } 912 913 /* Make sure we aren't trying to setup a target mode interface in host mode */ 914 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port)); 915 if (!mio_rst_ctl.s.host_mode) 916 { 917 cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port); 918 return -1; 919 } 920 921 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */ 922 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) 923 { 924 if (pcie_port) 925 { 926 cvmx_ciu_qlm1_t ciu_qlm; 927 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1); 928 ciu_qlm.s.txbypass = 1; 929 ciu_qlm.s.txdeemph = 5; 930 ciu_qlm.s.txmargin = 0x17; 931 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64); 932 } 933 else 934 { 935 cvmx_ciu_qlm0_t ciu_qlm; 936 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0); 937 ciu_qlm.s.txbypass = 1; 938 ciu_qlm.s.txdeemph = 5; 939 ciu_qlm.s.txmargin = 0x17; 940 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64); 941 } 942 } 943 944 /* Bring the PCIe out of reset */ 945 if (pcie_port) 946 
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 947 else 948 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 949 /* After a chip reset the PCIe will also be in reset. If it isn't, 950 most likely someone is trying to init it again without a proper 951 PCIe reset */ 952 if (ciu_soft_prst.s.soft_prst == 0) 953 { 954 /* Reset the port */ 955 ciu_soft_prst.s.soft_prst = 1; 956 if (pcie_port) 957 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 958 else 959 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 960 /* Wait until pcie resets the ports. */ 961 cvmx_wait_usec(2000); 962 } 963 if (pcie_port) 964 { 965 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 966 ciu_soft_prst.s.soft_prst = 0; 967 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 968 } 969 else 970 { 971 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 972 ciu_soft_prst.s.soft_prst = 0; 973 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 974 } 975 976 /* Wait for PCIe reset to complete */ 977 cvmx_wait_usec(1000); 978 979 /* Check and make sure PCIe came out of reset. 
If it doesn't the board 980 probably hasn't wired the clocks up and the interface should be 981 skipped */ 982 mio_rst_ctlx.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port)); 983 if (!mio_rst_ctlx.s.rst_done) 984 { 985 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port); 986 return -1; 987 } 988 989 /* Check BIST status */ 990 pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port)); 991 if (pemx_bist_status.u64) 992 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64)); 993 pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port)); 994 if (pemx_bist_status2.u64) 995 cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64)); 996 997 /* Initialize the config space CSRs */ 998 __cvmx_pcie_rc_initialize_config_space(pcie_port); 999 1000 /* Enable gen2 speed selection */ 1001 pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port)); 1002 pciercx_cfg515.s.dsc = 1; 1003 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32); 1004 1005 /* Bring the link up */ 1006 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) 1007 { 1008 /* Some gen1 devices don't handle the gen 2 training correctly. 
Disable 1009 gen2 and try again with only gen1 */ 1010 cvmx_pciercx_cfg031_t pciercx_cfg031; 1011 pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port)); 1012 pciercx_cfg031.s.mls = 1; 1013 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg515.u32); 1014 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) 1015 { 1016 cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port); 1017 return -1; 1018 } 1019 } 1020 1021 /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ 1022 sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL); 1023 sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */ 1024 sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */ 1025 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64); 1026 1027 /* Setup Mem access SubDIDs */ 1028 mem_access_subid.u64 = 0; 1029 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */ 1030 mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */ 1031 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */ 1032 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */ 1033 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ 1034 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ 1035 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */ 1036 1037 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */ 1038 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++) 1039 { 1040 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64); 1041 mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */ 1042 } 1043 1044 /* Disable the peer to peer forwarding register. 
This must be setup 1045 by the OS after it enumerates the bus and assigns addresses to the 1046 PCIe busses */ 1047 for (i=0; i<4; i++) 1048 { 1049 cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1); 1050 cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1); 1051 } 1052 1053 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ 1054 cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0); 1055 1056 /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence 1057 where they overlap. It also overlaps with the device addresses, so 1058 make sure the peer to peer forwarding is set right */ 1059 cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0); 1060 1061 /* Setup BAR2 attributes */ 1062 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */ 1063 /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */ 1064 /* � WAIT_COM=0 will likely work for all applications. */ 1065 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */ 1066 pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port)); 1067 pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/ 1068 pemx_bar_ctl.s.bar2_enb = 1; 1069 pemx_bar_ctl.s.bar2_esx = 1; 1070 pemx_bar_ctl.s.bar2_cax = 0; 1071 cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64); 1072 sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port)); 1073 sli_ctl_portx.s.ptlp_ro = 1; 1074 sli_ctl_portx.s.ctlp_ro = 1; 1075 sli_ctl_portx.s.wait_com = 0; 1076 sli_ctl_portx.s.waitl_com = 0; 1077 cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64); 1078 1079 /* BAR1 follows BAR2 */ 1080 cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE); 1081 1082 bar1_index.u64 = 0; 1083 bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22); 1084 bar1_index.s.ca = 1; /* Not Cached */ 1085 bar1_index.s.end_swp = 1; /* Endian Swap mode */ 1086 bar1_index.s.addr_v = 1; /* Valid entry */ 1087 1088 for (i = 0; i < 16; i++) { 1089 
cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64); 1090 /* 256MB / 16 >> 22 == 4 */ 1091 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22); 1092 } 1093 1094 /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES 1095 clock */ 1096 pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port)); 1097 pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000; 1098 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64); 1099 1100 /* Display the link status */ 1101 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); 1102 cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls); 1103 1104 return 0; 1105} 1106 1107/** 1108 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus. 1109 * 1110 * @param pcie_port PCIe port to initialize 1111 * 1112 * @return Zero on success 1113 */ 1114int cvmx_pcie_rc_initialize(int pcie_port) 1115{ 1116 int result; 1117 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1118 result = __cvmx_pcie_rc_initialize_gen1(pcie_port); 1119 else 1120 result = __cvmx_pcie_rc_initialize_gen2(pcie_port); 1121#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL) 1122 if (result == 0) 1123 cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port); 1124#endif 1125 return result; 1126} 1127 1128 1129/** 1130 * Shutdown a PCIe port and put it in reset 1131 * 1132 * @param pcie_port PCIe port to shutdown 1133 * 1134 * @return Zero on success 1135 */ 1136int cvmx_pcie_rc_shutdown(int pcie_port) 1137{ 1138#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL) 1139 cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port); 1140#endif 1141 /* Wait for all pending operations to complete */ 1142 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1143 { 1144 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 
2000)) 1145 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port); 1146 } 1147 else 1148 { 1149 if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000)) 1150 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port); 1151 } 1152 1153 /* Force reset */ 1154 if (pcie_port) 1155 { 1156 cvmx_ciu_soft_prst_t ciu_soft_prst; 1157 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 1158 ciu_soft_prst.s.soft_prst = 1; 1159 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 1160 } 1161 else 1162 { 1163 cvmx_ciu_soft_prst_t ciu_soft_prst; 1164 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 1165 ciu_soft_prst.s.soft_prst = 1; 1166 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 1167 } 1168 return 0; 1169} 1170 1171 1172/** 1173 * @INTERNAL 1174 * Build a PCIe config space request address for a device 1175 * 1176 * @param pcie_port PCIe port to access 1177 * @param bus Sub bus 1178 * @param dev Device ID 1179 * @param fn Device sub function 1180 * @param reg Register to access 1181 * 1182 * @return 64bit Octeon IO address 1183 */ 1184static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg) 1185{ 1186 cvmx_pcie_address_t pcie_addr; 1187 cvmx_pciercx_cfg006_t pciercx_cfg006; 1188 1189 pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port)); 1190 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0)) 1191 return 0; 1192 1193 pcie_addr.u64 = 0; 1194 pcie_addr.config.upper = 2; 1195 pcie_addr.config.io = 1; 1196 pcie_addr.config.did = 3; 1197 pcie_addr.config.subdid = 1; 1198 pcie_addr.config.es = 1; 1199 pcie_addr.config.port = pcie_port; 1200 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum); 1201 pcie_addr.config.bus = bus; 1202 pcie_addr.config.dev = dev; 1203 pcie_addr.config.func = fn; 1204 pcie_addr.config.reg = reg; 1205 return pcie_addr.u64; 1206} 1207 1208 1209/** 1210 * Read 8bits from a Device's config space 1211 * 
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (!address)
        return 0xff;    /* Unaddressable device: behave like a master abort */
    return cvmx_read64_uint8(address);
}


/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (!address)
        return 0xffff;
    /* Config space is little endian on the wire */
    return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
}


/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (!address)
        return 0xffffffff;
    return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
}


/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (address)
        cvmx_write64_uint8(address, val);
}


/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (address)
        cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}


/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (address)
        cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
1329 * 1330 * @param pcie_port PCIe port to read from 1331 * @param cfg_offset Address to read 1332 * 1333 * @return Value read 1334 */ 1335uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset) 1336{ 1337 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1338 { 1339 cvmx_pescx_cfg_rd_t pescx_cfg_rd; 1340 pescx_cfg_rd.u64 = 0; 1341 pescx_cfg_rd.s.addr = cfg_offset; 1342 cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64); 1343 pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port)); 1344 return pescx_cfg_rd.s.data; 1345 } 1346 else 1347 { 1348 cvmx_pemx_cfg_rd_t pemx_cfg_rd; 1349 pemx_cfg_rd.u64 = 0; 1350 pemx_cfg_rd.s.addr = cfg_offset; 1351 cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64); 1352 pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port)); 1353 return pemx_cfg_rd.s.data; 1354 } 1355} 1356 1357 1358/** 1359 * Write a PCIe config space register indirectly. This is used for 1360 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. 1361 * 1362 * @param pcie_port PCIe port to write to 1363 * @param cfg_offset Address to write 1364 * @param val Value to write 1365 */ 1366void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val) 1367{ 1368 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1369 { 1370 cvmx_pescx_cfg_wr_t pescx_cfg_wr; 1371 pescx_cfg_wr.u64 = 0; 1372 pescx_cfg_wr.s.addr = cfg_offset; 1373 pescx_cfg_wr.s.data = val; 1374 cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64); 1375 } 1376 else 1377 { 1378 cvmx_pemx_cfg_wr_t pemx_cfg_wr; 1379 pemx_cfg_wr.u64 = 0; 1380 pemx_cfg_wr.s.addr = cfg_offset; 1381 pemx_cfg_wr.s.data = val; 1382 cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64); 1383 } 1384} 1385 1386 1387/** 1388 * Initialize a PCIe port for use in target(EP) mode. 
1389 * 1390 * @param pcie_port PCIe port to initialize 1391 * 1392 * @return Zero on success 1393 */ 1394int cvmx_pcie_ep_initialize(int pcie_port) 1395{ 1396 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1397 { 1398 cvmx_npei_ctl_status_t npei_ctl_status; 1399 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); 1400 if (npei_ctl_status.s.host_mode) 1401 return -1; 1402 } 1403 else 1404 { 1405 cvmx_mio_rst_ctlx_t mio_rst_ctl; 1406 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port)); 1407 if (mio_rst_ctl.s.host_mode) 1408 return -1; 1409 } 1410 1411 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */ 1412 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) 1413 { 1414 if (pcie_port) 1415 { 1416 cvmx_ciu_qlm1_t ciu_qlm; 1417 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1); 1418 ciu_qlm.s.txbypass = 1; 1419 ciu_qlm.s.txdeemph = 5; 1420 ciu_qlm.s.txmargin = 0x17; 1421 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64); 1422 } 1423 else 1424 { 1425 cvmx_ciu_qlm0_t ciu_qlm; 1426 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0); 1427 ciu_qlm.s.txbypass = 1; 1428 ciu_qlm.s.txdeemph = 5; 1429 ciu_qlm.s.txmargin = 0x17; 1430 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64); 1431 } 1432 } 1433 1434 /* Enable bus master and memory */ 1435 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6); 1436 1437 /* Max Payload Size (PCIE*_CFG030[MPS]) */ 1438 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */ 1439 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */ 1440 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */ 1441 { 1442 cvmx_pcieepx_cfg030_t pcieepx_cfg030; 1443 pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port)); 1444 if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) 1445 { 1446 pcieepx_cfg030.s.mps = MPS_CN5XXX; 1447 pcieepx_cfg030.s.mrrs = MRRS_CN5XXX; 1448 } 1449 else 1450 { 1451 pcieepx_cfg030.s.mps = MPS_CN6XXX; 1452 pcieepx_cfg030.s.mrrs = MRRS_CN6XXX; 1453 } 1454 pcieepx_cfg030.s.ro_en = 1; 
/* Enable relaxed ordering. */ 1455 pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */ 1456 pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */ 1457 pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */ 1458 pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */ 1459 pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */ 1460 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32); 1461 } 1462 1463 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1464 { 1465 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */ 1466 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */ 1467 cvmx_npei_ctl_status2_t npei_ctl_status2; 1468 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2); 1469 npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */ 1470 npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */ 1471 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64); 1472 } 1473 else 1474 { 1475 /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */ 1476 /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */ 1477 cvmx_dpi_sli_prtx_cfg_t prt_cfg; 1478 cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl; 1479 prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port)); 1480 prt_cfg.s.mps = MPS_CN6XXX; 1481 prt_cfg.s.mrrs = MRRS_CN6XXX; 1482 cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64); 1483 1484 sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port)); 1485 sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX; 1486 cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64); 1487 } 1488 1489 /* Setup Mem access SubDID 12 to access Host memory */ 1490 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1491 { 1492 cvmx_npei_mem_access_subidx_t 
mem_access_subid; 1493 mem_access_subid.u64 = 0; 1494 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */ 1495 mem_access_subid.s.nmerge = 1; /* Merging is not allowed in this window. */ 1496 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */ 1497 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */ 1498 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */ 1499 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */ 1500 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */ 1501 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */ 1502 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */ 1503 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64); 1504 } 1505 else 1506 { 1507 cvmx_sli_mem_access_subidx_t mem_access_subid; 1508 mem_access_subid.u64 = 0; 1509 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */ 1510 mem_access_subid.s.nmerge = 0; /* Merging is allowed in this window. */ 1511 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */ 1512 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */ 1513 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ 1514 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ 1515 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */ 1516 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64); 1517 } 1518 return 0; 1519} 1520 1521 1522/** 1523 * Wait for posted PCIe read/writes to reach the other side of 1524 * the internal PCIe switch. This will insure that core 1525 * read/writes are posted before anything after this function 1526 * is called. This may be necessary when writing to memory that 1527 * will later be read using the DMA/PKT engines. 
1528 * 1529 * @param pcie_port PCIe port to wait for 1530 */ 1531void cvmx_pcie_wait_for_pending(int pcie_port) 1532{ 1533 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1534 { 1535 cvmx_npei_data_out_cnt_t npei_data_out_cnt; 1536 int a; 1537 int b; 1538 int c; 1539 1540 /* See section 9.8, PCIe Core-initiated Requests, in the manual for a 1541 description of how this code works */ 1542 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT); 1543 if (pcie_port) 1544 { 1545 if (!npei_data_out_cnt.s.p1_fcnt) 1546 return; 1547 a = npei_data_out_cnt.s.p1_ucnt; 1548 b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff; 1549 } 1550 else 1551 { 1552 if (!npei_data_out_cnt.s.p0_fcnt) 1553 return; 1554 a = npei_data_out_cnt.s.p0_ucnt; 1555 b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff; 1556 } 1557 1558 while (1) 1559 { 1560 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT); 1561 c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt; 1562 if (a<=b) 1563 { 1564 if ((c<a) || (c>b)) 1565 return; 1566 } 1567 else 1568 { 1569 if ((c>b) && (c<a)) 1570 return; 1571 } 1572 } 1573 } 1574 else 1575 { 1576 cvmx_sli_data_out_cnt_t sli_data_out_cnt; 1577 int a; 1578 int b; 1579 int c; 1580 1581 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT); 1582 if (pcie_port) 1583 { 1584 if (!sli_data_out_cnt.s.p1_fcnt) 1585 return; 1586 a = sli_data_out_cnt.s.p1_ucnt; 1587 b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff; 1588 } 1589 else 1590 { 1591 if (!sli_data_out_cnt.s.p0_fcnt) 1592 return; 1593 a = sli_data_out_cnt.s.p0_ucnt; 1594 b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff; 1595 } 1596 1597 while (1) 1598 { 1599 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT); 1600 c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt; 1601 if (a<=b) 1602 { 1603 if ((c<a) || (c>b)) 1604 return; 1605 } 1606 else 1607 { 1608 if ((c>b) && (c<a)) 1609 return; 1610 } 1611 } 1612 } 1613} 1614