1/***********************license start*************** 2 * Copyright (c) 2003-2011 Cavium, Inc. <support@cavium.com>. All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Inc. nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
38 ***********************license end**************************************/ 39 40/** 41 * @file 42 * 43 * Interface to PCIe as a host(RC) or target(EP) 44 * 45 * <hr>$Revision: 70030 $<hr> 46 */ 47#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 48#include <asm/octeon/cvmx.h> 49#include <asm/octeon/cvmx-config.h> 50#include <asm/octeon/cvmx-clock.h> 51#include <asm/octeon/cvmx-ciu-defs.h> 52#include <asm/octeon/cvmx-dpi-defs.h> 53#include <asm/octeon/cvmx-mio-defs.h> 54#include <asm/octeon/cvmx-npi-defs.h> 55#include <asm/octeon/cvmx-npei-defs.h> 56#include <asm/octeon/cvmx-pci-defs.h> 57#include <asm/octeon/cvmx-pcieepx-defs.h> 58#include <asm/octeon/cvmx-pciercx-defs.h> 59#include <asm/octeon/cvmx-pemx-defs.h> 60#include <asm/octeon/cvmx-pexp-defs.h> 61#include <asm/octeon/cvmx-pescx-defs.h> 62#include <asm/octeon/cvmx-sli-defs.h> 63#include <asm/octeon/cvmx-sriox-defs.h> 64#include <asm/octeon/cvmx-helper-jtag.h> 65 66#ifdef CONFIG_CAVIUM_DECODE_RSL 67#include <asm/octeon/cvmx-error.h> 68#endif 69#include <asm/octeon/cvmx-helper.h> 70#include <asm/octeon/cvmx-helper-board.h> 71#include <asm/octeon/cvmx-helper-errata.h> 72#include <asm/octeon/cvmx-qlm.h> 73#include <asm/octeon/cvmx-pcie.h> 74#include <asm/octeon/cvmx-sysinfo.h> 75#include <asm/octeon/cvmx-swap.h> 76#include <asm/octeon/cvmx-wqe.h> 77#else 78#include "cvmx.h" 79#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL) 80#include "cvmx-csr-db.h" 81#endif 82#include "cvmx-pcie.h" 83#include "cvmx-sysinfo.h" 84#include "cvmx-swap.h" 85#include "cvmx-wqe.h" 86#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL) 87#include "cvmx-error.h" 88#endif 89#include "cvmx-helper-errata.h" 90#include "cvmx-qlm.h" 91#endif 92 93#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */ 94#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */ 95#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */ 96#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */ 97 98/** 99 * Return the Core virtual base address for 
 * PCIe IO access. IOs are
 * read/written as an offset from this address.
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return 64bit Octeon IO base address for read/write
 */
uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
{
    cvmx_pcie_address_t pcie_addr;
    /* Build an Octeon physical address targeting PCIe IO space:
       did=3/subdid=2 select the PCIe IO region and es=1 selects the
       endian-swap mode (see cvmx_pcie_address_t for field meanings). */
    pcie_addr.u64 = 0;
    pcie_addr.io.upper = 0;
    pcie_addr.io.io = 1;
    pcie_addr.io.did = 3;
    pcie_addr.io.subdid = 2;
    pcie_addr.io.es = 1;
    pcie_addr.io.port = pcie_port;
    return pcie_addr.u64;
}


/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the IO window
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Fixed 4GB IO window per port */
    return 1ull<<32;
}


/**
 * Return the Core virtual base address for PCIe MEM access. Memory is
 * read/written as an offset from this address.
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return 64bit Octeon IO base address for read/write
 */
uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
{
    cvmx_pcie_address_t pcie_addr;
    /* did=3 with subdid 3 (port 0) or 4 (port 1) selects the PCIe
       memory space for the requested port */
    pcie_addr.u64 = 0;
    pcie_addr.mem.upper = 0;
    pcie_addr.mem.io = 1;
    pcie_addr.mem.did = 3;
    pcie_addr.mem.subdid = 3 + pcie_port;
    return pcie_addr.u64;
}


/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the Mem window
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Fixed 64GB memory window per port */
    return 1ull<<36;
}


/**
 * @INTERNAL
 * Initialize the RC config space CSRs
 *
 * Programs the root-complex configuration registers (MPS/MRRS, error
 * reporting, BARs, interrupt enables) for one PCIe port. The matching
 * NPEI (CN5XXX) or DPI/SLI (CN6XXX) side is programmed to agree with
 * the config-space values where the hardware requires it.
 *
 * @param pcie_port PCIe port to initialize
 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        /* Max outstanding load request. */
        prt_cfg.s.molr = 32;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
    /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy.
           They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100;
        pciercx_cfg008.s.ml_addr = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100;
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}

/**
 * @INTERNAL
 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
       cause bus errors on 64bit memory reads. Turning off length error
       checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
        /* Some boards (e.g. the Lanner MR730) are wired without the
           swap, so key off the board type */
        switch (cvmx_sysinfo_get()->board_type)
        {
#if defined(OCTEON_VENDOR_LANNER)
            case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
                break;
#endif
            default:
                pescx_ctl_status.s.lane_swp = 1;
                break;
        }
        cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up */
    start_cycle = cvmx_get_cycle();
    do
    {
        /* Give up after roughly 100 core-clock seconds worth of cycles */
        if (cvmx_get_cycle() - start_cycle > 100*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(50000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
       little longer to respond than expected under load. As a workaround for
       this we configure the Replay Time Limit to the value expected for a 512
       byte MPS instead of our actual 256 byte MPS. The numbers below are
       directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
        /* Other negotiated widths keep the hardware's current RTL value */
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}

/**
 * @INTERNAL
 * Increment the "ba" (bus address) field of a SLI mem access SubID
 * register. The field is accessed through a model-specific union view,
 * so the right member must be picked per chip.
 *
 * @param pmas SubID register value to modify
 */
static inline void __cvmx_increment_ba(cvmx_sli_mem_access_subidx_t *pmas)
{
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        pmas->cn68xx.ba++;
    else
        pmas->cn63xx.ba++;
}

/**
 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
 * the bus.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
{
    int i;
    int base;
    uint64_t addr_swizzle;
    cvmx_ciu_soft_prst_t ciu_soft_prst;
    cvmx_pescx_bist_status_t pescx_bist_status;
    cvmx_pescx_bist_status2_t pescx_bist_status2;
    cvmx_npei_ctl_status_t npei_ctl_status;
    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
    cvmx_npei_mem_access_subidx_t mem_access_subid;
    cvmx_npei_dbg_data_t npei_dbg_data;
    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_npei_bar1_indexx_t bar1_index;

retry:
    /* Make sure we aren't trying to setup a target mode interface in host mode */
    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
    {
        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
        return -1;
    }

    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
    {
        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
        {
            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
            return -1;
        }
    }

    /* Make sure a CN56XX pass 1 isn't trying to do anything; errata for PASS 1 */
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
        cvmx_dprintf ("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
        return -1;
    }

    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
    npei_ctl_status.s.arb = 1;
    /* Allow up to 0x20 config retries */
    npei_ctl_status.s.cfg_rtry = 0x20;
    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        npei_ctl_status.s.p0_ntags = 0x20;
        npei_ctl_status.s.p1_ntags = 0x20;
    }
    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);

    /* Bring the PCIe out of reset */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
    {
        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
           workaround for this bug, we bring both PCIe ports out of reset at
           the same time instead of on separate calls. So for port 0, we bring
           both out of reset and do nothing on port 1 */
        if (pcie_port == 0)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            /* After a chip reset the PCIe will also be in reset. If it isn't,
               most likely someone is trying to init it again without a proper
               PCIe reset */
            if (ciu_soft_prst.s.soft_prst == 0)
            {
                /* Reset the ports */
                ciu_soft_prst.s.soft_prst = 1;
                cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
                ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
                ciu_soft_prst.s.soft_prst = 1;
                cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
                /* Wait until pcie resets the ports. */
                cvmx_wait_usec(2000);
            }
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }
    else
    {
        /* The normal case: The PCIe ports are completely separate and can be
           brought out of reset independently */
        if (pcie_port)
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
        else
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
        /* After a chip reset the PCIe will also be in reset. If it isn't,
           most likely someone is trying to init it again without a proper
           PCIe reset */
        if (ciu_soft_prst.s.soft_prst == 0)
        {
            /* Reset the port */
            ciu_soft_prst.s.soft_prst = 1;
            if (pcie_port)
                cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            else
                cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
            /* Wait until pcie resets the ports. */
            cvmx_wait_usec(2000);
        }
        if (pcie_port)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
        }
        else
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }

    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
    cvmx_wait(400000);

    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
       CN52XX, so we only probe it on newer chips */
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        /* Clear PCLK_RUN so we can check if the clock is running */
        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
        pescx_ctl_status2.s.pclk_run = 1;
        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
           us the clock is running */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
        {
            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
            return -1;
        }
    }

    /* Check and make sure PCIe came out of reset. If it doesn't the board
       probably hasn't wired the clocks up and the interface should be
       skipped */
    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
    if (pescx_ctl_status2.s.pcierst)
    {
        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
        return -1;
    }

    /* Check BIST2 status. If any bits are set skip this interface. This
       is an attempt to catch PCIE-813 on pass 1 parts */
    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
    if (pescx_bist_status2.u64)
    {
        cvmx_dprintf("PCIe: Port %d BIST2 failed.\nMost likely this port isn't hooked up, skipping.\n", pcie_port);
        return -1;
    }

    /* Check BIST status */
    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
    if (pescx_bist_status.u64)
        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));

    /* Initialize the config space CSRs */
    __cvmx_pcie_rc_initialize_config_space(pcie_port);

    /* Bring the link up */
    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
    {
        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
        return -1;
    }

    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
    npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
    npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);

    /* Setup Mem access SubDIDs */
    mem_access_subid.u64 = 0;
    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
    mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
    mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
    mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
    mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
    mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
    mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
    mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
    mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */

    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
    {
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
    }

    /* Disable the peer to peer forwarding register. This must be setup
       by the OS after it enumerates the bus and assigns addresses to the
       PCIe busses */
    for (i=0; i<4; i++)
    {
        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
    }

    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);

    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);

    bar1_index.u32 = 0;
    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
    bar1_index.s.ca = 1; /* Not Cached */
    bar1_index.s.end_swp = 1; /* Endian Swap mode */
    bar1_index.s.addr_v = 1; /* Valid entry */

    /* BAR1 index registers 0-15 serve port 0, 16-31 serve port 1 */
    base = pcie_port ? 16 : 0;

    /* Big endian swizzle for 32-bit PEXP_NCB register. */
#ifdef __MIPSEB__
    addr_swizzle = 4;
#else
    addr_swizzle = 0;
#endif
    for (i = 0; i < 16; i++) {
        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
        base++;
        /* 256MB / 16 >> 22 == 4 */
        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
    }

    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
       where they overlap. It also overlaps with the device addresses, so
       make sure the peer to peer forwarding is set right */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);

    /* Setup BAR2 attributes */
    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
    /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
    /* - WAIT_COM=0 will likely work for all applications. */
    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
    if (pcie_port)
    {
        cvmx_npei_ctl_port1_t npei_ctl_port;
        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
        npei_ctl_port.s.bar2_enb = 1;
        npei_ctl_port.s.bar2_esx = 1;
        npei_ctl_port.s.bar2_cax = 0;
        npei_ctl_port.s.ptlp_ro = 1;
        npei_ctl_port.s.ctlp_ro = 1;
        npei_ctl_port.s.wait_com = 0;
        npei_ctl_port.s.waitl_com = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
    }
    else
    {
        cvmx_npei_ctl_port0_t npei_ctl_port;
        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
        npei_ctl_port.s.bar2_enb = 1;
        npei_ctl_port.s.bar2_esx = 1;
        npei_ctl_port.s.bar2_cax = 0;
        npei_ctl_port.s.ptlp_ro = 1;
        npei_ctl_port.s.ctlp_ro = 1;
        npei_ctl_port.s.wait_com = 0;
        npei_ctl_port.s.waitl_com = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
    }

    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
       TLP ordering to not be preserved after multiple PCIe port resets. This
       code detects this fault and corrects it by aligning the TLP counters
       properly. Another link reset is then performed. See PCIE-13340 */
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_npei_dbg_data_t dbg_data;
        int old_in_fif_p_count;
        int in_fif_p_count;
        int out_p_count;
        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
        int i; /* NOTE: shadows the outer i; local to this workaround */

        /* Choose a write address of 1MB. It should be harmless as all bars
           haven't been setup */
        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);

        /* Make sure at least in_p_offset have been executed before we try and
           read in_fif_p_count */
        i = in_p_offset;
        while (i--)
        {
            cvmx_write64_uint32(write_address, 0);
            cvmx_wait(10000);
        }

        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
           unstable sometimes so read it twice with a write between the reads.
           This way we can tell the value is good as it will increment by one
           due to the write */
        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
        do
        {
            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            old_in_fif_p_count = dbg_data.s.data & 0xff;
            cvmx_write64_uint32(write_address, 0);
            cvmx_wait(10000);
            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            in_fif_p_count = dbg_data.s.data & 0xff;
        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));

        /* Update in_fif_p_count for it's offset with respect to out_p_count */
        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;

        /* Read the OUT_P_COUNT from the debug select */
        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        out_p_count = (dbg_data.s.data>>1) & 0xff;

        /* Check that the two counters are aligned */
        if (out_p_count != in_fif_p_count)
        {
            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
            /* Issue dummy writes until the input counter wraps to zero */
            while (in_fif_p_count != 0)
            {
                cvmx_write64_uint32(write_address, 0);
                cvmx_wait(10000);
                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
            }
            /* The EBH5200 board swapped the PCIe reset lines on the board. This
               means we must bring both links down and up, which will cause the
               PCIe0 to need alignment again. Lots of messages will be displayed,
               but everything should work */
            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
                (pcie_port == 1))
                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
            goto retry;
        }
    }

    /* Display the link status */
    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);

    return 0;
}

/**
 * @INTERNAL
 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
841 * 842 * @param pcie_port PCIe port to initialize 843 * 844 * @return Zero on success 845 */ 846static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port) 847{ 848 uint64_t start_cycle; 849 cvmx_pemx_ctl_status_t pem_ctl_status; 850 cvmx_pciercx_cfg032_t pciercx_cfg032; 851 cvmx_pciercx_cfg448_t pciercx_cfg448; 852 853 /* Bring up the link */ 854 pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port)); 855 pem_ctl_status.s.lnk_enb = 1; 856 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64); 857 858 /* Wait for the link to come up */ 859 start_cycle = cvmx_get_cycle(); 860 do 861 { 862 if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE)) 863 return -1; 864 cvmx_wait(10000); 865 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); 866 } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1)); 867 868 /* Update the Replay Time Limit. Empirically, some PCIe devices take a 869 little longer to respond than expected under load. As a workaround for 870 this we configure the Replay Time Limit to the value expected for a 512 871 byte MPS instead of our actual 256 byte MPS. The numbers below are 872 directly from the PCIe spec table 3-4 */ 873 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); 874 switch (pciercx_cfg032.s.nlw) 875 { 876 case 1: /* 1 lane */ 877 pciercx_cfg448.s.rtl = 1677; 878 break; 879 case 2: /* 2 lanes */ 880 pciercx_cfg448.s.rtl = 867; 881 break; 882 case 4: /* 4 lanes */ 883 pciercx_cfg448.s.rtl = 462; 884 break; 885 case 8: /* 8 lanes */ 886 pciercx_cfg448.s.rtl = 258; 887 break; 888 } 889 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32); 890 891 return 0; 892} 893 894 895/** 896 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate 897 * the bus. 
898 * 899 * @param pcie_port PCIe port to initialize 900 * 901 * @return Zero on success 902 */ 903static int __cvmx_pcie_rc_initialize_gen2(int pcie_port) 904{ 905 int i; 906 cvmx_ciu_soft_prst_t ciu_soft_prst; 907 cvmx_mio_rst_ctlx_t mio_rst_ctl; 908 cvmx_pemx_bar_ctl_t pemx_bar_ctl; 909 cvmx_pemx_ctl_status_t pemx_ctl_status; 910 cvmx_pemx_bist_status_t pemx_bist_status; 911 cvmx_pemx_bist_status2_t pemx_bist_status2; 912 cvmx_pciercx_cfg032_t pciercx_cfg032; 913 cvmx_pciercx_cfg515_t pciercx_cfg515; 914 cvmx_sli_ctl_portx_t sli_ctl_portx; 915 cvmx_sli_mem_access_ctl_t sli_mem_access_ctl; 916 cvmx_sli_mem_access_subidx_t mem_access_subid; 917 cvmx_pemx_bar1_indexx_t bar1_index; 918 int ep_mode; 919 920 /* Make sure this interface is PCIe */ 921 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) 922 { 923 /* Requires reading the MIO_QLMX_CFG register to figure 924 out the port type. */ 925 int qlm = pcie_port; 926 int status; 927 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) 928 qlm = 3 - (pcie_port * 2); 929 else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) 930 { 931 cvmx_mio_qlmx_cfg_t qlm_cfg; 932 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1)); 933 if (qlm_cfg.s.qlm_cfg == 1) 934 qlm = 1; 935 } 936 /* PCIe is allowed only in QLM1, 1 PCIe port in x2 or 937 2 PCIe ports in x1 */ 938 else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) 939 qlm = 1; 940 status = cvmx_qlm_get_status(qlm); 941 if (status == 4 || status == 5) 942 { 943 cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port); 944 return -1; 945 } 946 if (status == 1) 947 { 948 cvmx_dprintf("PCIe: Port %d is SGMII, skipping.\n", pcie_port); 949 return -1; 950 } 951 if (status == 2) 952 { 953 cvmx_dprintf("PCIe: Port %d is XAUI, skipping.\n", pcie_port); 954 return -1; 955 } 956 if (status == -1) 957 { 958 cvmx_dprintf("PCIe: Port %d is unknown, skipping.\n", pcie_port); 959 return -1; 960 } 961 } 962 963#if 0 964 /* This code is so that the PCIe analyzer is able to see 63XX traffic */ 965 
cvmx_dprintf("PCIE : init for pcie analyzer.\n"); 966 cvmx_helper_qlm_jtag_init(); 967 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); 968 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); 969 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); 970 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); 971 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); 972 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); 973 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); 974 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); 975 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); 976 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85); 977 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1); 978 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86); 979 cvmx_helper_qlm_jtag_update(pcie_port); 980#endif 981 982 /* Make sure we aren't trying to setup a target mode interface in host mode */ 983 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port)); 984 ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX || OCTEON_IS_MODEL(OCTEON_CNF71XX)) ? 
(mio_rst_ctl.s.prtmode != 1) : (!mio_rst_ctl.s.host_mode)); 985 if (ep_mode) 986 { 987 cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port); 988 return -1; 989 } 990 991 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */ 992 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) 993 { 994 if (pcie_port) 995 { 996 cvmx_ciu_qlm1_t ciu_qlm; 997 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1); 998 ciu_qlm.s.txbypass = 1; 999 ciu_qlm.s.txdeemph = 5; 1000 ciu_qlm.s.txmargin = 0x17; 1001 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64); 1002 } 1003 else 1004 { 1005 cvmx_ciu_qlm0_t ciu_qlm; 1006 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0); 1007 ciu_qlm.s.txbypass = 1; 1008 ciu_qlm.s.txdeemph = 5; 1009 ciu_qlm.s.txmargin = 0x17; 1010 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64); 1011 } 1012 } 1013 /* Bring the PCIe out of reset */ 1014 if (pcie_port) 1015 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 1016 else 1017 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 1018 /* After a chip reset the PCIe will also be in reset. If it isn't, 1019 most likely someone is trying to init it again without a proper 1020 PCIe reset */ 1021 if (ciu_soft_prst.s.soft_prst == 0) 1022 { 1023 /* Reset the port */ 1024 ciu_soft_prst.s.soft_prst = 1; 1025 if (pcie_port) 1026 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 1027 else 1028 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 1029 /* Wait until pcie resets the ports. 
*/ 1030 cvmx_wait_usec(2000); 1031 } 1032 if (pcie_port) 1033 { 1034 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 1035 ciu_soft_prst.s.soft_prst = 0; 1036 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 1037 } 1038 else 1039 { 1040 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 1041 ciu_soft_prst.s.soft_prst = 0; 1042 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 1043 } 1044 1045 /* Wait for PCIe reset to complete */ 1046 cvmx_wait_usec(1000); 1047 1048 /* Check and make sure PCIe came out of reset. If it doesn't the board 1049 probably hasn't wired the clocks up and the interface should be 1050 skipped */ 1051 if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), cvmx_mio_rst_ctlx_t, rst_done, ==, 1, 10000)) 1052 { 1053 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port); 1054 return -1; 1055 } 1056 1057 /* Check BIST status */ 1058 pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port)); 1059 if (pemx_bist_status.u64) 1060 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64)); 1061 pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port)); 1062 /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */ 1063 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) 1064 pemx_bist_status2.u64 &= ~0x3full; 1065 if (pemx_bist_status2.u64) 1066 cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64)); 1067 1068 /* Initialize the config space CSRs */ 1069 __cvmx_pcie_rc_initialize_config_space(pcie_port); 1070 1071 /* Enable gen2 speed selection */ 1072 pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port)); 1073 pciercx_cfg515.s.dsc = 1; 1074 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32); 1075 1076 /* Bring the link up */ 1077 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) 1078 { 1079 /* Some gen1 devices don't 
handle the gen 2 training correctly. Disable 1080 gen2 and try again with only gen1 */ 1081 cvmx_pciercx_cfg031_t pciercx_cfg031; 1082 pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port)); 1083 pciercx_cfg031.s.mls = 1; 1084 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32); 1085 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) 1086 { 1087 cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port); 1088 return -1; 1089 } 1090 } 1091 1092 /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ 1093 sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL); 1094 sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */ 1095 sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */ 1096 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64); 1097 1098 /* Setup Mem access SubDIDs */ 1099 mem_access_subid.u64 = 0; 1100 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */ 1101 mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */ 1102 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */ 1103 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */ 1104 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ 1105 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */ 1106 /* PCIe Adddress Bits <63:34>. 
*/ 1107 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) 1108 mem_access_subid.cn68xx.ba = 0; 1109 else 1110 mem_access_subid.cn63xx.ba = 0; 1111 1112 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */ 1113 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++) 1114 { 1115 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64); 1116 /* Set each SUBID to extend the addressable range */ 1117 __cvmx_increment_ba(&mem_access_subid); 1118 } 1119 1120 if (!OCTEON_IS_MODEL(OCTEON_CN61XX)) 1121 { 1122 /* Disable the peer to peer forwarding register. This must be setup 1123 by the OS after it enumerates the bus and assigns addresses to the 1124 PCIe busses */ 1125 for (i=0; i<4; i++) 1126 { 1127 cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1); 1128 cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1); 1129 } 1130 } 1131 1132 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ 1133 cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0); 1134 1135 /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence 1136 where they overlap. It also overlaps with the device addresses, so 1137 make sure the peer to peer forwarding is set right */ 1138 cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0); 1139 1140 /* Setup BAR2 attributes */ 1141 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */ 1142 /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */ 1143 /* � WAIT_COM=0 will likely work for all applications. 
*/ 1144 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */ 1145 pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port)); 1146 pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/ 1147 pemx_bar_ctl.s.bar2_enb = 1; 1148 pemx_bar_ctl.s.bar2_esx = 1; 1149 pemx_bar_ctl.s.bar2_cax = 0; 1150 cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64); 1151 sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port)); 1152 sli_ctl_portx.s.ptlp_ro = 1; 1153 sli_ctl_portx.s.ctlp_ro = 1; 1154 sli_ctl_portx.s.wait_com = 0; 1155 sli_ctl_portx.s.waitl_com = 0; 1156 cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64); 1157 1158 /* BAR1 follows BAR2 */ 1159 cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE); 1160 1161 bar1_index.u64 = 0; 1162 bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22); 1163 bar1_index.s.ca = 1; /* Not Cached */ 1164 bar1_index.s.end_swp = 1; /* Endian Swap mode */ 1165 bar1_index.s.addr_v = 1; /* Valid entry */ 1166 1167 for (i = 0; i < 16; i++) { 1168 cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64); 1169 /* 256MB / 16 >> 22 == 4 */ 1170 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22); 1171 } 1172 1173 /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES 1174 clock */ 1175 pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port)); 1176 pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000; 1177 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64); 1178 1179 /* Display the link status */ 1180 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); 1181 cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls); 1182 1183 return 0; 1184} 1185 1186/** 1187 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus. 
1188 * 1189 * @param pcie_port PCIe port to initialize 1190 * 1191 * @return Zero on success 1192 */ 1193int cvmx_pcie_rc_initialize(int pcie_port) 1194{ 1195 int result; 1196 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1197 result = __cvmx_pcie_rc_initialize_gen1(pcie_port); 1198 else 1199 result = __cvmx_pcie_rc_initialize_gen2(pcie_port); 1200#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL) 1201 if (result == 0) 1202 cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port); 1203#endif 1204 return result; 1205} 1206 1207 1208/** 1209 * Shutdown a PCIe port and put it in reset 1210 * 1211 * @param pcie_port PCIe port to shutdown 1212 * 1213 * @return Zero on success 1214 */ 1215int cvmx_pcie_rc_shutdown(int pcie_port) 1216{ 1217#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL) 1218 cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port); 1219#endif 1220 /* Wait for all pending operations to complete */ 1221 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1222 { 1223 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000)) 1224 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port); 1225 } 1226 else 1227 { 1228 if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000)) 1229 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port); 1230 } 1231 1232 /* Force reset */ 1233 if (pcie_port) 1234 { 1235 cvmx_ciu_soft_prst_t ciu_soft_prst; 1236 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); 1237 ciu_soft_prst.s.soft_prst = 1; 1238 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); 1239 } 1240 else 1241 { 1242 cvmx_ciu_soft_prst_t ciu_soft_prst; 1243 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); 1244 ciu_soft_prst.s.soft_prst = 1; 1245 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); 1246 } 
1247 return 0; 1248} 1249 1250 1251/** 1252 * @INTERNAL 1253 * Build a PCIe config space request address for a device 1254 * 1255 * @param pcie_port PCIe port to access 1256 * @param bus Sub bus 1257 * @param dev Device ID 1258 * @param fn Device sub function 1259 * @param reg Register to access 1260 * 1261 * @return 64bit Octeon IO address 1262 */ 1263static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg) 1264{ 1265 cvmx_pcie_address_t pcie_addr; 1266 cvmx_pciercx_cfg006_t pciercx_cfg006; 1267 1268 pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port)); 1269 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0)) 1270 return 0; 1271 1272 pcie_addr.u64 = 0; 1273 pcie_addr.config.upper = 2; 1274 pcie_addr.config.io = 1; 1275 pcie_addr.config.did = 3; 1276 pcie_addr.config.subdid = 1; 1277 pcie_addr.config.es = 1; 1278 pcie_addr.config.port = pcie_port; 1279 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum); 1280 pcie_addr.config.bus = bus; 1281 pcie_addr.config.dev = dev; 1282 pcie_addr.config.func = fn; 1283 pcie_addr.config.reg = reg; 1284 return pcie_addr.u64; 1285} 1286 1287 1288/** 1289 * Read 8bits from a Device's config space 1290 * 1291 * @param pcie_port PCIe port the device is on 1292 * @param bus Sub bus 1293 * @param dev Device ID 1294 * @param fn Device sub function 1295 * @param reg Register to access 1296 * 1297 * @return Result of the read 1298 */ 1299uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg) 1300{ 1301 uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); 1302 if (address) 1303 return cvmx_read64_uint8(address); 1304 else 1305 return 0xff; 1306} 1307 1308 1309/** 1310 * Read 16bits from a Device's config space 1311 * 1312 * @param pcie_port PCIe port the device is on 1313 * @param bus Sub bus 1314 * @param dev Device ID 1315 * @param fn Device sub function 1316 * @param reg Register to access 1317 * 1318 * 
@return Result of the read 1319 */ 1320uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg) 1321{ 1322 uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); 1323 if (address) 1324 return cvmx_le16_to_cpu(cvmx_read64_uint16(address)); 1325 else 1326 return 0xffff; 1327} 1328 1329 1330/** 1331 * Read 32bits from a Device's config space 1332 * 1333 * @param pcie_port PCIe port the device is on 1334 * @param bus Sub bus 1335 * @param dev Device ID 1336 * @param fn Device sub function 1337 * @param reg Register to access 1338 * 1339 * @return Result of the read 1340 */ 1341uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg) 1342{ 1343 uint64_t address; 1344 1345 address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); 1346 if (address) 1347 return cvmx_le32_to_cpu(cvmx_read64_uint32(address)); 1348 else 1349 return 0xffffffff; 1350} 1351 1352 1353/** 1354 * Write 8bits to a Device's config space 1355 * 1356 * @param pcie_port PCIe port the device is on 1357 * @param bus Sub bus 1358 * @param dev Device ID 1359 * @param fn Device sub function 1360 * @param reg Register to access 1361 * @param val Value to write 1362 */ 1363void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val) 1364{ 1365 uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); 1366 if (address) 1367 cvmx_write64_uint8(address, val); 1368} 1369 1370 1371/** 1372 * Write 16bits to a Device's config space 1373 * 1374 * @param pcie_port PCIe port the device is on 1375 * @param bus Sub bus 1376 * @param dev Device ID 1377 * @param fn Device sub function 1378 * @param reg Register to access 1379 * @param val Value to write 1380 */ 1381void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val) 1382{ 1383 uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); 1384 if (address) 1385 
cvmx_write64_uint16(address, cvmx_cpu_to_le16(val)); 1386} 1387 1388 1389/** 1390 * Write 32bits to a Device's config space 1391 * 1392 * @param pcie_port PCIe port the device is on 1393 * @param bus Sub bus 1394 * @param dev Device ID 1395 * @param fn Device sub function 1396 * @param reg Register to access 1397 * @param val Value to write 1398 */ 1399void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val) 1400{ 1401 uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); 1402 if (address) 1403 cvmx_write64_uint32(address, cvmx_cpu_to_le32(val)); 1404} 1405 1406 1407/** 1408 * Read a PCIe config space register indirectly. This is used for 1409 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. 1410 * 1411 * @param pcie_port PCIe port to read from 1412 * @param cfg_offset Address to read 1413 * 1414 * @return Value read 1415 */ 1416uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset) 1417{ 1418 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1419 { 1420 cvmx_pescx_cfg_rd_t pescx_cfg_rd; 1421 pescx_cfg_rd.u64 = 0; 1422 pescx_cfg_rd.s.addr = cfg_offset; 1423 cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64); 1424 pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port)); 1425 return pescx_cfg_rd.s.data; 1426 } 1427 else 1428 { 1429 cvmx_pemx_cfg_rd_t pemx_cfg_rd; 1430 pemx_cfg_rd.u64 = 0; 1431 pemx_cfg_rd.s.addr = cfg_offset; 1432 cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64); 1433 pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port)); 1434 return pemx_cfg_rd.s.data; 1435 } 1436} 1437 1438 1439/** 1440 * Write a PCIe config space register indirectly. This is used for 1441 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. 
 *
 * @param pcie_port  PCIe port to write to
 * @param cfg_offset Address to write
 * @param val        Value to write
 */
void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
{
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* CN5XXX: indirect config write through PESCX_CFG_WR */
        cvmx_pescx_cfg_wr_t pescx_cfg_wr;
        pescx_cfg_wr.u64 = 0;
        pescx_cfg_wr.s.addr = cfg_offset;
        pescx_cfg_wr.s.data = val;
        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
    }
    else
    {
        /* CN6XXX: same mechanism through PEMX_CFG_WR */
        cvmx_pemx_cfg_wr_t pemx_cfg_wr;
        pemx_cfg_wr.u64 = 0;
        pemx_cfg_wr.s.addr = cfg_offset;
        pemx_cfg_wr.s.data = val;
        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
    }
}


/**
 * Initialize a PCIe port for use in target(EP) mode.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the port is strapped for host(RC) mode
 */
int cvmx_pcie_ep_initialize(int pcie_port)
{
    /* Refuse to run if the port is actually strapped as a host (RC) */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_ctl_status_t npei_ctl_status;
        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
        if (npei_ctl_status.s.host_mode)
            return -1;
    }
    else
    {
        cvmx_mio_rst_ctlx_t mio_rst_ctl;
        int ep_mode;
        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
        /* NOTE(review): unlike the RC bring-up path, only CN61XX is
           special-cased here to use MIO_RST_CTL[PRTMODE]; presumably
           CNF71XX needs the same treatment — confirm against the HRM. */
        ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX) ? (mio_rst_ctl.s.prtmode != 0) : mio_rst_ctl.s.host_mode);
        if (ep_mode)
            return -1;
    }

    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        if (pcie_port)
        {
            cvmx_ciu_qlm1_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
        }
        else
        {
            cvmx_ciu_qlm0_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
        }
    }

    /* Enable bus master and memory */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
        /* MPS/MRRS values differ by chip family (see MPS_*/MRRS_* macros) */
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pcieepx_cfg030.s.mps = MPS_CN5XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pcieepx_cfg030.s.mps = MPS_CN6XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pcieepx_cfg030.s.ro_en = 1;  /* Enable relaxed ordering. */
        pcieepx_cfg030.s.ns_en = 1;  /* Enable no snoop. */
        pcieepx_cfg030.s.ce_en = 1;  /* Correctable error reporting enable. */
        pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pcieepx_cfg030.s.fe_en = 1;  /* Fatal error reporting enable. */
        pcieepx_cfg030.s.ur_en = 1;  /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX;   /* Max payload size = 128 bytes (Limit of most PCs) */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        /* Max outstanding load request. */
        prt_cfg.s.molr = 32;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        /* Keep the SLI-to-MAC MRRS consistent with the port config above */
        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* Setup Mem access SubDID 12 to access Host memory */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 1;       /* Merging is not allowed in this window. */
        mem_access_subid.s.esr = 0;          /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;          /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;          /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
        mem_access_subid.s.nsw = 0;          /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;          /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;          /* Disable Relaxed Ordering for Writes. */
        mem_access_subid.s.ba = 0;           /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
    }
    else
    {
        cvmx_sli_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 0;       /* Merging is allowed in this window. */
        mem_access_subid.s.esr = 0;          /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;          /* Endian-swap for Writes. */
        mem_access_subid.s.wtype = 0;        /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.rtype = 0;        /* "No snoop" and "Relaxed ordering" are not set */
        /* PCIe Address Bits <63:34>. */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
            mem_access_subid.cn68xx.ba = 0;
        else
            mem_access_subid.cn63xx.ba = 0;
        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port * 4), mem_access_subid.u64);
    }
    return 0;
}


/**
 * Wait for posted PCIe read/writes to reach the other side of
 * the internal PCIe switch. This will insure that core
 * read/writes are posted before anything after this function
 * is called. This may be necessary when writing to memory that
 * will later be read using the DMA/PKT engines.
1617 * 1618 * @param pcie_port PCIe port to wait for 1619 */ 1620void cvmx_pcie_wait_for_pending(int pcie_port) 1621{ 1622 if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 1623 { 1624 cvmx_npei_data_out_cnt_t npei_data_out_cnt; 1625 int a; 1626 int b; 1627 int c; 1628 1629 /* See section 9.8, PCIe Core-initiated Requests, in the manual for a 1630 description of how this code works */ 1631 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT); 1632 if (pcie_port) 1633 { 1634 if (!npei_data_out_cnt.s.p1_fcnt) 1635 return; 1636 a = npei_data_out_cnt.s.p1_ucnt; 1637 b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff; 1638 } 1639 else 1640 { 1641 if (!npei_data_out_cnt.s.p0_fcnt) 1642 return; 1643 a = npei_data_out_cnt.s.p0_ucnt; 1644 b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff; 1645 } 1646 1647 while (1) 1648 { 1649 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT); 1650 c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt; 1651 if (a<=b) 1652 { 1653 if ((c<a) || (c>b)) 1654 return; 1655 } 1656 else 1657 { 1658 if ((c>b) && (c<a)) 1659 return; 1660 } 1661 } 1662 } 1663 else 1664 { 1665 cvmx_sli_data_out_cnt_t sli_data_out_cnt; 1666 int a; 1667 int b; 1668 int c; 1669 1670 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT); 1671 if (pcie_port) 1672 { 1673 if (!sli_data_out_cnt.s.p1_fcnt) 1674 return; 1675 a = sli_data_out_cnt.s.p1_ucnt; 1676 b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff; 1677 } 1678 else 1679 { 1680 if (!sli_data_out_cnt.s.p0_fcnt) 1681 return; 1682 a = sli_data_out_cnt.s.p0_ucnt; 1683 b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff; 1684 } 1685 1686 while (1) 1687 { 1688 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT); 1689 c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt; 1690 if (a<=b) 1691 { 1692 if ((c<a) || (c>b)) 1693 return; 1694 } 1695 else 1696 { 1697 if ((c>b) && (c<a)) 1698 return; 1699 } 1700 } 1701 } 1702} 1703