cvmx-pcie.c revision 230040
1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42
43
44
45
46/**
47 * @file
48 *
49 * Interface to PCIe as a host(RC) or target(EP)
50 *
51 * <hr>$Revision: 52004 $<hr>
52 */
53#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
54#include <asm/octeon/cvmx.h>
55#include <asm/octeon/cvmx-config.h>
56#include <asm/octeon/cvmx-clock.h>
57#include <asm/octeon/cvmx-ciu-defs.h>
58#include <asm/octeon/cvmx-dpi-defs.h>
59#include <asm/octeon/cvmx-npi-defs.h>
60#include <asm/octeon/cvmx-npei-defs.h>
61#include <asm/octeon/cvmx-pci-defs.h>
62#include <asm/octeon/cvmx-pcieepx-defs.h>
63#include <asm/octeon/cvmx-pciercx-defs.h>
64#include <asm/octeon/cvmx-pemx-defs.h>
65#include <asm/octeon/cvmx-pexp-defs.h>
66#include <asm/octeon/cvmx-pescx-defs.h>
67#include <asm/octeon/cvmx-sli-defs.h>
68#include <asm/octeon/cvmx-sriox-defs.h>
69
70#ifdef CONFIG_CAVIUM_DECODE_RSL
71#include <asm/octeon/cvmx-error.h>
72#endif
73#include <asm/octeon/cvmx-helper.h>
74#include <asm/octeon/cvmx-helper-board.h>
75#include <asm/octeon/cvmx-helper-errata.h>
76#include <asm/octeon/cvmx-pcie.h>
77#include <asm/octeon/cvmx-sysinfo.h>
78#include <asm/octeon/cvmx-swap.h>
79#include <asm/octeon/cvmx-wqe.h>
80#else
81#include "cvmx.h"
82#include "cvmx-csr-db.h"
83#include "cvmx-pcie.h"
84#include "cvmx-sysinfo.h"
85#include "cvmx-swap.h"
86#include "cvmx-wqe.h"
87#include "cvmx-error.h"
88#include "cvmx-helper-errata.h"
89#endif
90
91#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
92#define MPS_CN5XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
93#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
94#define MPS_CN6XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
95
96/**
97 * Return the Core virtual base address for PCIe IO access. IOs are
98 * read/written as an offset from this address.
99 *
100 * @param pcie_port PCIe port the IO is for
101 *
102 * @return 64bit Octeon IO base address for read/write
103 */
104uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
105{
106    cvmx_pcie_address_t pcie_addr;
107    pcie_addr.u64 = 0;
108    pcie_addr.io.upper = 0;
109    pcie_addr.io.io = 1;
110    pcie_addr.io.did = 3;
111    pcie_addr.io.subdid = 2;
112    pcie_addr.io.es = 1;
113    pcie_addr.io.port = pcie_port;
114    return pcie_addr.u64;
115}
116
117
118/**
119 * Size of the IO address region returned at address
120 * cvmx_pcie_get_io_base_address()
121 *
122 * @param pcie_port PCIe port the IO is for
123 *
124 * @return Size of the IO window
125 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Every PCIe port exposes a fixed 4GB (2^32 byte) IO window,
       regardless of which port is asked about. */
    (void)pcie_port;            /* size is identical for all ports */
    return 0x100000000ull;      /* 1ull << 32 */
}
130
131
132/**
133 * Return the Core virtual base address for PCIe MEM access. Memory is
134 * read/written as an offset from this address.
135 *
136 * @param pcie_port PCIe port the IO is for
137 *
138 * @return 64bit Octeon IO base address for read/write
139 */
140uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
141{
142    cvmx_pcie_address_t pcie_addr;
143    pcie_addr.u64 = 0;
144    pcie_addr.mem.upper = 0;
145    pcie_addr.mem.io = 1;
146    pcie_addr.mem.did = 3;
147    pcie_addr.mem.subdid = 3 + pcie_port;
148    return pcie_addr.u64;
149}
150
151
152/**
153 * Size of the Mem address region returned at address
154 * cvmx_pcie_get_mem_base_address()
155 *
156 * @param pcie_port PCIe port the IO is for
157 *
158 * @return Size of the Mem window
159 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Every PCIe port exposes a fixed 64GB (2^36 byte) memory window,
       regardless of which port is asked about. */
    (void)pcie_port;            /* size is identical for all ports */
    return 0x1000000000ull;     /* 1ull << 36 */
}
164
165
166/**
167 * @INTERNAL
168 * Initialize the RC config space CSRs
169 *
170 * @param pcie_port PCIe port to initialize
171 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Program the root-complex PCIe config-space CSRs for one port:
       payload/read-request sizes, error reporting, ECRC, bus numbers,
       BAR windows, and interrupt enables. Each step below writes a
       distinct config register; the order follows the hardware manual's
       RC bring-up sequence. */

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        /* CN5XXX and CN6XXX families use different MPS/MRRS settings
           (see the MRRS_/MPS_ defines at the top of this file). */
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    /* The DMA-engine side (NPEI on CN5XXX, DPI/SLI on CN6XXX) must be
       programmed with MPS/MRRS values consistent with CFG030 above. */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
        /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        /* base (0x100) > limit (0) disables this window */
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100;
        pciercx_cfg008.s.ml_addr = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        /* base (0x100) > limit (0) disables the prefetchable window */
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100;
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
347
348/**
349 * @INTERNAL
350 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
351 * port from reset to a link up state. Software can then begin
352 * configuring the rest of the link.
353 *
354 * @param pcie_port PCIe port to initialize
355 *
356 * @return Zero on success
357 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    /* Bring a gen1 RC link from reset to link-up: program the lane-width
       mode, apply CN52XX errata workarounds, enable the link, poll for
       DLL-active, then tune the replay time limit for the negotiated
       lane count. Returns 0 on success, -1 on link timeout. */
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      /* The Lanner MR730 board is the one known exception that must not
         have lane swap enabled */
      switch (cvmx_sysinfo_get()->board_type)
      {
#if defined(OCTEON_VENDOR_LANNER)
	case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
	  break;
#endif
	default:
	  pescx_ctl_status.s.lane_swp = 1;
	  break;
      }
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up (PCIE*_CFG032[DLLA] set). The timeout
       is 2 * core-clock-rate cycles, i.e. roughly two seconds assuming
       cvmx_get_cycle() counts core clocks — TODO confirm */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    /* NOTE(review): no default case — rtl is left at its current value for
       any unexpected negotiated link width */
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
458
459
460/**
461 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
462 * the bus.
463 *
464 * @param pcie_port PCIe port to initialize
465 *
466 * @return Zero on success
467 */
468static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
469{
470    int i;
471    int base;
472    uint64_t addr_swizzle;
473    cvmx_ciu_soft_prst_t ciu_soft_prst;
474    cvmx_pescx_bist_status_t pescx_bist_status;
475    cvmx_pescx_bist_status2_t pescx_bist_status2;
476    cvmx_npei_ctl_status_t npei_ctl_status;
477    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
478    cvmx_npei_mem_access_subidx_t mem_access_subid;
479    cvmx_npei_dbg_data_t npei_dbg_data;
480    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
481    cvmx_pciercx_cfg032_t pciercx_cfg032;
482    cvmx_npei_bar1_indexx_t bar1_index;
483
484retry:
485    /* Make sure we aren't trying to setup a target mode interface in host mode */
486    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
487    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
488    {
489        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
490        return -1;
491    }
492
493    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
494    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
495    {
496        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
497        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
498        {
499            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
500            return -1;
501        }
502    }
503
504    /* Make sure a CN56XX pass 1 isn't trying to do anything; errata for PASS 1 */
505    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
506        cvmx_dprintf ("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
507        return -1;
508    }
509
510    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
511    npei_ctl_status.s.arb = 1;
512    /* Allow up to 0x20 config retries */
513    npei_ctl_status.s.cfg_rtry = 0x20;
514    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
515    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
516    {
517        npei_ctl_status.s.p0_ntags = 0x20;
518        npei_ctl_status.s.p1_ntags = 0x20;
519    }
520    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
521
522    /* Bring the PCIe out of reset */
523    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
524    {
525        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
526            workaround for this bug, we bring both PCIe ports out of reset at
527            the same time instead of on separate calls. So for port 0, we bring
528            both out of reset and do nothing on port 1 */
529        if (pcie_port == 0)
530        {
531            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
532            /* After a chip reset the PCIe will also be in reset. If it isn't,
533                most likely someone is trying to init it again without a proper
534                PCIe reset */
535            if (ciu_soft_prst.s.soft_prst == 0)
536            {
537		/* Reset the ports */
538		ciu_soft_prst.s.soft_prst = 1;
539		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
540		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
541		ciu_soft_prst.s.soft_prst = 1;
542		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
543		/* Wait until pcie resets the ports. */
544		cvmx_wait_usec(2000);
545            }
546            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
547            ciu_soft_prst.s.soft_prst = 0;
548            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
549            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
550            ciu_soft_prst.s.soft_prst = 0;
551            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
552        }
553    }
554    else
555    {
556        /* The normal case: The PCIe ports are completely separate and can be
557            brought out of reset independently */
558        if (pcie_port)
559            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
560        else
561            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
562        /* After a chip reset the PCIe will also be in reset. If it isn't,
563            most likely someone is trying to init it again without a proper
564            PCIe reset */
565        if (ciu_soft_prst.s.soft_prst == 0)
566        {
567	    /* Reset the port */
568	    ciu_soft_prst.s.soft_prst = 1;
569	    if (pcie_port)
570		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
571 	    else
572		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
573	    /* Wait until pcie resets the ports. */
574	    cvmx_wait_usec(2000);
575        }
576        if (pcie_port)
577        {
578            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
579            ciu_soft_prst.s.soft_prst = 0;
580            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
581        }
582        else
583        {
584            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
585            ciu_soft_prst.s.soft_prst = 0;
586            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
587        }
588    }
589
590    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
591       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
592    cvmx_wait(400000);
593
594    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
595        CN52XX, so we only probe it on newer chips */
596    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
597    {
598        /* Clear PCLK_RUN so we can check if the clock is running */
599        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
600        pescx_ctl_status2.s.pclk_run = 1;
601        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
602        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
603            us the clock is running */
604        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
605            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
606        {
607            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
608            return -1;
609        }
610    }
611
612    /* Check and make sure PCIe came out of reset. If it doesn't the board
613        probably hasn't wired the clocks up and the interface should be
614        skipped */
615    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
616    if (pescx_ctl_status2.s.pcierst)
617    {
618        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
619        return -1;
620    }
621
622    /* Check BIST2 status. If any bits are set skip this interface. This
623        is an attempt to catch PCIE-813 on pass 1 parts */
624    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
625    if (pescx_bist_status2.u64)
626    {
627        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
628        return -1;
629    }
630
631    /* Check BIST status */
632    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
633    if (pescx_bist_status.u64)
634        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
635
636    /* Initialize the config space CSRs */
637    __cvmx_pcie_rc_initialize_config_space(pcie_port);
638
639    /* Bring the link up */
640    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
641    {
642        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
643        return -1;
644    }
645
646    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
647    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
648    npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
649    npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
650    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
651
652    /* Setup Mem access SubDIDs */
653    mem_access_subid.u64 = 0;
654    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
655    mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
656    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
657    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
658    mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
659    mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
660    mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
661    mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
662    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
663
664    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
665    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
666    {
667        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
668        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
669    }
670
671    /* Disable the peer to peer forwarding register. This must be setup
672        by the OS after it enumerates the bus and assigns addresses to the
673        PCIe busses */
674    for (i=0; i<4; i++)
675    {
676        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
677        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
678    }
679
680    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
681    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
682
683    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
684    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
685
686    bar1_index.u32 = 0;
687    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
688    bar1_index.s.ca = 1;       /* Not Cached */
689    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
690    bar1_index.s.addr_v = 1;   /* Valid entry */
691
692    base = pcie_port ? 16 : 0;
693
694    /* Big endian swizzle for 32-bit PEXP_NCB register. */
695#ifdef __MIPSEB__
696    addr_swizzle = 4;
697#else
698    addr_swizzle = 0;
699#endif
700    for (i = 0; i < 16; i++) {
701        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
702        base++;
703        /* 256MB / 16 >> 22 == 4 */
704        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
705    }
706
707    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
708        where they overlap. It also overlaps with the device addresses, so
709        make sure the peer to peer forwarding is set right */
710    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
711
712    /* Setup BAR2 attributes */
713    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
714    /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
715    /* - WAIT_COM=0 will likely work for all applications. */
716    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
717    if (pcie_port)
718    {
719        cvmx_npei_ctl_port1_t npei_ctl_port;
720        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
721        npei_ctl_port.s.bar2_enb = 1;
722        npei_ctl_port.s.bar2_esx = 1;
723        npei_ctl_port.s.bar2_cax = 0;
724        npei_ctl_port.s.ptlp_ro = 1;
725        npei_ctl_port.s.ctlp_ro = 1;
726        npei_ctl_port.s.wait_com = 0;
727        npei_ctl_port.s.waitl_com = 0;
728        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
729    }
730    else
731    {
732        cvmx_npei_ctl_port0_t npei_ctl_port;
733        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
734        npei_ctl_port.s.bar2_enb = 1;
735        npei_ctl_port.s.bar2_esx = 1;
736        npei_ctl_port.s.bar2_cax = 0;
737        npei_ctl_port.s.ptlp_ro = 1;
738        npei_ctl_port.s.ctlp_ro = 1;
739        npei_ctl_port.s.wait_com = 0;
740        npei_ctl_port.s.waitl_com = 0;
741        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
742    }
743
744    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
745        TLP ordering to not be preserved after multiple PCIe port resets. This
746        code detects this fault and corrects it by aligning the TLP counters
747        properly. Another link reset is then performed. See PCIE-13340 */
748    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
749        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
750    {
751        cvmx_npei_dbg_data_t dbg_data;
752        int old_in_fif_p_count;
753        int in_fif_p_count;
754        int out_p_count;
755        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
756        int i;
757
758        /* Choose a write address of 1MB. It should be harmless as all bars
759            haven't been setup */
760        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
761
762        /* Make sure at least in_p_offset have been executed before we try and
763            read in_fif_p_count */
764        i = in_p_offset;
765        while (i--)
766        {
767            cvmx_write64_uint32(write_address, 0);
768            cvmx_wait(10000);
769        }
770
771        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
772            unstable sometimes so read it twice with a write between the reads.
773            This way we can tell the value is good as it will increment by one
774            due to the write */
775        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
776        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
777        do
778        {
779            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
780            old_in_fif_p_count = dbg_data.s.data & 0xff;
781            cvmx_write64_uint32(write_address, 0);
782            cvmx_wait(10000);
783            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
784            in_fif_p_count = dbg_data.s.data & 0xff;
785        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
786
787        /* Update in_fif_p_count for it's offset with respect to out_p_count */
788        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
789
790        /* Read the OUT_P_COUNT from the debug select */
791        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
792        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
793        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
794        out_p_count = (dbg_data.s.data>>1) & 0xff;
795
796        /* Check that the two counters are aligned */
797        if (out_p_count != in_fif_p_count)
798        {
799            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
800            while (in_fif_p_count != 0)
801            {
802                cvmx_write64_uint32(write_address, 0);
803                cvmx_wait(10000);
804                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
805            }
806            /* The EBH5200 board swapped the PCIe reset lines on the board. This
807                means we must bring both links down and up, which will cause the
808                PCIe0 to need alignment again. Lots of messages will be displayed,
809                but everything should work */
810            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
811                (pcie_port == 1))
812                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
814            goto retry;
815        }
816    }
817
818    /* Display the link status */
819    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
820    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
821
822    return 0;
823}
824
825
826/**
827 * @INTERNAL
828 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
829 * port from reset to a link up state. Software can then begin
830 * configuring the rest of the link.
831 *
832 * @param pcie_port PCIe port to initialize
833 *
834 * @return Zero on success
835 */
836static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
837{
838    uint64_t start_cycle;
839    cvmx_pemx_ctl_status_t pem_ctl_status;
840    cvmx_pciercx_cfg032_t pciercx_cfg032;
841    cvmx_pciercx_cfg448_t pciercx_cfg448;
842
843    /* Bring up the link */
844    pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
845    pem_ctl_status.s.lnk_enb = 1;
846    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
847
848    /* Wait for the link to come up */
849    start_cycle = cvmx_get_cycle();
850    do
851    {
852        if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
853            return -1;
854        cvmx_wait(10000);
855        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
856    } while (pciercx_cfg032.s.dlla == 0);
857
858    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
859        little longer to respond than expected under load. As a workaround for
860        this we configure the Replay Time Limit to the value expected for a 512
861        byte MPS instead of our actual 256 byte MPS. The numbers below are
862        directly from the PCIe spec table 3-4 */
863    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
864    switch (pciercx_cfg032.s.nlw)
865    {
866        case 1: /* 1 lane */
867            pciercx_cfg448.s.rtl = 1677;
868            break;
869        case 2: /* 2 lanes */
870            pciercx_cfg448.s.rtl = 867;
871            break;
872        case 4: /* 4 lanes */
873            pciercx_cfg448.s.rtl = 462;
874            break;
875        case 8: /* 8 lanes */
876            pciercx_cfg448.s.rtl = 258;
877            break;
878    }
879    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
880
881    return 0;
882}
883
884
885/**
886 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
887 * the bus.
888 *
889 * @param pcie_port PCIe port to initialize
890 *
891 * @return Zero on success
892 */
893static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
894{
895    int i;
896    cvmx_ciu_soft_prst_t ciu_soft_prst;
897    cvmx_mio_rst_ctlx_t mio_rst_ctl;
898    cvmx_pemx_bar_ctl_t pemx_bar_ctl;
899    cvmx_pemx_ctl_status_t pemx_ctl_status;
900    cvmx_pemx_bist_status_t pemx_bist_status;
901    cvmx_pemx_bist_status2_t pemx_bist_status2;
902    cvmx_pciercx_cfg032_t pciercx_cfg032;
903    cvmx_pciercx_cfg515_t pciercx_cfg515;
904    cvmx_sli_ctl_portx_t sli_ctl_portx;
905    cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
906    cvmx_sli_mem_access_subidx_t mem_access_subid;
907    cvmx_mio_rst_ctlx_t mio_rst_ctlx;
908    cvmx_sriox_status_reg_t sriox_status_reg;
909    cvmx_pemx_bar1_indexx_t bar1_index;
910
911    /* Make sure this interface isn't SRIO */
912    sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
913    if (sriox_status_reg.s.srio)
914    {
915        cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
916        return -1;
917    }
918
919    /* Make sure we aren't trying to setup a target mode interface in host mode */
920    mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
921    if (!mio_rst_ctl.s.host_mode)
922    {
923        cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
924        return -1;
925    }
926
927    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
928    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
929    {
930        if (pcie_port)
931        {
932            cvmx_ciu_qlm1_t ciu_qlm;
933            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
934            ciu_qlm.s.txbypass = 1;
935            ciu_qlm.s.txdeemph = 5;
936            ciu_qlm.s.txmargin = 0x17;
937            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
938        }
939        else
940        {
941            cvmx_ciu_qlm0_t ciu_qlm;
942            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
943            ciu_qlm.s.txbypass = 1;
944            ciu_qlm.s.txdeemph = 5;
945            ciu_qlm.s.txmargin = 0x17;
946            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
947        }
948    }
949
950    /* Bring the PCIe out of reset */
951    if (pcie_port)
952        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
953    else
954        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
955    /* After a chip reset the PCIe will also be in reset. If it isn't,
956        most likely someone is trying to init it again without a proper
957        PCIe reset */
958    if (ciu_soft_prst.s.soft_prst == 0)
959    {
960        /* Reset the port */
961        ciu_soft_prst.s.soft_prst = 1;
962        if (pcie_port)
963            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
964        else
965            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
966        /* Wait until pcie resets the ports. */
967        cvmx_wait_usec(2000);
968    }
969    if (pcie_port)
970    {
971        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
972        ciu_soft_prst.s.soft_prst = 0;
973        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
974    }
975    else
976    {
977        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
978        ciu_soft_prst.s.soft_prst = 0;
979        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
980    }
981
982    /* Wait for PCIe reset to complete */
983    cvmx_wait_usec(1000);
984
985    /* Check and make sure PCIe came out of reset. If it doesn't the board
986        probably hasn't wired the clocks up and the interface should be
987        skipped */
988    mio_rst_ctlx.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
989    if (!mio_rst_ctlx.s.rst_done)
990    {
991        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
992        return -1;
993    }
994
995    /* Check BIST status */
996    pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
997    if (pemx_bist_status.u64)
998        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
999    pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
1000    if (pemx_bist_status2.u64)
1001        cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
1002
1003    /* Initialize the config space CSRs */
1004    __cvmx_pcie_rc_initialize_config_space(pcie_port);
1005
1006    /* Enable gen2 speed selection */
1007    pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1008    pciercx_cfg515.s.dsc = 1;
1009    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
1010
1011    /* Bring the link up */
1012    if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1013    {
1014        /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1015            gen2 and try again with only gen1 */
1016        cvmx_pciercx_cfg031_t pciercx_cfg031;
1017        pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1018        pciercx_cfg031.s.mls = 1;
1019        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg515.u32);
1020        if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1021        {
1022            cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1023            return -1;
1024        }
1025    }
1026
1027    /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1028    sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1029    sli_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
1030    sli_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
1031    cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1032
1033    /* Setup Mem access SubDIDs */
1034    mem_access_subid.u64 = 0;
1035    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1036    mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
1037    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
1038    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
1039    mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1040    mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1041    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
1042
1043    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1044    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1045    {
1046        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1047        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
1048    }
1049
1050    /* Disable the peer to peer forwarding register. This must be setup
1051        by the OS after it enumerates the bus and assigns addresses to the
1052        PCIe busses */
1053    for (i=0; i<4; i++)
1054    {
1055        cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1056        cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1057    }
1058
1059    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1060    cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1061
1062    /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1063        where they overlap. It also overlaps with the device addresses, so
1064        make sure the peer to peer forwarding is set right */
1065    cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1066
1067    /* Setup BAR2 attributes */
1068    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1069    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
1070    /* � WAIT_COM=0 will likely work for all applications. */
1071    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1072    pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1073    pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1*/
1074    pemx_bar_ctl.s.bar2_enb = 1;
1075    pemx_bar_ctl.s.bar2_esx = 1;
1076    pemx_bar_ctl.s.bar2_cax = 0;
1077    cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1078    sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1079    sli_ctl_portx.s.ptlp_ro = 1;
1080    sli_ctl_portx.s.ctlp_ro = 1;
1081    sli_ctl_portx.s.wait_com = 0;
1082    sli_ctl_portx.s.waitl_com = 0;
1083    cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1084
1085    /* BAR1 follows BAR2 */
1086    cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1087
1088    bar1_index.u64 = 0;
1089    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1090    bar1_index.s.ca = 1;       /* Not Cached */
1091    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
1092    bar1_index.s.addr_v = 1;   /* Valid entry */
1093
1094    for (i = 0; i < 16; i++) {
1095        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1096        /* 256MB / 16 >> 22 == 4 */
1097        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1098    }
1099
1100    /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1101        clock */
1102    pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1103    pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1104    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1105
1106    /* Display the link status */
1107    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1108    cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1109
1110    return 0;
1111}
1112
1113/**
1114 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1115 *
1116 * @param pcie_port PCIe port to initialize
1117 *
1118 * @return Zero on success
1119 */
1120int cvmx_pcie_rc_initialize(int pcie_port)
1121{
1122    int result;
1123    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1124        result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1125    else
1126        result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1127#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1128    if (result == 0)
1129        cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1130#endif
1131    return result;
1132}
1133
1134
1135/**
1136 * Shutdown a PCIe port and put it in reset
1137 *
1138 * @param pcie_port PCIe port to shutdown
1139 *
1140 * @return Zero on success
1141 */
1142int cvmx_pcie_rc_shutdown(int pcie_port)
1143{
1144#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1145    cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1146#endif
1147    /* Wait for all pending operations to complete */
1148    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1149    {
1150        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1151            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1152    }
1153    else
1154    {
1155        if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1156            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1157    }
1158
1159    /* Force reset */
1160    if (pcie_port)
1161    {
1162        cvmx_ciu_soft_prst_t ciu_soft_prst;
1163        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1164        ciu_soft_prst.s.soft_prst = 1;
1165        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1166    }
1167    else
1168    {
1169        cvmx_ciu_soft_prst_t ciu_soft_prst;
1170        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1171        ciu_soft_prst.s.soft_prst = 1;
1172        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1173    }
1174    return 0;
1175}
1176
1177
1178/**
1179 * @INTERNAL
1180 * Build a PCIe config space request address for a device
1181 *
1182 * @param pcie_port PCIe port to access
1183 * @param bus       Sub bus
1184 * @param dev       Device ID
1185 * @param fn        Device sub function
1186 * @param reg       Register to access
1187 *
1188 * @return 64bit Octeon IO address
1189 */
1190static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1191{
1192    cvmx_pcie_address_t pcie_addr;
1193    cvmx_pciercx_cfg006_t pciercx_cfg006;
1194
1195    pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1196    if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1197        return 0;
1198
1199    pcie_addr.u64 = 0;
1200    pcie_addr.config.upper = 2;
1201    pcie_addr.config.io = 1;
1202    pcie_addr.config.did = 3;
1203    pcie_addr.config.subdid = 1;
1204    pcie_addr.config.es = 1;
1205    pcie_addr.config.port = pcie_port;
1206    pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1207    pcie_addr.config.bus = bus;
1208    pcie_addr.config.dev = dev;
1209    pcie_addr.config.func = fn;
1210    pcie_addr.config.reg = reg;
1211    return pcie_addr.u64;
1212}
1213
1214
1215/**
1216 * Read 8bits from a Device's config space
1217 *
1218 * @param pcie_port PCIe port the device is on
1219 * @param bus       Sub bus
1220 * @param dev       Device ID
1221 * @param fn        Device sub function
1222 * @param reg       Register to access
1223 *
1224 * @return Result of the read
1225 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Invalid target: return all ones, as a master abort would */
    if (!address)
        return 0xff;
    return cvmx_read64_uint8(address);
}
1234
1235
1236/**
1237 * Read 16bits from a Device's config space
1238 *
1239 * @param pcie_port PCIe port the device is on
1240 * @param bus       Sub bus
1241 * @param dev       Device ID
1242 * @param fn        Device sub function
1243 * @param reg       Register to access
1244 *
1245 * @return Result of the read
1246 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Invalid target: return all ones, as a master abort would */
    if (!address)
        return 0xffff;
    return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
}
1255
1256
1257/**
1258 * Read 32bits from a Device's config space
1259 *
1260 * @param pcie_port PCIe port the device is on
1261 * @param bus       Sub bus
1262 * @param dev       Device ID
1263 * @param fn        Device sub function
1264 * @param reg       Register to access
1265 *
1266 * @return Result of the read
1267 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Invalid target: return all ones, as a master abort would */
    if (!address)
        return 0xffffffff;
    return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
}
1276
1277
1278/**
1279 * Write 8bits to a Device's config space
1280 *
1281 * @param pcie_port PCIe port the device is on
1282 * @param bus       Sub bus
1283 * @param dev       Device ID
1284 * @param fn        Device sub function
1285 * @param reg       Register to access
1286 * @param val       Value to write
1287 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to invalid targets are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint8(address, val);
}
1294
1295
1296/**
1297 * Write 16bits to a Device's config space
1298 *
1299 * @param pcie_port PCIe port the device is on
1300 * @param bus       Sub bus
1301 * @param dev       Device ID
1302 * @param fn        Device sub function
1303 * @param reg       Register to access
1304 * @param val       Value to write
1305 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to invalid targets are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
1312
1313
1314/**
1315 * Write 32bits to a Device's config space
1316 *
1317 * @param pcie_port PCIe port the device is on
1318 * @param bus       Sub bus
1319 * @param dev       Device ID
1320 * @param fn        Device sub function
1321 * @param reg       Register to access
1322 * @param val       Value to write
1323 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to invalid targets are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
1330
1331
1332/**
1333 * Read a PCIe config space register indirectly. This is used for
1334 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1335 *
1336 * @param pcie_port  PCIe port to read from
1337 * @param cfg_offset Address to read
1338 *
1339 * @return Value read
1340 */
1341uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1342{
1343    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1344    {
1345        cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1346        pescx_cfg_rd.u64 = 0;
1347        pescx_cfg_rd.s.addr = cfg_offset;
1348        cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1349        pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1350        return pescx_cfg_rd.s.data;
1351    }
1352    else
1353    {
1354        cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1355        pemx_cfg_rd.u64 = 0;
1356        pemx_cfg_rd.s.addr = cfg_offset;
1357        cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1358        pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1359        return pemx_cfg_rd.s.data;
1360    }
1361}
1362
1363
1364/**
1365 * Write a PCIe config space register indirectly. This is used for
1366 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1367 *
1368 * @param pcie_port  PCIe port to write to
1369 * @param cfg_offset Address to write
1370 * @param val        Value to write
1371 */
1372void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1373{
1374    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1375    {
1376        cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1377        pescx_cfg_wr.u64 = 0;
1378        pescx_cfg_wr.s.addr = cfg_offset;
1379        pescx_cfg_wr.s.data = val;
1380        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1381    }
1382    else
1383    {
1384        cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1385        pemx_cfg_wr.u64 = 0;
1386        pemx_cfg_wr.s.addr = cfg_offset;
1387        pemx_cfg_wr.s.data = val;
1388        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1389    }
1390}
1391
1392
1393/**
1394 * Initialize a PCIe port for use in target(EP) mode.
1395 *
1396 * @param pcie_port PCIe port to initialize
1397 *
1398 * @return Zero on success
1399 */
int cvmx_pcie_ep_initialize(int pcie_port)
{
    /* Refuse to run if the port is strapped for host (RC) mode; this
        function only configures endpoint mode */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_ctl_status_t npei_ctl_status;
        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
        if (npei_ctl_status.s.host_mode)
            return -1;
    }
    else
    {
        cvmx_mio_rst_ctlx_t mio_rst_ctl;
        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
        if (mio_rst_ctl.s.host_mode)
            return -1;
    }

    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        if (pcie_port)
        {
            cvmx_ciu_qlm1_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
        }
        else
        {
            cvmx_ciu_qlm0_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
        }
    }

    /* Enable bus master and memory */
    /* 0x6 = mem space enable (bit 1) | bus master enable (bit 2) in the
        standard PCI command register */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pcieepx_cfg030.s.mps = MPS_CN5XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pcieepx_cfg030.s.mps = MPS_CN6XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
        pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
        pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
    }

    /* Mirror the MPS/MRRS choices into the DMA engine side so the two
        agree with the config-space values programmed above */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* Setup Mem access SubDID 12 to access Host memory */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 1;  /* Merging is not allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
    }
    else
    {
        cvmx_sli_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 0;  /* Merging is allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        /* SubDIDs 12-15 belong to port 0, 16-19 to port 1 on SLI chips */
        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
    }
    return 0;
}
1526
1527
1528/**
1529 * Wait for posted PCIe read/writes to reach the other side of
1530 * the internal PCIe switch. This will insure that core
1531 * read/writes are posted before anything after this function
1532 * is called. This may be necessary when writing to memory that
1533 * will later be read using the DMA/PKT engines.
1534 *
1535 * @param pcie_port PCIe port to wait for
1536 */
1537void cvmx_pcie_wait_for_pending(int pcie_port)
1538{
1539    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1540    {
1541        cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1542        int a;
1543        int b;
1544        int c;
1545
1546        /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1547            description of how this code works */
1548        npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1549        if (pcie_port)
1550        {
1551            if (!npei_data_out_cnt.s.p1_fcnt)
1552                return;
1553            a = npei_data_out_cnt.s.p1_ucnt;
1554            b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1555        }
1556        else
1557        {
1558            if (!npei_data_out_cnt.s.p0_fcnt)
1559                return;
1560            a = npei_data_out_cnt.s.p0_ucnt;
1561            b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1562        }
1563
1564        while (1)
1565        {
1566            npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1567            c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1568            if (a<=b)
1569            {
1570                if ((c<a) || (c>b))
1571                    return;
1572            }
1573            else
1574            {
1575                if ((c>b) && (c<a))
1576                    return;
1577            }
1578        }
1579    }
1580    else
1581    {
1582        cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1583        int a;
1584        int b;
1585        int c;
1586
1587        sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1588        if (pcie_port)
1589        {
1590            if (!sli_data_out_cnt.s.p1_fcnt)
1591                return;
1592            a = sli_data_out_cnt.s.p1_ucnt;
1593            b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1594        }
1595        else
1596        {
1597            if (!sli_data_out_cnt.s.p0_fcnt)
1598                return;
1599            a = sli_data_out_cnt.s.p0_ucnt;
1600            b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1601        }
1602
1603        while (1)
1604        {
1605            sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1606            c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
1607            if (a<=b)
1608            {
1609                if ((c<a) || (c>b))
1610                    return;
1611            }
1612            else
1613            {
1614                if ((c>b) && (c<a))
1615                    return;
1616            }
1617        }
1618    }
1619}
1620