cvmx-pcie.c revision 216476
1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42
43
44
45
46/**
47 * @file
48 *
49 * Interface to PCIe as a host(RC) or target(EP)
50 *
51 * <hr>$Revision: 52004 $<hr>
52 */
53#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
54#include <asm/octeon/cvmx.h>
55#include <asm/octeon/cvmx-config.h>
56#include <asm/octeon/cvmx-clock.h>
57#include <asm/octeon/cvmx-ciu-defs.h>
58#include <asm/octeon/cvmx-dpi-defs.h>
59#include <asm/octeon/cvmx-npi-defs.h>
60#include <asm/octeon/cvmx-npei-defs.h>
61#include <asm/octeon/cvmx-pci-defs.h>
62#include <asm/octeon/cvmx-pcieepx-defs.h>
63#include <asm/octeon/cvmx-pciercx-defs.h>
64#include <asm/octeon/cvmx-pemx-defs.h>
65#include <asm/octeon/cvmx-pexp-defs.h>
66#include <asm/octeon/cvmx-pescx-defs.h>
67#include <asm/octeon/cvmx-sli-defs.h>
68#include <asm/octeon/cvmx-sriox-defs.h>
69
70#ifdef CONFIG_CAVIUM_DECODE_RSL
71#include <asm/octeon/cvmx-error.h>
72#endif
73#include <asm/octeon/cvmx-helper.h>
74#include <asm/octeon/cvmx-helper-board.h>
75#include <asm/octeon/cvmx-helper-errata.h>
76#include <asm/octeon/cvmx-pcie.h>
77#include <asm/octeon/cvmx-sysinfo.h>
78#include <asm/octeon/cvmx-swap.h>
79#include <asm/octeon/cvmx-wqe.h>
80#else
81#include "cvmx.h"
82#include "cvmx-csr-db.h"
83#include "cvmx-pcie.h"
84#include "cvmx-sysinfo.h"
85#include "cvmx-swap.h"
86#include "cvmx-wqe.h"
87#include "cvmx-error.h"
88#include "cvmx-helper-errata.h"
89#endif
90
91#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
92#define MPS_CN5XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
93#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
94#define MPS_CN6XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
95
96/**
97 * Return the Core virtual base address for PCIe IO access. IOs are
98 * read/written as an offset from this address.
99 *
100 * @param pcie_port PCIe port the IO is for
101 *
102 * @return 64bit Octeon IO base address for read/write
103 */
104uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
105{
106    cvmx_pcie_address_t pcie_addr;
107    pcie_addr.u64 = 0;
108    pcie_addr.io.upper = 0;
109    pcie_addr.io.io = 1;
110    pcie_addr.io.did = 3;
111    pcie_addr.io.subdid = 2;
112    pcie_addr.io.es = 1;
113    pcie_addr.io.port = pcie_port;
114    return pcie_addr.u64;
115}
116
117
118/**
119 * Size of the IO address region returned at address
120 * cvmx_pcie_get_io_base_address()
121 *
122 * @param pcie_port PCIe port the IO is for
123 *
124 * @return Size of the IO window
125 */
126uint64_t cvmx_pcie_get_io_size(int pcie_port)
127{
128    return 1ull<<32;
129}
130
131
132/**
133 * Return the Core virtual base address for PCIe MEM access. Memory is
134 * read/written as an offset from this address.
135 *
136 * @param pcie_port PCIe port the IO is for
137 *
138 * @return 64bit Octeon IO base address for read/write
139 */
140uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
141{
142    cvmx_pcie_address_t pcie_addr;
143    pcie_addr.u64 = 0;
144    pcie_addr.mem.upper = 0;
145    pcie_addr.mem.io = 1;
146    pcie_addr.mem.did = 3;
147    pcie_addr.mem.subdid = 3 + pcie_port;
148    return pcie_addr.u64;
149}
150
151
152/**
153 * Size of the Mem address region returned at address
154 * cvmx_pcie_get_mem_base_address()
155 *
156 * @param pcie_port PCIe port the IO is for
157 *
158 * @return Size of the Mem window
159 */
160uint64_t cvmx_pcie_get_mem_size(int pcie_port)
161{
162    return 1ull<<36;
163}
164
165
166/**
167 * @INTERNAL
168 * Initialize the RC config space CSRs
169 *
170 * @param pcie_port PCIe port to initialize
171 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            /* CN5XXX: 128 byte MPS/MRRS (see MPS_CN5XXX/MRRS_CN5XXX defines) */
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            /* CN6XXX: 128 byte MPS, 1024 byte MRRS (see MPS_CN6XXX/MRRS_CN6XXX defines) */
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* CN5XXX family path: MPS/MRRS is mirrored in the NPEI block */
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* CN6XXX family path: MPS/MRRS is mirrored in the DPI/SLI blocks */
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        /* Keep the SLI S2M read request size consistent with the port config */
        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
        /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    /* Writing zero clears/masks the AER message enables in CFG066/CFG069 */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. 0 disables ASPM */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100; /* base > limit, so the window is disabled */
        pciercx_cfg008.s.ml_addr = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100; /* base > limit, so the window is disabled */
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
347
348/**
349 * @INTERNAL
350 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
351 * port from reset to a link up state. Software can then begin
352 * configuring the rest of the link.
353 *
354 * @param pcie_port PCIe port to initialize
355 *
356 * @return Zero on success
357 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1; /* Mask completion length errors */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      /* Some boards (e.g. the Lanner MR730) wire the lanes straight through
          and must not have the swap applied */
      switch (cvmx_sysinfo_get()->board_type)
      {
#if defined(OCTEON_VENDOR_LANNER)
	case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
	  break;
#endif
	default:
	  pescx_ctl_status.s.lane_swp = 1;
	  break;
      }
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up, polling CFG032[DLLA] (data link layer
        active). The timeout is 2x the core clock rate in cycles, i.e. roughly
        two seconds of wall time */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors (write-one-to-clear via read-back of the sum) */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw) /* nlw = negotiated link width */
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
458
459
460/**
461 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
462 * the bus.
463 *
464 * @param pcie_port PCIe port to initialize
465 *
466 * @return Zero on success
467 */
468static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
469{
470    int i;
471    int base;
472    uint64_t addr_swizzle;
473    cvmx_ciu_soft_prst_t ciu_soft_prst;
474    cvmx_pescx_bist_status_t pescx_bist_status;
475    cvmx_pescx_bist_status2_t pescx_bist_status2;
476    cvmx_npei_ctl_status_t npei_ctl_status;
477    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
478    cvmx_npei_mem_access_subidx_t mem_access_subid;
479    cvmx_npei_dbg_data_t npei_dbg_data;
480    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
481    cvmx_pciercx_cfg032_t pciercx_cfg032;
482    cvmx_npei_bar1_indexx_t bar1_index;
483
484retry:
485    /* Make sure we aren't trying to setup a target mode interface in host mode */
486    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
487    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
488    {
489        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
490        return -1;
491    }
492
493    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
494    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
495    {
496        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
497        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
498        {
499            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
500            return -1;
501        }
502    }
503
504    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
505    npei_ctl_status.s.arb = 1;
506    /* Allow up to 0x20 config retries */
507    npei_ctl_status.s.cfg_rtry = 0x20;
508    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
509    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
510    {
511        npei_ctl_status.s.p0_ntags = 0x20;
512        npei_ctl_status.s.p1_ntags = 0x20;
513    }
514    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
515
516    /* Bring the PCIe out of reset */
517    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
518    {
519        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
520            workaround for this bug, we bring both PCIe ports out of reset at
521            the same time instead of on separate calls. So for port 0, we bring
522            both out of reset and do nothing on port 1 */
523        if (pcie_port == 0)
524        {
525            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
526            /* After a chip reset the PCIe will also be in reset. If it isn't,
527                most likely someone is trying to init it again without a proper
528                PCIe reset */
529            if (ciu_soft_prst.s.soft_prst == 0)
530            {
531		/* Reset the ports */
532		ciu_soft_prst.s.soft_prst = 1;
533		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
534		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
535		ciu_soft_prst.s.soft_prst = 1;
536		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
537		/* Wait until pcie resets the ports. */
538		cvmx_wait_usec(2000);
539            }
540            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
541            ciu_soft_prst.s.soft_prst = 0;
542            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
543            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
544            ciu_soft_prst.s.soft_prst = 0;
545            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
546        }
547    }
548    else
549    {
550        /* The normal case: The PCIe ports are completely separate and can be
551            brought out of reset independently */
552        if (pcie_port)
553            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
554        else
555            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
556        /* After a chip reset the PCIe will also be in reset. If it isn't,
557            most likely someone is trying to init it again without a proper
558            PCIe reset */
559        if (ciu_soft_prst.s.soft_prst == 0)
560        {
561	    /* Reset the port */
562	    ciu_soft_prst.s.soft_prst = 1;
563	    if (pcie_port)
564		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
565 	    else
566		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
567	    /* Wait until pcie resets the ports. */
568	    cvmx_wait_usec(2000);
569        }
570        if (pcie_port)
571        {
572            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
573            ciu_soft_prst.s.soft_prst = 0;
574            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
575        }
576        else
577        {
578            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
579            ciu_soft_prst.s.soft_prst = 0;
580            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
581        }
582    }
583
584    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
585       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
586    cvmx_wait(400000);
587
588    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
589        CN52XX, so we only probe it on newer chips */
590    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
591    {
592        /* Clear PCLK_RUN so we can check if the clock is running */
593        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
594        pescx_ctl_status2.s.pclk_run = 1;
595        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
596        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
597            us the clock is running */
598        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
599            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
600        {
601            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
602            return -1;
603        }
604    }
605
606    /* Check and make sure PCIe came out of reset. If it doesn't the board
607        probably hasn't wired the clocks up and the interface should be
608        skipped */
609    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
610    if (pescx_ctl_status2.s.pcierst)
611    {
612        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
613        return -1;
614    }
615
616    /* Check BIST2 status. If any bits are set skip this interface. This
617        is an attempt to catch PCIE-813 on pass 1 parts */
618    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
619    if (pescx_bist_status2.u64)
620    {
621        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
622        return -1;
623    }
624
625    /* Check BIST status */
626    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
627    if (pescx_bist_status.u64)
628        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
629
630    /* Initialize the config space CSRs */
631    __cvmx_pcie_rc_initialize_config_space(pcie_port);
632
633    /* Bring the link up */
634    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
635    {
636        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
637        return -1;
638    }
639
640    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
641    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
642    npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
643    npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
644    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
645
646    /* Setup Mem access SubDIDs */
647    mem_access_subid.u64 = 0;
648    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
649    mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
650    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
651    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
652    mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
653    mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
654    mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
655    mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
656    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
657
658    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
659    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
660    {
661        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
662        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
663    }
664
665    /* Disable the peer to peer forwarding register. This must be setup
666        by the OS after it enumerates the bus and assigns addresses to the
667        PCIe busses */
668    for (i=0; i<4; i++)
669    {
670        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
671        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
672    }
673
674    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
675    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
676
677    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
678    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
679
680    bar1_index.u32 = 0;
681    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
682    bar1_index.s.ca = 1;       /* Not Cached */
683    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
684    bar1_index.s.addr_v = 1;   /* Valid entry */
685
686    base = pcie_port ? 16 : 0;
687
688    /* Big endian swizzle for 32-bit PEXP_NCB register. */
689#ifdef __MIPSEB__
690    addr_swizzle = 4;
691#else
692    addr_swizzle = 0;
693#endif
694    for (i = 0; i < 16; i++) {
695        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
696        base++;
697        /* 256MB / 16 >> 22 == 4 */
698        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
699    }
700
701    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
702        where they overlap. It also overlaps with the device addresses, so
703        make sure the peer to peer forwarding is set right */
704    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
705
706    /* Setup BAR2 attributes */
707    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
708    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
709    /* � WAIT_COM=0 will likely work for all applications. */
710    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
711    if (pcie_port)
712    {
713        cvmx_npei_ctl_port1_t npei_ctl_port;
714        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
715        npei_ctl_port.s.bar2_enb = 1;
716        npei_ctl_port.s.bar2_esx = 1;
717        npei_ctl_port.s.bar2_cax = 0;
718        npei_ctl_port.s.ptlp_ro = 1;
719        npei_ctl_port.s.ctlp_ro = 1;
720        npei_ctl_port.s.wait_com = 0;
721        npei_ctl_port.s.waitl_com = 0;
722        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
723    }
724    else
725    {
726        cvmx_npei_ctl_port0_t npei_ctl_port;
727        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
728        npei_ctl_port.s.bar2_enb = 1;
729        npei_ctl_port.s.bar2_esx = 1;
730        npei_ctl_port.s.bar2_cax = 0;
731        npei_ctl_port.s.ptlp_ro = 1;
732        npei_ctl_port.s.ctlp_ro = 1;
733        npei_ctl_port.s.wait_com = 0;
734        npei_ctl_port.s.waitl_com = 0;
735        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
736    }
737
738    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
739        TLP ordering to not be preserved after multiple PCIe port resets. This
740        code detects this fault and corrects it by aligning the TLP counters
741        properly. Another link reset is then performed. See PCIE-13340 */
742    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
743        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
744    {
745        cvmx_npei_dbg_data_t dbg_data;
746        int old_in_fif_p_count;
747        int in_fif_p_count;
748        int out_p_count;
749        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
750        int i;
751
752        /* Choose a write address of 1MB. It should be harmless as all bars
753            haven't been setup */
754        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
755
756        /* Make sure at least in_p_offset have been executed before we try and
757            read in_fif_p_count */
758        i = in_p_offset;
759        while (i--)
760        {
761            cvmx_write64_uint32(write_address, 0);
762            cvmx_wait(10000);
763        }
764
765        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
766            unstable sometimes so read it twice with a write between the reads.
767            This way we can tell the value is good as it will increment by one
768            due to the write */
769        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
770        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
771        do
772        {
773            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
774            old_in_fif_p_count = dbg_data.s.data & 0xff;
775            cvmx_write64_uint32(write_address, 0);
776            cvmx_wait(10000);
777            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
778            in_fif_p_count = dbg_data.s.data & 0xff;
779        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
780
781        /* Update in_fif_p_count for it's offset with respect to out_p_count */
782        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
783
784        /* Read the OUT_P_COUNT from the debug select */
785        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
786        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
787        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
788        out_p_count = (dbg_data.s.data>>1) & 0xff;
789
790        /* Check that the two counters are aligned */
791        if (out_p_count != in_fif_p_count)
792        {
793            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
794            while (in_fif_p_count != 0)
795            {
796                cvmx_write64_uint32(write_address, 0);
797                cvmx_wait(10000);
798                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
799            }
800            /* The EBH5200 board swapped the PCIe reset lines on the board. This
801                means we must bring both links down and up, which will cause the
802                PCIe0 to need alignment again. Lots of messages will be displayed,
803                but everything should work */
804            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
805                (pcie_port == 1))
806                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
808            goto retry;
809        }
810    }
811
812    /* Display the link status */
813    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
814    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
815
816    return 0;
817}
818
819
820/**
821 * @INTERNAL
822 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
823 * port from reset to a link up state. Software can then begin
824 * configuring the rest of the link.
825 *
826 * @param pcie_port PCIe port to initialize
827 *
828 * @return Zero on success
829 */
830static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
831{
832    uint64_t start_cycle;
833    cvmx_pemx_ctl_status_t pem_ctl_status;
834    cvmx_pciercx_cfg032_t pciercx_cfg032;
835    cvmx_pciercx_cfg448_t pciercx_cfg448;
836
837    /* Bring up the link */
838    pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
839    pem_ctl_status.s.lnk_enb = 1;
840    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
841
842    /* Wait for the link to come up */
843    start_cycle = cvmx_get_cycle();
844    do
845    {
846        if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
847            return -1;
848        cvmx_wait(10000);
849        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
850    } while (pciercx_cfg032.s.dlla == 0);
851
852    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
853        little longer to respond than expected under load. As a workaround for
854        this we configure the Replay Time Limit to the value expected for a 512
855        byte MPS instead of our actual 256 byte MPS. The numbers below are
856        directly from the PCIe spec table 3-4 */
857    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
858    switch (pciercx_cfg032.s.nlw)
859    {
860        case 1: /* 1 lane */
861            pciercx_cfg448.s.rtl = 1677;
862            break;
863        case 2: /* 2 lanes */
864            pciercx_cfg448.s.rtl = 867;
865            break;
866        case 4: /* 4 lanes */
867            pciercx_cfg448.s.rtl = 462;
868            break;
869        case 8: /* 8 lanes */
870            pciercx_cfg448.s.rtl = 258;
871            break;
872    }
873    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
874
875    return 0;
876}
877
878
879/**
880 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
881 * the bus.
882 *
883 * @param pcie_port PCIe port to initialize
884 *
885 * @return Zero on success
886 */
887static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
888{
889    int i;
890    cvmx_ciu_soft_prst_t ciu_soft_prst;
891    cvmx_mio_rst_ctlx_t mio_rst_ctl;
892    cvmx_pemx_bar_ctl_t pemx_bar_ctl;
893    cvmx_pemx_ctl_status_t pemx_ctl_status;
894    cvmx_pemx_bist_status_t pemx_bist_status;
895    cvmx_pemx_bist_status2_t pemx_bist_status2;
896    cvmx_pciercx_cfg032_t pciercx_cfg032;
897    cvmx_pciercx_cfg515_t pciercx_cfg515;
898    cvmx_sli_ctl_portx_t sli_ctl_portx;
899    cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
900    cvmx_sli_mem_access_subidx_t mem_access_subid;
901    cvmx_mio_rst_ctlx_t mio_rst_ctlx;
902    cvmx_sriox_status_reg_t sriox_status_reg;
903    cvmx_pemx_bar1_indexx_t bar1_index;
904
905    /* Make sure this interface isn't SRIO */
906    sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
907    if (sriox_status_reg.s.srio)
908    {
909        cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
910        return -1;
911    }
912
913    /* Make sure we aren't trying to setup a target mode interface in host mode */
914    mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
915    if (!mio_rst_ctl.s.host_mode)
916    {
917        cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
918        return -1;
919    }
920
921    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
922    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
923    {
924        if (pcie_port)
925        {
926            cvmx_ciu_qlm1_t ciu_qlm;
927            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
928            ciu_qlm.s.txbypass = 1;
929            ciu_qlm.s.txdeemph = 5;
930            ciu_qlm.s.txmargin = 0x17;
931            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
932        }
933        else
934        {
935            cvmx_ciu_qlm0_t ciu_qlm;
936            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
937            ciu_qlm.s.txbypass = 1;
938            ciu_qlm.s.txdeemph = 5;
939            ciu_qlm.s.txmargin = 0x17;
940            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
941        }
942    }
943
944    /* Bring the PCIe out of reset */
945    if (pcie_port)
946        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
947    else
948        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
949    /* After a chip reset the PCIe will also be in reset. If it isn't,
950        most likely someone is trying to init it again without a proper
951        PCIe reset */
952    if (ciu_soft_prst.s.soft_prst == 0)
953    {
954        /* Reset the port */
955        ciu_soft_prst.s.soft_prst = 1;
956        if (pcie_port)
957            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
958        else
959            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
960        /* Wait until pcie resets the ports. */
961        cvmx_wait_usec(2000);
962    }
963    if (pcie_port)
964    {
965        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
966        ciu_soft_prst.s.soft_prst = 0;
967        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
968    }
969    else
970    {
971        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
972        ciu_soft_prst.s.soft_prst = 0;
973        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
974    }
975
976    /* Wait for PCIe reset to complete */
977    cvmx_wait_usec(1000);
978
979    /* Check and make sure PCIe came out of reset. If it doesn't the board
980        probably hasn't wired the clocks up and the interface should be
981        skipped */
982    mio_rst_ctlx.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
983    if (!mio_rst_ctlx.s.rst_done)
984    {
985        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
986        return -1;
987    }
988
989    /* Check BIST status */
990    pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
991    if (pemx_bist_status.u64)
992        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
993    pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
994    if (pemx_bist_status2.u64)
995        cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
996
997    /* Initialize the config space CSRs */
998    __cvmx_pcie_rc_initialize_config_space(pcie_port);
999
1000    /* Enable gen2 speed selection */
1001    pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1002    pciercx_cfg515.s.dsc = 1;
1003    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
1004
1005    /* Bring the link up */
1006    if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1007    {
1008        /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1009            gen2 and try again with only gen1 */
1010        cvmx_pciercx_cfg031_t pciercx_cfg031;
1011        pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1012        pciercx_cfg031.s.mls = 1;
1013        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg515.u32);
1014        if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1015        {
1016            cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1017            return -1;
1018        }
1019    }
1020
1021    /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1022    sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1023    sli_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
1024    sli_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
1025    cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1026
1027    /* Setup Mem access SubDIDs */
1028    mem_access_subid.u64 = 0;
1029    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1030    mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
1031    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
1032    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
1033    mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1034    mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1035    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
1036
1037    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1038    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1039    {
1040        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1041        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
1042    }
1043
1044    /* Disable the peer to peer forwarding register. This must be setup
1045        by the OS after it enumerates the bus and assigns addresses to the
1046        PCIe busses */
1047    for (i=0; i<4; i++)
1048    {
1049        cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1050        cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1051    }
1052
1053    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1054    cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1055
1056    /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1057        where they overlap. It also overlaps with the device addresses, so
1058        make sure the peer to peer forwarding is set right */
1059    cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1060
1061    /* Setup BAR2 attributes */
1062    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1063    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
1064    /* � WAIT_COM=0 will likely work for all applications. */
1065    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1066    pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1067    pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1*/
1068    pemx_bar_ctl.s.bar2_enb = 1;
1069    pemx_bar_ctl.s.bar2_esx = 1;
1070    pemx_bar_ctl.s.bar2_cax = 0;
1071    cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1072    sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1073    sli_ctl_portx.s.ptlp_ro = 1;
1074    sli_ctl_portx.s.ctlp_ro = 1;
1075    sli_ctl_portx.s.wait_com = 0;
1076    sli_ctl_portx.s.waitl_com = 0;
1077    cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1078
1079    /* BAR1 follows BAR2 */
1080    cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1081
1082    bar1_index.u64 = 0;
1083    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1084    bar1_index.s.ca = 1;       /* Not Cached */
1085    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
1086    bar1_index.s.addr_v = 1;   /* Valid entry */
1087
1088    for (i = 0; i < 16; i++) {
1089        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1090        /* 256MB / 16 >> 22 == 4 */
1091        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1092    }
1093
1094    /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1095        clock */
1096    pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1097    pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1098    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1099
1100    /* Display the link status */
1101    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1102    cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1103
1104    return 0;
1105}
1106
1107/**
1108 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1109 *
1110 * @param pcie_port PCIe port to initialize
1111 *
1112 * @return Zero on success
1113 */
1114int cvmx_pcie_rc_initialize(int pcie_port)
1115{
1116    int result;
1117    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1118        result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1119    else
1120        result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1121#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1122    if (result == 0)
1123        cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1124#endif
1125    return result;
1126}
1127
1128
1129/**
1130 * Shutdown a PCIe port and put it in reset
1131 *
1132 * @param pcie_port PCIe port to shutdown
1133 *
1134 * @return Zero on success
1135 */
1136int cvmx_pcie_rc_shutdown(int pcie_port)
1137{
1138#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1139    cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1140#endif
1141    /* Wait for all pending operations to complete */
1142    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1143    {
1144        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1145            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1146    }
1147    else
1148    {
1149        if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1150            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1151    }
1152
1153    /* Force reset */
1154    if (pcie_port)
1155    {
1156        cvmx_ciu_soft_prst_t ciu_soft_prst;
1157        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1158        ciu_soft_prst.s.soft_prst = 1;
1159        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1160    }
1161    else
1162    {
1163        cvmx_ciu_soft_prst_t ciu_soft_prst;
1164        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1165        ciu_soft_prst.s.soft_prst = 1;
1166        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1167    }
1168    return 0;
1169}
1170
1171
1172/**
1173 * @INTERNAL
1174 * Build a PCIe config space request address for a device
1175 *
1176 * @param pcie_port PCIe port to access
1177 * @param bus       Sub bus
1178 * @param dev       Device ID
1179 * @param fn        Device sub function
1180 * @param reg       Register to access
1181 *
1182 * @return 64bit Octeon IO address
1183 */
1184static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1185{
1186    cvmx_pcie_address_t pcie_addr;
1187    cvmx_pciercx_cfg006_t pciercx_cfg006;
1188
1189    pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1190    if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1191        return 0;
1192
1193    pcie_addr.u64 = 0;
1194    pcie_addr.config.upper = 2;
1195    pcie_addr.config.io = 1;
1196    pcie_addr.config.did = 3;
1197    pcie_addr.config.subdid = 1;
1198    pcie_addr.config.es = 1;
1199    pcie_addr.config.port = pcie_port;
1200    pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1201    pcie_addr.config.bus = bus;
1202    pcie_addr.config.dev = dev;
1203    pcie_addr.config.func = fn;
1204    pcie_addr.config.reg = reg;
1205    return pcie_addr.u64;
1206}
1207
1208
1209/**
1210 * Read 8bits from a Device's config space
1211 *
1212 * @param pcie_port PCIe port the device is on
1213 * @param bus       Sub bus
1214 * @param dev       Device ID
1215 * @param fn        Device sub function
1216 * @param reg       Register to access
1217 *
1218 * @return Result of the read
1219 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* An invalid target reads as all ones, like a PCI master abort */
    if (!address)
        return 0xff;
    return cvmx_read64_uint8(address);
}
1228
1229
1230/**
1231 * Read 16bits from a Device's config space
1232 *
1233 * @param pcie_port PCIe port the device is on
1234 * @param bus       Sub bus
1235 * @param dev       Device ID
1236 * @param fn        Device sub function
1237 * @param reg       Register to access
1238 *
1239 * @return Result of the read
1240 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* An invalid target reads as all ones, like a PCI master abort */
    if (!address)
        return 0xffff;
    return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
}
1249
1250
1251/**
1252 * Read 32bits from a Device's config space
1253 *
1254 * @param pcie_port PCIe port the device is on
1255 * @param bus       Sub bus
1256 * @param dev       Device ID
1257 * @param fn        Device sub function
1258 * @param reg       Register to access
1259 *
1260 * @return Result of the read
1261 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* An invalid target reads as all ones, like a PCI master abort */
    if (!address)
        return 0xffffffff;
    return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
}
1270
1271
1272/**
1273 * Write 8bits to a Device's config space
1274 *
1275 * @param pcie_port PCIe port the device is on
1276 * @param bus       Sub bus
1277 * @param dev       Device ID
1278 * @param fn        Device sub function
1279 * @param reg       Register to access
1280 * @param val       Value to write
1281 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to an invalid target are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint8(address, val);
}
1288
1289
1290/**
1291 * Write 16bits to a Device's config space
1292 *
1293 * @param pcie_port PCIe port the device is on
1294 * @param bus       Sub bus
1295 * @param dev       Device ID
1296 * @param fn        Device sub function
1297 * @param reg       Register to access
1298 * @param val       Value to write
1299 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to an invalid target are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
1306
1307
1308/**
1309 * Write 32bits to a Device's config space
1310 *
1311 * @param pcie_port PCIe port the device is on
1312 * @param bus       Sub bus
1313 * @param dev       Device ID
1314 * @param fn        Device sub function
1315 * @param reg       Register to access
1316 * @param val       Value to write
1317 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to an invalid target are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
1324
1325
1326/**
1327 * Read a PCIe config space register indirectly. This is used for
1328 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1329 *
1330 * @param pcie_port  PCIe port to read from
1331 * @param cfg_offset Address to read
1332 *
1333 * @return Value read
1334 */
1335uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1336{
1337    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1338    {
1339        cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1340        pescx_cfg_rd.u64 = 0;
1341        pescx_cfg_rd.s.addr = cfg_offset;
1342        cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1343        pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1344        return pescx_cfg_rd.s.data;
1345    }
1346    else
1347    {
1348        cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1349        pemx_cfg_rd.u64 = 0;
1350        pemx_cfg_rd.s.addr = cfg_offset;
1351        cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1352        pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1353        return pemx_cfg_rd.s.data;
1354    }
1355}
1356
1357
1358/**
1359 * Write a PCIe config space register indirectly. This is used for
1360 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1361 *
1362 * @param pcie_port  PCIe port to write to
1363 * @param cfg_offset Address to write
1364 * @param val        Value to write
1365 */
1366void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1367{
1368    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1369    {
1370        cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1371        pescx_cfg_wr.u64 = 0;
1372        pescx_cfg_wr.s.addr = cfg_offset;
1373        pescx_cfg_wr.s.data = val;
1374        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1375    }
1376    else
1377    {
1378        cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1379        pemx_cfg_wr.u64 = 0;
1380        pemx_cfg_wr.s.addr = cfg_offset;
1381        pemx_cfg_wr.s.data = val;
1382        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1383    }
1384}
1385
1386
1387/**
1388 * Initialize a PCIe port for use in target(EP) mode.
1389 *
1390 * @param pcie_port PCIe port to initialize
1391 *
1392 * @return Zero on success
1393 */
int cvmx_pcie_ep_initialize(int pcie_port)
{
    /* Refuse to run if the port is strapped for host (RC) mode */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_ctl_status_t npei_ctl_status;
        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
        if (npei_ctl_status.s.host_mode)
            return -1;
    }
    else
    {
        cvmx_mio_rst_ctlx_t mio_rst_ctl;
        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
        if (mio_rst_ctl.s.host_mode)
            return -1;
    }

    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        if (pcie_port)
        {
            cvmx_ciu_qlm1_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
        }
        else
        {
            cvmx_ciu_qlm0_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
        }
    }

    /* Enable bus master and memory (command register value 0x6) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN]) */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
        /* MPS/MRRS limits differ between the CN5XXX and CN6XXX families */
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pcieepx_cfg030.s.mps = MPS_CN5XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pcieepx_cfg030.s.mps = MPS_CN6XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
        pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
        pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        /* The S2M port MRRS must also agree with the limits above */
        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* Setup Mem access SubDID 12 to access Host memory */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 1;  /* Merging is not allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
    }
    else
    {
        cvmx_sli_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 0;  /* Merging is allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
    }
    return 0;
}
1520
1521
1522/**
1523 * Wait for posted PCIe read/writes to reach the other side of
1524 * the internal PCIe switch. This will insure that core
1525 * read/writes are posted before anything after this function
1526 * is called. This may be necessary when writing to memory that
1527 * will later be read using the DMA/PKT engines.
1528 *
1529 * @param pcie_port PCIe port to wait for
1530 */
1531void cvmx_pcie_wait_for_pending(int pcie_port)
1532{
1533    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1534    {
1535        cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1536        int a;
1537        int b;
1538        int c;
1539
1540        /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1541            description of how this code works */
1542        npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1543        if (pcie_port)
1544        {
1545            if (!npei_data_out_cnt.s.p1_fcnt)
1546                return;
1547            a = npei_data_out_cnt.s.p1_ucnt;
1548            b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1549        }
1550        else
1551        {
1552            if (!npei_data_out_cnt.s.p0_fcnt)
1553                return;
1554            a = npei_data_out_cnt.s.p0_ucnt;
1555            b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1556        }
1557
1558        while (1)
1559        {
1560            npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1561            c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1562            if (a<=b)
1563            {
1564                if ((c<a) || (c>b))
1565                    return;
1566            }
1567            else
1568            {
1569                if ((c>b) && (c<a))
1570                    return;
1571            }
1572        }
1573    }
1574    else
1575    {
1576        cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1577        int a;
1578        int b;
1579        int c;
1580
1581        sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1582        if (pcie_port)
1583        {
1584            if (!sli_data_out_cnt.s.p1_fcnt)
1585                return;
1586            a = sli_data_out_cnt.s.p1_ucnt;
1587            b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1588        }
1589        else
1590        {
1591            if (!sli_data_out_cnt.s.p0_fcnt)
1592                return;
1593            a = sli_data_out_cnt.s.p0_ucnt;
1594            b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1595        }
1596
1597        while (1)
1598        {
1599            sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1600            c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
1601            if (a<=b)
1602            {
1603                if ((c<a) || (c>b))
1604                    return;
1605            }
1606            else
1607            {
1608                if ((c>b) && (c<a))
1609                    return;
1610            }
1611        }
1612    }
1613}
1614