cvmx-pcie.c revision 210286
1/***********************license start***************
2 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
3 *  reserved.
4 *
5 *
6 *  Redistribution and use in source and binary forms, with or without
7 *  modification, are permitted provided that the following conditions are
8 *  met:
9 *
10 *      * Redistributions of source code must retain the above copyright
11 *        notice, this list of conditions and the following disclaimer.
12 *
13 *      * Redistributions in binary form must reproduce the above
14 *        copyright notice, this list of conditions and the following
15 *        disclaimer in the documentation and/or other materials provided
16 *        with the distribution.
17 *
18 *      * Neither the name of Cavium Networks nor the names of
19 *        its contributors may be used to endorse or promote products
20 *        derived from this software without specific prior written
21 *        permission.
22 *
23 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
32 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
33 *
34 *
35 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
36 *
37 ***********************license end**************************************/
38
39
40
41
42
43
44/**
45 * @file
46 *
47 * Interface to PCIe as a host(RC) or target(EP)
48 *
49 * <hr>$Revision: 41586 $<hr>
50 */
51#include "cvmx.h"
52#include "cvmx-csr-db.h"
53#include "cvmx-pcie.h"
54#include "cvmx-sysinfo.h"
55#include "cvmx-swap.h"
56#include "cvmx-wqe.h"
57#include "cvmx-helper-errata.h"
58
59
60/**
61 * Return the Core virtual base address for PCIe IO access. IOs are
62 * read/written as an offset from this address.
63 *
64 * @param pcie_port PCIe port the IO is for
65 *
66 * @return 64bit Octeon IO base address for read/write
67 */
68uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
69{
70    cvmx_pcie_address_t pcie_addr;
71    pcie_addr.u64 = 0;
72    pcie_addr.io.upper = 0;
73    pcie_addr.io.io = 1;
74    pcie_addr.io.did = 3;
75    pcie_addr.io.subdid = 2;
76    pcie_addr.io.es = 1;
77    pcie_addr.io.port = pcie_port;
78    return pcie_addr.u64;
79}
80
81
82/**
83 * Size of the IO address region returned at address
84 * cvmx_pcie_get_io_base_address()
85 *
86 * @param pcie_port PCIe port the IO is for
87 *
88 * @return Size of the IO window
89 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Every PCIe port decodes the same fixed 4GB IO window, so the
        port number is not consulted. */
    return 0x100000000ull;      /* 1ull << 32 */
}
94
95
96/**
97 * Return the Core virtual base address for PCIe MEM access. Memory is
98 * read/written as an offset from this address.
99 *
100 * @param pcie_port PCIe port the IO is for
101 *
102 * @return 64bit Octeon IO base address for read/write
103 */
104uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
105{
106    cvmx_pcie_address_t pcie_addr;
107    pcie_addr.u64 = 0;
108    pcie_addr.mem.upper = 0;
109    pcie_addr.mem.io = 1;
110    pcie_addr.mem.did = 3;
111    pcie_addr.mem.subdid = 3 + pcie_port;
112    return pcie_addr.u64;
113}
114
115
116/**
117 * Size of the Mem address region returned at address
118 * cvmx_pcie_get_mem_base_address()
119 *
120 * @param pcie_port PCIe port the IO is for
121 *
122 * @return Size of the Mem window
123 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Every PCIe port decodes the same fixed 64GB memory window, so the
        port number is not consulted. */
    return 0x1000000000ull;     /* 1ull << 36 */
}
128
129
130/**
131 * @INTERNAL
132 * Initialize the RC config space CSRs
133 *
134 * @param pcie_port PCIe port to initialize
135 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Programs the root-complex configuration CSRs for one PCIe port.
        Each braced section does a read-modify-write (or full write) of one
        config register; the sections are independent of each other. */

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        pciercx_cfg030.s.mps = 0; /* Max payload size = 128 bytes for best Octeon DMA performance */
        pciercx_cfg030.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
    /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
    {
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = 0; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
        /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */
    // FIXME: Anything needed here?

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1; /* Primary bus number */
        pciercx_cfg006.s.sbnum = 1; /* Secondary bus number */
        pciercx_cfg006.s.subbnum = 1; /* Subordinate bus number */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100; /* Memory base above the limit disables the window */
        pciercx_cfg008.s.ml_addr = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100; /* Base above limit disables the prefetchable window */
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
285
286
287/**
288 * @INTERNAL
289 * Initialize a host mode PCIe link. This function takes a PCIe
290 * port from reset to a link up state. Software can then begin
291 * configuring the rest of the link.
292 *
293 * @param pcie_port PCIe port to initialize
294 *
295 * @return Zero on success
296 */
static int __cvmx_pcie_rc_initialize_link(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width based on the QLM configuration reported by hardware */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      pescx_ctl_status.s.lane_swp = 1;
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up. Poll data-link-layer-active (DLLA) with
        a short delay between reads, timing out after two seconds worth of
        core clock cycles. */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_sysinfo_get()->cpu_clock_hz)
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
        /* Any other negotiated width leaves RTL at the value just read */
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
385
386
387/**
388 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
389 *
390 * @param pcie_port PCIe port to initialize
391 *
392 * @return Zero on success
393 */
int cvmx_pcie_rc_initialize(int pcie_port)
{
    int i;
    cvmx_ciu_soft_prst_t ciu_soft_prst;
    cvmx_pescx_bist_status_t pescx_bist_status;
    cvmx_pescx_bist_status2_t pescx_bist_status2;
    cvmx_npei_ctl_status_t npei_ctl_status;
    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
    cvmx_npei_mem_access_subidx_t mem_access_subid;
    cvmx_npei_dbg_data_t npei_dbg_data;
    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
    cvmx_pciercx_cfg032_t pciercx_cfg032;

retry:
    /* Make sure we aren't trying to setup a target mode interface in host mode */
    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
    {
        cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port0, but port0 is not in host mode\n");
        return -1;
    }

    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
    {
        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
        {
            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
            return -1;
        }
    }

    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
    npei_ctl_status.s.arb = 1;
    /* Allow up to 0x20 config retries */
    npei_ctl_status.s.cfg_rtry = 0x20;
    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        npei_ctl_status.s.p0_ntags = 0x20;
        npei_ctl_status.s.p1_ntags = 0x20;
    }
    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);

    /* Bring the PCIe out of reset */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
    {
        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
            workaround for this bug, we bring both PCIe ports out of reset at
            the same time instead of on separate calls. So for port 0, we bring
            both out of reset and do nothing on port 1 */
        if (pcie_port == 0)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            /* After a chip reset the PCIe will also be in reset. If it isn't,
                most likely someone is trying to init it again without a proper
                PCIe reset */
            if (ciu_soft_prst.s.soft_prst == 0)
            {
		/* Reset the ports */
		ciu_soft_prst.s.soft_prst = 1;
		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
		ciu_soft_prst.s.soft_prst = 1;
		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
		/* Wait until pcie resets the ports. */
		cvmx_wait_usec(2000);
            }
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }
    else
    {
        /* The normal case: The PCIe ports are completely separate and can be
            brought out of reset independently */
        if (pcie_port)
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
        else
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
        /* After a chip reset the PCIe will also be in reset. If it isn't,
            most likely someone is trying to init it again without a proper
            PCIe reset */
        if (ciu_soft_prst.s.soft_prst == 0)
        {
	    /* Reset the port */
	    ciu_soft_prst.s.soft_prst = 1;
	    if (pcie_port)
		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
 	    else
		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
	    /* Wait until pcie resets the ports. */
	    cvmx_wait_usec(2000);
        }
        if (pcie_port)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
        }
        else
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }

    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
    cvmx_wait(400000);

    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
        CN52XX, so we only probe it on newer chips */
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        /* Clear PCLK_RUN so we can check if the clock is running.
            NOTE(review): writing 1 here appears to clear the bit
            (write-one-to-clear semantics) -- confirm against the HRM */
        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
        pescx_ctl_status2.s.pclk_run = 1;
        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
            us the clock is running */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
        {
            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
            return -1;
        }
    }

    /* Check and make sure PCIe came out of reset. If it doesn't the board
        probably hasn't wired the clocks up and the interface should be
        skipped */
    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
    if (pescx_ctl_status2.s.pcierst)
    {
        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
        return -1;
    }

    /* Check BIST2 status. If any bits are set skip this interface. This
        is an attempt to catch PCIE-813 on pass 1 parts */
    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
    if (pescx_bist_status2.u64)
    {
        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
        return -1;
    }

    /* Check BIST status. A failure is only reported, not fatal */
    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
    if (pescx_bist_status.u64)
        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));

    /* Initialize the config space CSRs */
    __cvmx_pcie_rc_initialize_config_space(pcie_port);

    /* Bring the link up */
    if (__cvmx_pcie_rc_initialize_link(pcie_port))
    {
        cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n");
        return -1;
    }

    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
    npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
    npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);

    /* Setup Mem access SubDIDs */
    mem_access_subid.u64 = 0;
    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
    mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
    mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
    mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
    mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
    mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
    mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */

    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
    {
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
    }

    /* Disable the peer to peer forwarding register. This must be setup
        by the OS after it enumerates the bus and assigns addresses to the
        PCIe busses */
    for (i=0; i<4; i++)
    {
        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
    }

    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);

    /* Disable Octeon's BAR1. It isn't needed in RC mode since BAR2
        maps all of memory. BAR2 also maps 256MB-512MB into the 2nd
        256MB of memory */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1);

    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
        where they overlap. It also overlaps with the device addresses, so
        make sure the peer to peer forwarding is set right */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);

    /* Setup BAR2 attributes */
    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
    /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
    /* - WAIT_COM=0 will likely work for all applications. */
    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
    if (pcie_port)
    {
        cvmx_npei_ctl_port1_t npei_ctl_port;
        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
        npei_ctl_port.s.bar2_enb = 1;
        npei_ctl_port.s.bar2_esx = 1;
        npei_ctl_port.s.bar2_cax = 0;
        npei_ctl_port.s.ptlp_ro = 1;
        npei_ctl_port.s.ctlp_ro = 1;
        npei_ctl_port.s.wait_com = 0;
        npei_ctl_port.s.waitl_com = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
    }
    else
    {
        cvmx_npei_ctl_port0_t npei_ctl_port;
        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
        npei_ctl_port.s.bar2_enb = 1;
        npei_ctl_port.s.bar2_esx = 1;
        npei_ctl_port.s.bar2_cax = 0;
        npei_ctl_port.s.ptlp_ro = 1;
        npei_ctl_port.s.ctlp_ro = 1;
        npei_ctl_port.s.wait_com = 0;
        npei_ctl_port.s.waitl_com = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
    }

    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
        TLP ordering to not be preserved after multiple PCIe port resets. This
        code detects this fault and corrects it by aligning the TLP counters
        properly. Another link reset is then performed. See PCIE-13340 */
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_npei_dbg_data_t dbg_data;
        int old_in_fif_p_count;
        int in_fif_p_count;
        int out_p_count;
        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
        int i;

        /* Choose a write address of 1MB. It should be harmless as all bars
            haven't been setup */
        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);

        /* Make sure at least in_p_offset have been executed before we try and
            read in_fif_p_count */
        i = in_p_offset;
        while (i--)
        {
            cvmx_write64_uint32(write_address, 0);
            cvmx_wait(10000);
        }

        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
            unstable sometimes so read it twice with a write between the reads.
            This way we can tell the value is good as it will increment by one
            due to the write */
        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
        do
        {
            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            old_in_fif_p_count = dbg_data.s.data & 0xff;
            cvmx_write64_uint32(write_address, 0);
            cvmx_wait(10000);
            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            in_fif_p_count = dbg_data.s.data & 0xff;
        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));

        /* Update in_fif_p_count for its offset with respect to out_p_count */
        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;

        /* Read the OUT_P_COUNT from the debug select */
        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        out_p_count = (dbg_data.s.data>>1) & 0xff;

        /* Check that the two counters are aligned */
        if (out_p_count != in_fif_p_count)
        {
            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
            /* Issue dummy writes until the input counter wraps to zero */
            while (in_fif_p_count != 0)
            {
                cvmx_write64_uint32(write_address, 0);
                cvmx_wait(10000);
                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
            }
            /* The EBH5200 board swapped the PCIe reset lines on the board. This
                means we must bring both links down and up, which will cause the
                PCIe0 to need alignment again. Lots of messages will be displayed,
                but everything should work */
            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
                (pcie_port == 1))
                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
            goto retry;
        }
    }

    /* Display the link status */
    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);

    return 0;
}
722
723
724/**
725 * Shutdown a PCIe port and put it in reset
726 *
727 * @param pcie_port PCIe port to shutdown
728 *
729 * @return Zero on success
730 */
731int cvmx_pcie_rc_shutdown(int pcie_port)
732{
733    /* Wait for all pending operations to complete */
734    if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
735        cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
736
737    /* Force reset */
738    if (pcie_port)
739    {
740        cvmx_ciu_soft_prst_t ciu_soft_prst;
741        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
742        ciu_soft_prst.s.soft_prst = 1;
743        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
744    }
745    else
746    {
747        cvmx_ciu_soft_prst_t ciu_soft_prst;
748        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
749        ciu_soft_prst.s.soft_prst = 1;
750        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
751    }
752    return 0;
753}
754
755
756/**
757 * @INTERNAL
758 * Build a PCIe config space request address for a device
759 *
760 * @param pcie_port PCIe port to access
761 * @param bus       Sub bus
762 * @param dev       Device ID
763 * @param fn        Device sub function
764 * @param reg       Register to access
765 *
766 * @return 64bit Octeon IO address
767 */
768static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
769{
770    cvmx_pcie_address_t pcie_addr;
771    cvmx_pciercx_cfg006_t pciercx_cfg006;
772
773    pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
774    if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
775        return 0;
776
777    pcie_addr.u64 = 0;
778    pcie_addr.config.upper = 2;
779    pcie_addr.config.io = 1;
780    pcie_addr.config.did = 3;
781    pcie_addr.config.subdid = 1;
782    pcie_addr.config.es = 1;
783    pcie_addr.config.port = pcie_port;
784    pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
785    pcie_addr.config.bus = bus;
786    pcie_addr.config.dev = dev;
787    pcie_addr.config.func = fn;
788    pcie_addr.config.reg = reg;
789    return pcie_addr.u64;
790}
791
792
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (0xff if the device is not addressable)
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* A zero address means the bus/dev/fn is invalid; return all ones as
        standard PCI config reads do for absent devices */
    if (!address)
        return 0xff;
    return cvmx_read64_uint8(address);
}
812
813
/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (0xffff if the device is not addressable)
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Invalid target: behave like a normal PCI master abort (all ones) */
    if (!address)
        return 0xffff;
    /* Config space is little endian on the wire */
    return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
}
833
834
/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (0xffffffff if the device is not addressable)
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Invalid target: behave like a normal PCI master abort (all ones) */
    if (!address)
        return 0xffffffff;
    /* Config space is little endian on the wire */
    return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
}
854
855
/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to unaddressable devices are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint8(address, val);
}
872
873
/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to unaddressable devices are silently dropped */
    if (!address)
        return;
    /* Config space is little endian on the wire */
    cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
890
891
/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to unaddressable devices are silently dropped */
    if (!address)
        return;
    /* Config space is little endian on the wire */
    cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
908
909
910/**
911 * Read a PCIe config space register indirectly. This is used for
912 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
913 *
914 * @param pcie_port  PCIe port to read from
915 * @param cfg_offset Address to read
916 *
917 * @return Value read
918 */
919uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
920{
921    cvmx_pescx_cfg_rd_t pescx_cfg_rd;
922    pescx_cfg_rd.u64 = 0;
923    pescx_cfg_rd.s.addr = cfg_offset;
924    cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
925    pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
926    return pescx_cfg_rd.s.data;
927}
928
929
930/**
931 * Write a PCIe config space register indirectly. This is used for
932 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
933 *
934 * @param pcie_port  PCIe port to write to
935 * @param cfg_offset Address to write
936 * @param val        Value to write
937 */
938void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
939{
940    cvmx_pescx_cfg_wr_t pescx_cfg_wr;
941    pescx_cfg_wr.u64 = 0;
942    pescx_cfg_wr.s.addr = cfg_offset;
943    pescx_cfg_wr.s.data = val;
944    cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
945}
946
947
/**
 * Initialize a PCIe port for use in target(EP) mode.
 *
 * Only PCIe port 0 is configured here. The chip must be strapped for
 * endpoint operation; if NPEI_CTL_STATUS reports host(RC) mode this
 * function refuses to continue.
 *
 * @return Zero on success, -1 if the chip is in host(RC) mode
 */
int cvmx_pcie_ep_initialize(void)
{
    int pcie_port = 0;  /* EP initialization is hard-coded to port 0 */
    cvmx_npei_ctl_status_t npei_ctl_status;

    /* Bail out if the hardware says we are strapped as a host */
    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
    if (npei_ctl_status.s.host_mode)
        return -1;

    /* Enable bus master and memory */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEP_CFG001, 0x6);

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        pciercx_cfg030.s.mps = 0; /* Max payload size = 128 bytes (Limit of most PCs) */
        pciercx_cfg030.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop. */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
    /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
    {
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = 0; /* Max payload size = 128 bytes (Limit of most PCs) */
        npei_ctl_status2.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }

    /* Setup Mem access SubDID 12 to access Host memory */
    {
        cvmx_npei_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 1;  /* Merging is allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
    }
    return 0;
}
1010
1011
/**
 * Wait for posted PCIe read/writes to reach the other side of
 * the internal PCIe switch. This will insure that core
 * read/writes are posted before anything after this function
 * is called. This may be necessary when writing to memory that
 * will later be read using the DMA/PKT engines.
 *
 * @param pcie_port PCIe port to wait for
 */
void cvmx_pcie_wait_for_pending(int pcie_port)
{
    cvmx_npei_data_out_cnt_t npei_data_out_cnt;
    int a;  /* Start of the in-flight window (snapshot of the in-use count) */
    int b;  /* Inclusive end of the in-flight window, modulo 2^16 */
    int c;  /* Current in-use count polled from hardware */

    /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
        description of how this code works */
    npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
    if (pcie_port)
    {
        /* Nothing is in flight when the FIFO count is zero */
        if (!npei_data_out_cnt.s.p1_fcnt)
            return;
        a = npei_data_out_cnt.s.p1_ucnt;
        b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
    }
    else
    {
        /* Nothing is in flight when the FIFO count is zero */
        if (!npei_data_out_cnt.s.p0_fcnt)
            return;
        a = npei_data_out_cnt.s.p0_ucnt;
        b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
    }

    /* Poll until the hardware counter leaves the window [a, b]. The window
        can wrap around the 16-bit counter, hence the two comparison cases.
        NOTE(review): this spins forever if the port hangs — presumably the
        manual's procedure guarantees forward progress; no timeout here */
    while (1)
    {
        npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
        c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
        if (a<=b)
        {
            /* Non-wrapped window: done once c is outside [a, b] */
            if ((c<a) || (c>b))
                return;
        }
        else
        {
            /* Wrapped window: done once c is strictly between b and a */
            if ((c>b) && (c<a))
                return;
        }
    }
}
1062
1063