cvmx-pcie.c revision 215990
1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42
43
44
45
46/**
47 * @file
48 *
49 * Interface to PCIe as a host(RC) or target(EP)
50 *
51 * <hr>$Revision: 52004 $<hr>
52 */
53#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
54#include <asm/octeon/cvmx.h>
55#include <asm/octeon/cvmx-config.h>
56#include <asm/octeon/cvmx-clock.h>
57#include <asm/octeon/cvmx-ciu-defs.h>
58#include <asm/octeon/cvmx-dpi-defs.h>
59#include <asm/octeon/cvmx-npi-defs.h>
60#include <asm/octeon/cvmx-npei-defs.h>
61#include <asm/octeon/cvmx-pci-defs.h>
62#include <asm/octeon/cvmx-pcieepx-defs.h>
63#include <asm/octeon/cvmx-pciercx-defs.h>
64#include <asm/octeon/cvmx-pemx-defs.h>
65#include <asm/octeon/cvmx-pexp-defs.h>
66#include <asm/octeon/cvmx-pescx-defs.h>
67#include <asm/octeon/cvmx-sli-defs.h>
68#include <asm/octeon/cvmx-sriox-defs.h>
69
70#ifdef CONFIG_CAVIUM_DECODE_RSL
71#include <asm/octeon/cvmx-error.h>
72#endif
73#include <asm/octeon/cvmx-helper.h>
74#include <asm/octeon/cvmx-helper-board.h>
75#include <asm/octeon/cvmx-helper-errata.h>
76#include <asm/octeon/cvmx-pcie.h>
77#include <asm/octeon/cvmx-sysinfo.h>
78#include <asm/octeon/cvmx-swap.h>
79#include <asm/octeon/cvmx-wqe.h>
80#else
81#include "cvmx.h"
82#include "cvmx-csr-db.h"
83#include "cvmx-pcie.h"
84#include "cvmx-sysinfo.h"
85#include "cvmx-swap.h"
86#include "cvmx-wqe.h"
87#include "cvmx-error.h"
88#include "cvmx-helper-errata.h"
89#endif
90
91#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
92#define MPS_CN5XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
93#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
94#define MPS_CN6XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
95
96/**
97 * Return the Core virtual base address for PCIe IO access. IOs are
98 * read/written as an offset from this address.
99 *
100 * @param pcie_port PCIe port the IO is for
101 *
102 * @return 64bit Octeon IO base address for read/write
103 */
104uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
105{
106    cvmx_pcie_address_t pcie_addr;
107    pcie_addr.u64 = 0;
108    pcie_addr.io.upper = 0;
109    pcie_addr.io.io = 1;
110    pcie_addr.io.did = 3;
111    pcie_addr.io.subdid = 2;
112    pcie_addr.io.es = 1;
113    pcie_addr.io.port = pcie_port;
114    return pcie_addr.u64;
115}
116
117
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the IO window
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Every port exposes the same fixed 4GB (2^32 byte) IO window */
    return 0x100000000ull;
}
130
131
132/**
133 * Return the Core virtual base address for PCIe MEM access. Memory is
134 * read/written as an offset from this address.
135 *
136 * @param pcie_port PCIe port the IO is for
137 *
138 * @return 64bit Octeon IO base address for read/write
139 */
140uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
141{
142    cvmx_pcie_address_t pcie_addr;
143    pcie_addr.u64 = 0;
144    pcie_addr.mem.upper = 0;
145    pcie_addr.mem.io = 1;
146    pcie_addr.mem.did = 3;
147    pcie_addr.mem.subdid = 3 + pcie_port;
148    return pcie_addr.u64;
149}
150
151
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the Mem window
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Every port exposes the same fixed 2^36 byte (64GB) memory window */
    return 0x1000000000ull;
}
164
165
/**
 * @INTERNAL
 * Initialize the RC config space CSRs
 *
 * Programs the root-complex configuration registers for one PCIe port:
 * payload/read-request sizing (kept in sync with the NPEI or DPI/SLI
 * side), error reporting and ECRC, access enables, bus numbers, BAR
 * windows (disabled), and the interrupt enables. The CSR writes below
 * are intentionally performed in this order.
 *
 * @param pcie_port PCIe port to initialize
 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            /* CN5XXX: 128 byte MPS/MRRS (MPS_CN5XXX/MRRS_CN5XXX above) */
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            /* CN6XXX: 128 byte MPS, 1024 byte MRRS (MPS_CN6XXX/MRRS_CN6XXX above) */
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        /* Keep the SLI S2M MRRS in agreement with the DPI setting above */
        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
    /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control disabled. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100;
        pciercx_cfg008.s.ml_addr = 0; /* limit < base disables the window */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100;
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
347
/**
 * @INTERNAL
 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * Sets the lane-width mode, applies CN52XX errata workarounds, enables
 * the link, polls for data link layer active, clears any pending NPEI
 * interrupts, and widens the replay timer for the negotiated width.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the link does not come up in time
 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width based on the QLM configuration strap */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1; /* Mask completion length errors */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      pescx_ctl_status.s.lane_swp = 1;
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link by setting the link-enable bit */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up (PCIE*_CFG032[DLLA] set). The timeout
        is two core-clock-seconds worth of cycles */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors (write-one-to-clear by echoing the read) */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
        /* NOTE(review): no default case - an unexpected negotiated link
            width leaves RTL at the value read above */
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
449
450
451/**
452 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
453 * the bus.
454 *
455 * @param pcie_port PCIe port to initialize
456 *
457 * @return Zero on success
458 */
459static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
460{
461    int i;
462    int base;
463    uint64_t addr_swizzle;
464    cvmx_ciu_soft_prst_t ciu_soft_prst;
465    cvmx_pescx_bist_status_t pescx_bist_status;
466    cvmx_pescx_bist_status2_t pescx_bist_status2;
467    cvmx_npei_ctl_status_t npei_ctl_status;
468    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
469    cvmx_npei_mem_access_subidx_t mem_access_subid;
470    cvmx_npei_dbg_data_t npei_dbg_data;
471    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
472    cvmx_pciercx_cfg032_t pciercx_cfg032;
473    cvmx_npei_bar1_indexx_t bar1_index;
474
475retry:
476    /* Make sure we aren't trying to setup a target mode interface in host mode */
477    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
478    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
479    {
480        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
481        return -1;
482    }
483
484    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
485    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
486    {
487        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
488        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
489        {
490            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
491            return -1;
492        }
493    }
494
495    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
496    npei_ctl_status.s.arb = 1;
497    /* Allow up to 0x20 config retries */
498    npei_ctl_status.s.cfg_rtry = 0x20;
499    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
500    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
501    {
502        npei_ctl_status.s.p0_ntags = 0x20;
503        npei_ctl_status.s.p1_ntags = 0x20;
504    }
505    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
506
507    /* Bring the PCIe out of reset */
508    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
509    {
510        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
511            workaround for this bug, we bring both PCIe ports out of reset at
512            the same time instead of on separate calls. So for port 0, we bring
513            both out of reset and do nothing on port 1 */
514        if (pcie_port == 0)
515        {
516            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
517            /* After a chip reset the PCIe will also be in reset. If it isn't,
518                most likely someone is trying to init it again without a proper
519                PCIe reset */
520            if (ciu_soft_prst.s.soft_prst == 0)
521            {
522		/* Reset the ports */
523		ciu_soft_prst.s.soft_prst = 1;
524		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
525		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
526		ciu_soft_prst.s.soft_prst = 1;
527		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
528		/* Wait until pcie resets the ports. */
529		cvmx_wait_usec(2000);
530            }
531            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
532            ciu_soft_prst.s.soft_prst = 0;
533            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
534            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
535            ciu_soft_prst.s.soft_prst = 0;
536            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
537        }
538    }
539    else
540    {
541        /* The normal case: The PCIe ports are completely separate and can be
542            brought out of reset independently */
543        if (pcie_port)
544            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
545        else
546            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
547        /* After a chip reset the PCIe will also be in reset. If it isn't,
548            most likely someone is trying to init it again without a proper
549            PCIe reset */
550        if (ciu_soft_prst.s.soft_prst == 0)
551        {
552	    /* Reset the port */
553	    ciu_soft_prst.s.soft_prst = 1;
554	    if (pcie_port)
555		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
556 	    else
557		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
558	    /* Wait until pcie resets the ports. */
559	    cvmx_wait_usec(2000);
560        }
561        if (pcie_port)
562        {
563            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
564            ciu_soft_prst.s.soft_prst = 0;
565            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
566        }
567        else
568        {
569            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
570            ciu_soft_prst.s.soft_prst = 0;
571            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
572        }
573    }
574
575    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
576       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
577    cvmx_wait(400000);
578
579    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
580        CN52XX, so we only probe it on newer chips */
581    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
582    {
583        /* Clear PCLK_RUN so we can check if the clock is running */
584        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
585        pescx_ctl_status2.s.pclk_run = 1;
586        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
587        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
588            us the clock is running */
589        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
590            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
591        {
592            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
593            return -1;
594        }
595    }
596
597    /* Check and make sure PCIe came out of reset. If it doesn't the board
598        probably hasn't wired the clocks up and the interface should be
599        skipped */
600    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
601    if (pescx_ctl_status2.s.pcierst)
602    {
603        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
604        return -1;
605    }
606
607    /* Check BIST2 status. If any bits are set skip this interface. This
608        is an attempt to catch PCIE-813 on pass 1 parts */
609    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
610    if (pescx_bist_status2.u64)
611    {
612        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
613        return -1;
614    }
615
616    /* Check BIST status */
617    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
618    if (pescx_bist_status.u64)
619        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
620
621    /* Initialize the config space CSRs */
622    __cvmx_pcie_rc_initialize_config_space(pcie_port);
623
624    /* Bring the link up */
625    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
626    {
627        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
628        return -1;
629    }
630
631    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
632    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
633    npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
634    npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
635    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
636
637    /* Setup Mem access SubDIDs */
638    mem_access_subid.u64 = 0;
639    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
640    mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
641    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
642    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
643    mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
644    mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
645    mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
646    mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
647    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
648
649    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
650    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
651    {
652        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
653        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
654    }
655
656    /* Disable the peer to peer forwarding register. This must be setup
657        by the OS after it enumerates the bus and assigns addresses to the
658        PCIe busses */
659    for (i=0; i<4; i++)
660    {
661        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
662        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
663    }
664
665    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
666    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
667
668    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
669    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
670
671    bar1_index.u32 = 0;
672    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
673    bar1_index.s.ca = 1;       /* Not Cached */
674    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
675    bar1_index.s.addr_v = 1;   /* Valid entry */
676
677    base = pcie_port ? 16 : 0;
678
679    /* Big endian swizzle for 32-bit PEXP_NCB register. */
680#ifdef __MIPSEB__
681    addr_swizzle = 4;
682#else
683    addr_swizzle = 0;
684#endif
685    for (i = 0; i < 16; i++) {
686        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
687        base++;
688        /* 256MB / 16 >> 22 == 4 */
689        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
690    }
691
692    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
693        where they overlap. It also overlaps with the device addresses, so
694        make sure the peer to peer forwarding is set right */
695    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
696
697    /* Setup BAR2 attributes */
698    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
    /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
    /* - WAIT_COM=0 will likely work for all applications. */
701    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
702    if (pcie_port)
703    {
704        cvmx_npei_ctl_port1_t npei_ctl_port;
705        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
706        npei_ctl_port.s.bar2_enb = 1;
707        npei_ctl_port.s.bar2_esx = 1;
708        npei_ctl_port.s.bar2_cax = 0;
709        npei_ctl_port.s.ptlp_ro = 1;
710        npei_ctl_port.s.ctlp_ro = 1;
711        npei_ctl_port.s.wait_com = 0;
712        npei_ctl_port.s.waitl_com = 0;
713        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
714    }
715    else
716    {
717        cvmx_npei_ctl_port0_t npei_ctl_port;
718        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
719        npei_ctl_port.s.bar2_enb = 1;
720        npei_ctl_port.s.bar2_esx = 1;
721        npei_ctl_port.s.bar2_cax = 0;
722        npei_ctl_port.s.ptlp_ro = 1;
723        npei_ctl_port.s.ctlp_ro = 1;
724        npei_ctl_port.s.wait_com = 0;
725        npei_ctl_port.s.waitl_com = 0;
726        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
727    }
728
729    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
730        TLP ordering to not be preserved after multiple PCIe port resets. This
731        code detects this fault and corrects it by aligning the TLP counters
732        properly. Another link reset is then performed. See PCIE-13340 */
733    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
734        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
735    {
736        cvmx_npei_dbg_data_t dbg_data;
737        int old_in_fif_p_count;
738        int in_fif_p_count;
739        int out_p_count;
740        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
741        int i;
742
743        /* Choose a write address of 1MB. It should be harmless as all bars
744            haven't been setup */
745        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
746
747        /* Make sure at least in_p_offset have been executed before we try and
748            read in_fif_p_count */
749        i = in_p_offset;
750        while (i--)
751        {
752            cvmx_write64_uint32(write_address, 0);
753            cvmx_wait(10000);
754        }
755
756        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
757            unstable sometimes so read it twice with a write between the reads.
758            This way we can tell the value is good as it will increment by one
759            due to the write */
760        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
761        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
762        do
763        {
764            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
765            old_in_fif_p_count = dbg_data.s.data & 0xff;
766            cvmx_write64_uint32(write_address, 0);
767            cvmx_wait(10000);
768            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
769            in_fif_p_count = dbg_data.s.data & 0xff;
770        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
771
772        /* Update in_fif_p_count for it's offset with respect to out_p_count */
773        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
774
775        /* Read the OUT_P_COUNT from the debug select */
776        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
777        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
778        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
779        out_p_count = (dbg_data.s.data>>1) & 0xff;
780
781        /* Check that the two counters are aligned */
782        if (out_p_count != in_fif_p_count)
783        {
784            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
785            while (in_fif_p_count != 0)
786            {
787                cvmx_write64_uint32(write_address, 0);
788                cvmx_wait(10000);
789                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
790            }
791            /* The EBH5200 board swapped the PCIe reset lines on the board. This
792                means we must bring both links down and up, which will cause the
793                PCIe0 to need alignment again. Lots of messages will be displayed,
794                but everything should work */
795            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
796                (pcie_port == 1))
797                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
799            goto retry;
800        }
801    }
802
803    /* Display the link status */
804    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
805    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
806
807    return 0;
808}
809
810
811/**
812 * @INTERNAL
813 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
814 * port from reset to a link up state. Software can then begin
815 * configuring the rest of the link.
816 *
817 * @param pcie_port PCIe port to initialize
818 *
819 * @return Zero on success
820 */
821static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
822{
823    uint64_t start_cycle;
824    cvmx_pemx_ctl_status_t pem_ctl_status;
825    cvmx_pciercx_cfg032_t pciercx_cfg032;
826    cvmx_pciercx_cfg448_t pciercx_cfg448;
827
828    /* Bring up the link */
829    pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
830    pem_ctl_status.s.lnk_enb = 1;
831    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
832
833    /* Wait for the link to come up */
834    start_cycle = cvmx_get_cycle();
835    do
836    {
837        if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
838            return -1;
839        cvmx_wait(10000);
840        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
841    } while (pciercx_cfg032.s.dlla == 0);
842
843    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
844        little longer to respond than expected under load. As a workaround for
845        this we configure the Replay Time Limit to the value expected for a 512
846        byte MPS instead of our actual 256 byte MPS. The numbers below are
847        directly from the PCIe spec table 3-4 */
848    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
849    switch (pciercx_cfg032.s.nlw)
850    {
851        case 1: /* 1 lane */
852            pciercx_cfg448.s.rtl = 1677;
853            break;
854        case 2: /* 2 lanes */
855            pciercx_cfg448.s.rtl = 867;
856            break;
857        case 4: /* 4 lanes */
858            pciercx_cfg448.s.rtl = 462;
859            break;
860        case 8: /* 8 lanes */
861            pciercx_cfg448.s.rtl = 258;
862            break;
863    }
864    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
865
866    return 0;
867}
868
869
870/**
871 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
872 * the bus.
873 *
874 * @param pcie_port PCIe port to initialize
875 *
876 * @return Zero on success
877 */
878static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
879{
880    int i;
881    cvmx_ciu_soft_prst_t ciu_soft_prst;
882    cvmx_mio_rst_ctlx_t mio_rst_ctl;
883    cvmx_pemx_bar_ctl_t pemx_bar_ctl;
884    cvmx_pemx_ctl_status_t pemx_ctl_status;
885    cvmx_pemx_bist_status_t pemx_bist_status;
886    cvmx_pemx_bist_status2_t pemx_bist_status2;
887    cvmx_pciercx_cfg032_t pciercx_cfg032;
888    cvmx_pciercx_cfg515_t pciercx_cfg515;
889    cvmx_sli_ctl_portx_t sli_ctl_portx;
890    cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
891    cvmx_sli_mem_access_subidx_t mem_access_subid;
892    cvmx_mio_rst_ctlx_t mio_rst_ctlx;
893    cvmx_sriox_status_reg_t sriox_status_reg;
894    cvmx_pemx_bar1_indexx_t bar1_index;
895
896    /* Make sure this interface isn't SRIO */
897    sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
898    if (sriox_status_reg.s.srio)
899    {
900        cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
901        return -1;
902    }
903
904    /* Make sure we aren't trying to setup a target mode interface in host mode */
905    mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
906    if (!mio_rst_ctl.s.host_mode)
907    {
908        cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
909        return -1;
910    }
911
912    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
913    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
914    {
915        if (pcie_port)
916        {
917            cvmx_ciu_qlm1_t ciu_qlm;
918            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
919            ciu_qlm.s.txbypass = 1;
920            ciu_qlm.s.txdeemph = 5;
921            ciu_qlm.s.txmargin = 0x17;
922            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
923        }
924        else
925        {
926            cvmx_ciu_qlm0_t ciu_qlm;
927            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
928            ciu_qlm.s.txbypass = 1;
929            ciu_qlm.s.txdeemph = 5;
930            ciu_qlm.s.txmargin = 0x17;
931            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
932        }
933    }
934
935    /* Bring the PCIe out of reset */
936    if (pcie_port)
937        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
938    else
939        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
940    /* After a chip reset the PCIe will also be in reset. If it isn't,
941        most likely someone is trying to init it again without a proper
942        PCIe reset */
943    if (ciu_soft_prst.s.soft_prst == 0)
944    {
945        /* Reset the port */
946        ciu_soft_prst.s.soft_prst = 1;
947        if (pcie_port)
948            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
949        else
950            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
951        /* Wait until pcie resets the ports. */
952        cvmx_wait_usec(2000);
953    }
954    if (pcie_port)
955    {
956        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
957        ciu_soft_prst.s.soft_prst = 0;
958        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
959    }
960    else
961    {
962        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
963        ciu_soft_prst.s.soft_prst = 0;
964        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
965    }
966
967    /* Wait for PCIe reset to complete */
968    cvmx_wait_usec(1000);
969
970    /* Check and make sure PCIe came out of reset. If it doesn't the board
971        probably hasn't wired the clocks up and the interface should be
972        skipped */
973    mio_rst_ctlx.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
974    if (!mio_rst_ctlx.s.rst_done)
975    {
976        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
977        return -1;
978    }
979
980    /* Check BIST status */
981    pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
982    if (pemx_bist_status.u64)
983        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
984    pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
985    if (pemx_bist_status2.u64)
986        cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
987
988    /* Initialize the config space CSRs */
989    __cvmx_pcie_rc_initialize_config_space(pcie_port);
990
991    /* Enable gen2 speed selection */
992    pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
993    pciercx_cfg515.s.dsc = 1;
994    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
995
996    /* Bring the link up */
997    if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
998    {
999        /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1000            gen2 and try again with only gen1 */
1001        cvmx_pciercx_cfg031_t pciercx_cfg031;
1002        pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1003        pciercx_cfg031.s.mls = 1;
1004        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg515.u32);
1005        if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1006        {
1007            cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1008            return -1;
1009        }
1010    }
1011
1012    /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1013    sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1014    sli_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
1015    sli_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
1016    cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1017
1018    /* Setup Mem access SubDIDs */
1019    mem_access_subid.u64 = 0;
1020    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1021    mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
1022    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
1023    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
1024    mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1025    mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1026    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
1027
1028    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1029    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1030    {
1031        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1032        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
1033    }
1034
1035    /* Disable the peer to peer forwarding register. This must be setup
1036        by the OS after it enumerates the bus and assigns addresses to the
1037        PCIe busses */
1038    for (i=0; i<4; i++)
1039    {
1040        cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1041        cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1042    }
1043
1044    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1045    cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1046
1047    /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1048        where they overlap. It also overlaps with the device addresses, so
1049        make sure the peer to peer forwarding is set right */
1050    cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1051
1052    /* Setup BAR2 attributes */
1053    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1054    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
1055    /* � WAIT_COM=0 will likely work for all applications. */
1056    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1057    pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1058    pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1*/
1059    pemx_bar_ctl.s.bar2_enb = 1;
1060    pemx_bar_ctl.s.bar2_esx = 1;
1061    pemx_bar_ctl.s.bar2_cax = 0;
1062    cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1063    sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1064    sli_ctl_portx.s.ptlp_ro = 1;
1065    sli_ctl_portx.s.ctlp_ro = 1;
1066    sli_ctl_portx.s.wait_com = 0;
1067    sli_ctl_portx.s.waitl_com = 0;
1068    cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1069
1070    /* BAR1 follows BAR2 */
1071    cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1072
1073    bar1_index.u64 = 0;
1074    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1075    bar1_index.s.ca = 1;       /* Not Cached */
1076    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
1077    bar1_index.s.addr_v = 1;   /* Valid entry */
1078
1079    for (i = 0; i < 16; i++) {
1080        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1081        /* 256MB / 16 >> 22 == 4 */
1082        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1083    }
1084
1085    /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1086        clock */
1087    pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1088    pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1089    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1090
1091    /* Display the link status */
1092    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1093    cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1094
1095    return 0;
1096}
1097
1098/**
1099 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1100 *
1101 * @param pcie_port PCIe port to initialize
1102 *
1103 * @return Zero on success
1104 */
1105int cvmx_pcie_rc_initialize(int pcie_port)
1106{
1107    int result;
1108    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1109        result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1110    else
1111        result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1112#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1113    if (result == 0)
1114        cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1115#endif
1116    return result;
1117}
1118
1119
1120/**
1121 * Shutdown a PCIe port and put it in reset
1122 *
1123 * @param pcie_port PCIe port to shutdown
1124 *
1125 * @return Zero on success
1126 */
1127int cvmx_pcie_rc_shutdown(int pcie_port)
1128{
1129#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1130    cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1131#endif
1132    /* Wait for all pending operations to complete */
1133    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1134    {
1135        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1136            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1137    }
1138    else
1139    {
1140        if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1141            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1142    }
1143
1144    /* Force reset */
1145    if (pcie_port)
1146    {
1147        cvmx_ciu_soft_prst_t ciu_soft_prst;
1148        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1149        ciu_soft_prst.s.soft_prst = 1;
1150        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1151    }
1152    else
1153    {
1154        cvmx_ciu_soft_prst_t ciu_soft_prst;
1155        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1156        ciu_soft_prst.s.soft_prst = 1;
1157        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1158    }
1159    return 0;
1160}
1161
1162
1163/**
1164 * @INTERNAL
1165 * Build a PCIe config space request address for a device
1166 *
1167 * @param pcie_port PCIe port to access
1168 * @param bus       Sub bus
1169 * @param dev       Device ID
1170 * @param fn        Device sub function
1171 * @param reg       Register to access
1172 *
1173 * @return 64bit Octeon IO address
1174 */
1175static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1176{
1177    cvmx_pcie_address_t pcie_addr;
1178    cvmx_pciercx_cfg006_t pciercx_cfg006;
1179
1180    pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1181    if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1182        return 0;
1183
1184    pcie_addr.u64 = 0;
1185    pcie_addr.config.upper = 2;
1186    pcie_addr.config.io = 1;
1187    pcie_addr.config.did = 3;
1188    pcie_addr.config.subdid = 1;
1189    pcie_addr.config.es = 1;
1190    pcie_addr.config.port = pcie_port;
1191    pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1192    pcie_addr.config.bus = bus;
1193    pcie_addr.config.dev = dev;
1194    pcie_addr.config.func = fn;
1195    pcie_addr.config.reg = reg;
1196    return pcie_addr.u64;
1197}
1198
1199
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read, or all ones if the device isn't addressable
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* A zero address means the request was rejected; report all ones
        like a master abort on a physical bus */
    if (!address)
        return 0xff;
    return cvmx_read64_uint8(address);
}
1219
1220
/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read, or all ones if the device isn't addressable
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* A zero address means the request was rejected; report all ones
        like a master abort on a physical bus */
    if (!address)
        return 0xffff;
    return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
}
1240
1241
/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read, or all ones if the device isn't addressable
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* A zero address means the request was rejected; report all ones
        like a master abort on a physical bus */
    if (!address)
        return 0xffffffff;
    return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
}
1261
1262
/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to unaddressable devices are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint8(address, val);
}
1279
1280
/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to unaddressable devices are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
1297
1298
/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to unaddressable devices are silently dropped */
    if (!address)
        return;
    cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
1315
1316
1317/**
1318 * Read a PCIe config space register indirectly. This is used for
1319 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1320 *
1321 * @param pcie_port  PCIe port to read from
1322 * @param cfg_offset Address to read
1323 *
1324 * @return Value read
1325 */
1326uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1327{
1328    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1329    {
1330        cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1331        pescx_cfg_rd.u64 = 0;
1332        pescx_cfg_rd.s.addr = cfg_offset;
1333        cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1334        pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1335        return pescx_cfg_rd.s.data;
1336    }
1337    else
1338    {
1339        cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1340        pemx_cfg_rd.u64 = 0;
1341        pemx_cfg_rd.s.addr = cfg_offset;
1342        cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1343        pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1344        return pemx_cfg_rd.s.data;
1345    }
1346}
1347
1348
1349/**
1350 * Write a PCIe config space register indirectly. This is used for
1351 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1352 *
1353 * @param pcie_port  PCIe port to write to
1354 * @param cfg_offset Address to write
1355 * @param val        Value to write
1356 */
1357void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1358{
1359    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1360    {
1361        cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1362        pescx_cfg_wr.u64 = 0;
1363        pescx_cfg_wr.s.addr = cfg_offset;
1364        pescx_cfg_wr.s.data = val;
1365        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1366    }
1367    else
1368    {
1369        cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1370        pemx_cfg_wr.u64 = 0;
1371        pemx_cfg_wr.s.addr = cfg_offset;
1372        pemx_cfg_wr.s.data = val;
1373        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1374    }
1375}
1376
1377
/**
 * Initialize a PCIe port for use in target(EP) mode.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the port is strapped for host mode
 */
int cvmx_pcie_ep_initialize(int pcie_port)
{
    /* Refuse to run if the port is actually in host(RC) mode. The mode
        bit lives in different CSRs on NPEI vs newer chips */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_ctl_status_t npei_ctl_status;
        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
        if (npei_ctl_status.s.host_mode)
            return -1;
    }
    else
    {
        cvmx_mio_rst_ctlx_t mio_rst_ctl;
        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
        if (mio_rst_ctl.s.host_mode)
            return -1;
    }

    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        if (pcie_port)
        {
            cvmx_ciu_qlm1_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
        }
        else
        {
            cvmx_ciu_qlm0_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
        }
    }

    /* Enable bus master and memory */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);

    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
        /* MPS/MRRS limits differ per chip family */
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pcieepx_cfg030.s.mps = MPS_CN5XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pcieepx_cfg030.s.mps = MPS_CN6XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
        pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
        pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* Setup Mem access SubDID 12 to access Host memory */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 1;  /* Merging is not allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
    }
    else
    {
        cvmx_sli_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 0;  /* Merging is allowed in this window. */
        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
        mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
        /* SLI based chips space SubDIDs 4 apart per port */
        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
    }
    return 0;
}
1511
1512
1513/**
1514 * Wait for posted PCIe read/writes to reach the other side of
1515 * the internal PCIe switch. This will insure that core
1516 * read/writes are posted before anything after this function
1517 * is called. This may be necessary when writing to memory that
1518 * will later be read using the DMA/PKT engines.
1519 *
1520 * @param pcie_port PCIe port to wait for
1521 */
1522void cvmx_pcie_wait_for_pending(int pcie_port)
1523{
1524    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1525    {
1526        cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1527        int a;
1528        int b;
1529        int c;
1530
1531        /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1532            description of how this code works */
1533        npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1534        if (pcie_port)
1535        {
1536            if (!npei_data_out_cnt.s.p1_fcnt)
1537                return;
1538            a = npei_data_out_cnt.s.p1_ucnt;
1539            b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1540        }
1541        else
1542        {
1543            if (!npei_data_out_cnt.s.p0_fcnt)
1544                return;
1545            a = npei_data_out_cnt.s.p0_ucnt;
1546            b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1547        }
1548
1549        while (1)
1550        {
1551            npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1552            c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1553            if (a<=b)
1554            {
1555                if ((c<a) || (c>b))
1556                    return;
1557            }
1558            else
1559            {
1560                if ((c>b) && (c<a))
1561                    return;
1562            }
1563        }
1564    }
1565    else
1566    {
1567        cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1568        int a;
1569        int b;
1570        int c;
1571
1572        sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1573        if (pcie_port)
1574        {
1575            if (!sli_data_out_cnt.s.p1_fcnt)
1576                return;
1577            a = sli_data_out_cnt.s.p1_ucnt;
1578            b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1579        }
1580        else
1581        {
1582            if (!sli_data_out_cnt.s.p0_fcnt)
1583                return;
1584            a = sli_data_out_cnt.s.p0_ucnt;
1585            b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1586        }
1587
1588        while (1)
1589        {
1590            sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1591            c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
1592            if (a<=b)
1593            {
1594                if ((c<a) || (c>b))
1595                    return;
1596            }
1597            else
1598            {
1599                if ((c>b) && (c<a))
1600                    return;
1601            }
1602        }
1603    }
1604}
1605