cvmx-pcie.c revision 232812
/***********************license start***************
 * Copyright (c) 2003-2011  Cavium, Inc. <support@cavium.com>.  All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to PCIe as a host(RC) or target(EP)
 *
 * <hr>$Revision: 70030 $<hr>
 */
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-clock.h>
#include <asm/octeon/cvmx-ciu-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/cvmx-pcieepx-defs.h>
#include <asm/octeon/cvmx-pciercx-defs.h>
#include <asm/octeon/cvmx-pemx-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pescx-defs.h>
#include <asm/octeon/cvmx-sli-defs.h>
#include <asm/octeon/cvmx-sriox-defs.h>
#include <asm/octeon/cvmx-helper-jtag.h>

#ifdef CONFIG_CAVIUM_DECODE_RSL
#include <asm/octeon/cvmx-error.h>
#endif
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-helper-errata.h>
#include <asm/octeon/cvmx-qlm.h>
#include <asm/octeon/cvmx-pcie.h>
#include <asm/octeon/cvmx-sysinfo.h>
#include <asm/octeon/cvmx-swap.h>
#include <asm/octeon/cvmx-wqe.h>
#else
#include "cvmx.h"
#include "cvmx-csr-db.h"
#include "cvmx-pcie.h"
#include "cvmx-sysinfo.h"
#include "cvmx-swap.h"
#include "cvmx-wqe.h"
#include "cvmx-error.h"
#include "cvmx-helper-errata.h"
#include "cvmx-qlm.h"
#endif

/* Max Read Request Size / Max Payload Size encodings programmed into the
   PCIe config space and DMA CSRs below. Per the comments, 0 encodes 128
   bytes and 3 encodes 1024 bytes (standard PCIe 128 << n encoding). */
#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
#define MPS_CN5XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
#define MPS_CN6XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
93
94/**
95 * Return the Core virtual base address for PCIe IO access. IOs are
96 * read/written as an offset from this address.
97 *
98 * @param pcie_port PCIe port the IO is for
99 *
100 * @return 64bit Octeon IO base address for read/write
101 */
102uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
103{
104    cvmx_pcie_address_t pcie_addr;
105    pcie_addr.u64 = 0;
106    pcie_addr.io.upper = 0;
107    pcie_addr.io.io = 1;
108    pcie_addr.io.did = 3;
109    pcie_addr.io.subdid = 2;
110    pcie_addr.io.es = 1;
111    pcie_addr.io.port = pcie_port;
112    return pcie_addr.u64;
113}
114
115
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the IO window
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    (void)pcie_port; /* Every port exposes the same 4GB IO window */
    return 0x100000000ull; /* 1ull << 32 */
}
128
129
130/**
131 * Return the Core virtual base address for PCIe MEM access. Memory is
132 * read/written as an offset from this address.
133 *
134 * @param pcie_port PCIe port the IO is for
135 *
136 * @return 64bit Octeon IO base address for read/write
137 */
138uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
139{
140    cvmx_pcie_address_t pcie_addr;
141    pcie_addr.u64 = 0;
142    pcie_addr.mem.upper = 0;
143    pcie_addr.mem.io = 1;
144    pcie_addr.mem.did = 3;
145    pcie_addr.mem.subdid = 3 + pcie_port;
146    return pcie_addr.u64;
147}
148
149
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the Mem window
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    (void)pcie_port; /* Every port exposes the same 64GB MEM window */
    return 0x1000000000ull; /* 1ull << 36 */
}
162
163
/**
 * @INTERNAL
 * Initialize the RC config space CSRs
 *
 * Programs MPS/MRRS, error-reporting enables, ECRC, access enables,
 * bus numbers, and disables the RC BARs (so the OS assigns addresses
 * during enumeration). Each register write is tagged with the CSR
 * fields it touches; the order follows the vendor init sequence and
 * should be preserved.
 *
 * @param pcie_port PCIe port to initialize
 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    /* The DMA engine MPS/MRRS must mirror what was just written to CFG030;
       NPEI is the older (CN5XXX-era) engine, DPI/SLI the newer one. */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        /* Max outstanding load request. */
        prt_cfg.s.molr = 32;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
    /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control: ASPM disabled. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100; /* base > limit (0) disables the window */
        pciercx_cfg008.s.ml_addr = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100;
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
347
/**
 * @INTERNAL
 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the link never reported "data link
 *         layer active" within the timeout
 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width based on the QLM configuration strap */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1; /* Mask completion length errors */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      switch (cvmx_sysinfo_get()->board_type)
      {
#if defined(OCTEON_VENDOR_LANNER)
	case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
	  /* This board is wired without the lane swap */
	  break;
#endif
	default:
	  pescx_ctl_status.s.lane_swp = 1;
	  break;
      }
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up (DLLA = data link layer active) */
    start_cycle = cvmx_get_cycle();
    do
    {
        /* Timeout budget is 2x the core clock rate in cycles, i.e. ~2 seconds */
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors (write-1-to-clear via read-back) */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw) /* negotiated link width */
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
        /* NOTE(review): any other negotiated width leaves RTL at its
           current value — presumably intentional; confirm */
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
458
459static inline void __cvmx_increment_ba(cvmx_sli_mem_access_subidx_t *pmas)
460{
461    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
462        pmas->cn68xx.ba++;
463    else
464        pmas->cn63xx.ba++;
465}
466
467/**
468 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
469 * the bus.
470 *
471 * @param pcie_port PCIe port to initialize
472 *
473 * @return Zero on success
474 */
475static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
476{
477    int i;
478    int base;
479    uint64_t addr_swizzle;
480    cvmx_ciu_soft_prst_t ciu_soft_prst;
481    cvmx_pescx_bist_status_t pescx_bist_status;
482    cvmx_pescx_bist_status2_t pescx_bist_status2;
483    cvmx_npei_ctl_status_t npei_ctl_status;
484    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
485    cvmx_npei_mem_access_subidx_t mem_access_subid;
486    cvmx_npei_dbg_data_t npei_dbg_data;
487    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
488    cvmx_pciercx_cfg032_t pciercx_cfg032;
489    cvmx_npei_bar1_indexx_t bar1_index;
490
491retry:
492    /* Make sure we aren't trying to setup a target mode interface in host mode */
493    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
494    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
495    {
496        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
497        return -1;
498    }
499
500    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
501    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
502    {
503        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
504        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
505        {
506            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
507            return -1;
508        }
509    }
510
511    /* Make sure a CN56XX pass 1 isn't trying to do anything; errata for PASS 1 */
512    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
513        cvmx_dprintf ("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
514        return -1;
515    }
516
517    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
518    npei_ctl_status.s.arb = 1;
519    /* Allow up to 0x20 config retries */
520    npei_ctl_status.s.cfg_rtry = 0x20;
521    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
522    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
523    {
524        npei_ctl_status.s.p0_ntags = 0x20;
525        npei_ctl_status.s.p1_ntags = 0x20;
526    }
527    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
528
529    /* Bring the PCIe out of reset */
530    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
531    {
532        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
533            workaround for this bug, we bring both PCIe ports out of reset at
534            the same time instead of on separate calls. So for port 0, we bring
535            both out of reset and do nothing on port 1 */
536        if (pcie_port == 0)
537        {
538            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
539            /* After a chip reset the PCIe will also be in reset. If it isn't,
540                most likely someone is trying to init it again without a proper
541                PCIe reset */
542            if (ciu_soft_prst.s.soft_prst == 0)
543            {
544		/* Reset the ports */
545		ciu_soft_prst.s.soft_prst = 1;
546		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
547		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
548		ciu_soft_prst.s.soft_prst = 1;
549		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
550		/* Wait until pcie resets the ports. */
551		cvmx_wait_usec(2000);
552            }
553            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
554            ciu_soft_prst.s.soft_prst = 0;
555            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
556            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
557            ciu_soft_prst.s.soft_prst = 0;
558            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
559        }
560    }
561    else
562    {
563        /* The normal case: The PCIe ports are completely separate and can be
564            brought out of reset independently */
565        if (pcie_port)
566            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
567        else
568            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
569        /* After a chip reset the PCIe will also be in reset. If it isn't,
570            most likely someone is trying to init it again without a proper
571            PCIe reset */
572        if (ciu_soft_prst.s.soft_prst == 0)
573        {
574	    /* Reset the port */
575	    ciu_soft_prst.s.soft_prst = 1;
576	    if (pcie_port)
577		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
578 	    else
579		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
580	    /* Wait until pcie resets the ports. */
581	    cvmx_wait_usec(2000);
582        }
583        if (pcie_port)
584        {
585            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
586            ciu_soft_prst.s.soft_prst = 0;
587            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
588        }
589        else
590        {
591            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
592            ciu_soft_prst.s.soft_prst = 0;
593            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
594        }
595    }
596
597    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
598       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
599    cvmx_wait(400000);
600
601    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
602        CN52XX, so we only probe it on newer chips */
603    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
604    {
605        /* Clear PCLK_RUN so we can check if the clock is running */
606        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
607        pescx_ctl_status2.s.pclk_run = 1;
608        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
609        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
610            us the clock is running */
611        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
612            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
613        {
614            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
615            return -1;
616        }
617    }
618
619    /* Check and make sure PCIe came out of reset. If it doesn't the board
620        probably hasn't wired the clocks up and the interface should be
621        skipped */
622    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
623    if (pescx_ctl_status2.s.pcierst)
624    {
625        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
626        return -1;
627    }
628
629    /* Check BIST2 status. If any bits are set skip this interface. This
630        is an attempt to catch PCIE-813 on pass 1 parts */
631    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
632    if (pescx_bist_status2.u64)
633    {
634        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
635        return -1;
636    }
637
638    /* Check BIST status */
639    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
640    if (pescx_bist_status.u64)
641        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
642
643    /* Initialize the config space CSRs */
644    __cvmx_pcie_rc_initialize_config_space(pcie_port);
645
646    /* Bring the link up */
647    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
648    {
649        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
650        return -1;
651    }
652
653    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
654    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
655    npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
656    npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
657    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
658
659    /* Setup Mem access SubDIDs */
660    mem_access_subid.u64 = 0;
661    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
662    mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
663    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
664    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
665    mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
666    mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
667    mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
668    mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
669    mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
670
671    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
672    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
673    {
674        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
675        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
676    }
677
678    /* Disable the peer to peer forwarding register. This must be setup
679        by the OS after it enumerates the bus and assigns addresses to the
680        PCIe busses */
681    for (i=0; i<4; i++)
682    {
683        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
684        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
685    }
686
687    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
688    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
689
690    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
691    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
692
693    bar1_index.u32 = 0;
694    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
695    bar1_index.s.ca = 1;       /* Not Cached */
696    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
697    bar1_index.s.addr_v = 1;   /* Valid entry */
698
699    base = pcie_port ? 16 : 0;
700
701    /* Big endian swizzle for 32-bit PEXP_NCB register. */
702#ifdef __MIPSEB__
703    addr_swizzle = 4;
704#else
705    addr_swizzle = 0;
706#endif
707    for (i = 0; i < 16; i++) {
708        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
709        base++;
710        /* 256MB / 16 >> 22 == 4 */
711        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
712    }
713
714    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
715        where they overlap. It also overlaps with the device addresses, so
716        make sure the peer to peer forwarding is set right */
717    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
718
719    /* Setup BAR2 attributes */
720    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
721    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
722    /* � WAIT_COM=0 will likely work for all applications. */
723    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
724    if (pcie_port)
725    {
726        cvmx_npei_ctl_port1_t npei_ctl_port;
727        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
728        npei_ctl_port.s.bar2_enb = 1;
729        npei_ctl_port.s.bar2_esx = 1;
730        npei_ctl_port.s.bar2_cax = 0;
731        npei_ctl_port.s.ptlp_ro = 1;
732        npei_ctl_port.s.ctlp_ro = 1;
733        npei_ctl_port.s.wait_com = 0;
734        npei_ctl_port.s.waitl_com = 0;
735        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
736    }
737    else
738    {
739        cvmx_npei_ctl_port0_t npei_ctl_port;
740        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
741        npei_ctl_port.s.bar2_enb = 1;
742        npei_ctl_port.s.bar2_esx = 1;
743        npei_ctl_port.s.bar2_cax = 0;
744        npei_ctl_port.s.ptlp_ro = 1;
745        npei_ctl_port.s.ctlp_ro = 1;
746        npei_ctl_port.s.wait_com = 0;
747        npei_ctl_port.s.waitl_com = 0;
748        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
749    }
750
751    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
752        TLP ordering to not be preserved after multiple PCIe port resets. This
753        code detects this fault and corrects it by aligning the TLP counters
754        properly. Another link reset is then performed. See PCIE-13340 */
755    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
756        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
757    {
758        cvmx_npei_dbg_data_t dbg_data;
759        int old_in_fif_p_count;
760        int in_fif_p_count;
761        int out_p_count;
762        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
763        int i;
764
765        /* Choose a write address of 1MB. It should be harmless as all bars
766            haven't been setup */
767        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
768
769        /* Make sure at least in_p_offset have been executed before we try and
770            read in_fif_p_count */
771        i = in_p_offset;
772        while (i--)
773        {
774            cvmx_write64_uint32(write_address, 0);
775            cvmx_wait(10000);
776        }
777
778        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
779            unstable sometimes so read it twice with a write between the reads.
780            This way we can tell the value is good as it will increment by one
781            due to the write */
782        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
783        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
784        do
785        {
786            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
787            old_in_fif_p_count = dbg_data.s.data & 0xff;
788            cvmx_write64_uint32(write_address, 0);
789            cvmx_wait(10000);
790            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
791            in_fif_p_count = dbg_data.s.data & 0xff;
792        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
793
        /* Update in_fif_p_count for its offset with respect to out_p_count */
795        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
796
797        /* Read the OUT_P_COUNT from the debug select */
798        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
799        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
800        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
801        out_p_count = (dbg_data.s.data>>1) & 0xff;
802
803        /* Check that the two counters are aligned */
804        if (out_p_count != in_fif_p_count)
805        {
806            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
807            while (in_fif_p_count != 0)
808            {
809                cvmx_write64_uint32(write_address, 0);
810                cvmx_wait(10000);
811                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
812            }
813            /* The EBH5200 board swapped the PCIe reset lines on the board. This
814                means we must bring both links down and up, which will cause the
815                PCIe0 to need alignment again. Lots of messages will be displayed,
816                but everything should work */
817            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
818                (pcie_port == 1))
819                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
821            goto retry;
822        }
823    }
824
825    /* Display the link status */
826    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
827    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
828
829    return 0;
830}
831
832/**
833 * @INTERNAL
834 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
835 * port from reset to a link up state. Software can then begin
836 * configuring the rest of the link.
837 *
838 * @param pcie_port PCIe port to initialize
839 *
840 * @return Zero on success
841 */
842static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
843{
844    uint64_t start_cycle;
845    cvmx_pemx_ctl_status_t pem_ctl_status;
846    cvmx_pciercx_cfg032_t pciercx_cfg032;
847    cvmx_pciercx_cfg448_t pciercx_cfg448;
848
849    /* Bring up the link */
850    pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
851    pem_ctl_status.s.lnk_enb = 1;
852    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
853
854    /* Wait for the link to come up */
855    start_cycle = cvmx_get_cycle();
856    do
857    {
858        if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
859            return -1;
860        cvmx_wait(10000);
861        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
862    } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
863
864    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
865        little longer to respond than expected under load. As a workaround for
866        this we configure the Replay Time Limit to the value expected for a 512
867        byte MPS instead of our actual 256 byte MPS. The numbers below are
868        directly from the PCIe spec table 3-4 */
869    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
870    switch (pciercx_cfg032.s.nlw)
871    {
872        case 1: /* 1 lane */
873            pciercx_cfg448.s.rtl = 1677;
874            break;
875        case 2: /* 2 lanes */
876            pciercx_cfg448.s.rtl = 867;
877            break;
878        case 4: /* 4 lanes */
879            pciercx_cfg448.s.rtl = 462;
880            break;
881        case 8: /* 8 lanes */
882            pciercx_cfg448.s.rtl = 258;
883            break;
884    }
885    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
886
887    return 0;
888}
889
890
891/**
892 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
893 * the bus.
894 *
895 * @param pcie_port PCIe port to initialize
896 *
897 * @return Zero on success
898 */
899static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
900{
901    int i;
902    cvmx_ciu_soft_prst_t ciu_soft_prst;
903    cvmx_mio_rst_ctlx_t mio_rst_ctl;
904    cvmx_pemx_bar_ctl_t pemx_bar_ctl;
905    cvmx_pemx_ctl_status_t pemx_ctl_status;
906    cvmx_pemx_bist_status_t pemx_bist_status;
907    cvmx_pemx_bist_status2_t pemx_bist_status2;
908    cvmx_pciercx_cfg032_t pciercx_cfg032;
909    cvmx_pciercx_cfg515_t pciercx_cfg515;
910    cvmx_sli_ctl_portx_t sli_ctl_portx;
911    cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
912    cvmx_sli_mem_access_subidx_t mem_access_subid;
913    cvmx_pemx_bar1_indexx_t bar1_index;
914    int ep_mode;
915
916    /* Make sure this interface is PCIe */
917    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
918    {
919        /* Requires reading the MIO_QLMX_CFG register to figure
920           out the port type. */
921        int qlm = pcie_port;
922        int status;
923        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
924            qlm = 3 - (pcie_port * 2);
925        else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
926        {
927            cvmx_mio_qlmx_cfg_t qlm_cfg;
928            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
929            if (qlm_cfg.s.qlm_cfg == 1)
930                qlm = 1;
931        }
932        /* PCIe is allowed only in QLM1, 1 PCIe port in x2 or
933           2 PCIe ports in x1 */
934        else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
935            qlm = 1;
936        status = cvmx_qlm_get_status(qlm);
937        if (status == 4 || status == 5)
938        {
939            cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
940            return -1;
941        }
942        if (status == 1)
943        {
944            cvmx_dprintf("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
945            return -1;
946        }
947        if (status == 2)
948        {
949            cvmx_dprintf("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
950            return -1;
951        }
952        if (status == -1)
953        {
954            cvmx_dprintf("PCIe: Port %d is unknown, skipping.\n", pcie_port);
955            return -1;
956        }
957    }
958
959#if 0
960    /* This code is so that the PCIe analyzer is able to see 63XX traffic */
961    cvmx_dprintf("PCIE : init for pcie analyzer.\n");
962    cvmx_helper_qlm_jtag_init();
963    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
964    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
965    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
966    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
967    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
968    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
969    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
970    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
971    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
972    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
973    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
974    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
975    cvmx_helper_qlm_jtag_update(pcie_port);
976#endif
977
978    /* Make sure we aren't trying to setup a target mode interface in host mode */
979    mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
980    ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX || OCTEON_IS_MODEL(OCTEON_CNF71XX)) ? (mio_rst_ctl.s.prtmode != 1) : (!mio_rst_ctl.s.host_mode));
981    if (ep_mode)
982    {
983        cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
984        return -1;
985    }
986
987    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
988    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
989    {
990        if (pcie_port)
991        {
992            cvmx_ciu_qlm1_t ciu_qlm;
993            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
994            ciu_qlm.s.txbypass = 1;
995            ciu_qlm.s.txdeemph = 5;
996            ciu_qlm.s.txmargin = 0x17;
997            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
998        }
999        else
1000        {
1001            cvmx_ciu_qlm0_t ciu_qlm;
1002            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1003            ciu_qlm.s.txbypass = 1;
1004            ciu_qlm.s.txdeemph = 5;
1005            ciu_qlm.s.txmargin = 0x17;
1006            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1007        }
1008    }
1009    /* Bring the PCIe out of reset */
1010    if (pcie_port)
1011        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1012    else
1013        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1014    /* After a chip reset the PCIe will also be in reset. If it isn't,
1015        most likely someone is trying to init it again without a proper
1016        PCIe reset */
1017    if (ciu_soft_prst.s.soft_prst == 0)
1018    {
1019        /* Reset the port */
1020        ciu_soft_prst.s.soft_prst = 1;
1021        if (pcie_port)
1022            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1023        else
1024            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1025        /* Wait until pcie resets the ports. */
1026        cvmx_wait_usec(2000);
1027    }
1028    if (pcie_port)
1029    {
1030        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1031        ciu_soft_prst.s.soft_prst = 0;
1032        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1033    }
1034    else
1035    {
1036        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1037        ciu_soft_prst.s.soft_prst = 0;
1038        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1039    }
1040
1041    /* Wait for PCIe reset to complete */
1042    cvmx_wait_usec(1000);
1043
1044    /* Check and make sure PCIe came out of reset. If it doesn't the board
1045        probably hasn't wired the clocks up and the interface should be
1046        skipped */
1047    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), cvmx_mio_rst_ctlx_t, rst_done, ==, 1, 10000))
1048    {
1049        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
1050        return -1;
1051    }
1052
1053    /* Check BIST status */
1054    pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
1055    if (pemx_bist_status.u64)
1056        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
1057    pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
1058    /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
1059    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
1060        pemx_bist_status2.u64 &= ~0x3full;
1061    if (pemx_bist_status2.u64)
1062        cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
1063
1064    /* Initialize the config space CSRs */
1065    __cvmx_pcie_rc_initialize_config_space(pcie_port);
1066
1067    /* Enable gen2 speed selection */
1068    pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1069    pciercx_cfg515.s.dsc = 1;
1070    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
1071
1072    /* Bring the link up */
1073    if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1074    {
1075        /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1076            gen2 and try again with only gen1 */
1077        cvmx_pciercx_cfg031_t pciercx_cfg031;
1078        pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1079        pciercx_cfg031.s.mls = 1;
1080        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
1081        if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1082        {
1083            cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1084            return -1;
1085        }
1086    }
1087
1088    /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1089    sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1090    sli_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
1091    sli_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
1092    cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1093
1094    /* Setup Mem access SubDIDs */
1095    mem_access_subid.u64 = 0;
1096    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1097    mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
1098    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
1099    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
1100    mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1101    mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1102    /* PCIe Adddress Bits <63:34>. */
1103    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1104        mem_access_subid.cn68xx.ba = 0;
1105    else
1106        mem_access_subid.cn63xx.ba = 0;
1107
1108    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1109    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1110    {
1111        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1112        /* Set each SUBID to extend the addressable range */
1113	__cvmx_increment_ba(&mem_access_subid);
1114    }
1115
1116    if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
1117    {
1118        /* Disable the peer to peer forwarding register. This must be setup
1119            by the OS after it enumerates the bus and assigns addresses to the
1120            PCIe busses */
1121        for (i=0; i<4; i++)
1122        {
1123            cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1124            cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1125        }
1126    }
1127
1128    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1129    cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1130
1131    /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1132        where they overlap. It also overlaps with the device addresses, so
1133        make sure the peer to peer forwarding is set right */
1134    cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1135
1136    /* Setup BAR2 attributes */
1137    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1138    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
1139    /* � WAIT_COM=0 will likely work for all applications. */
1140    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1141    pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1142    pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1*/
1143    pemx_bar_ctl.s.bar2_enb = 1;
1144    pemx_bar_ctl.s.bar2_esx = 1;
1145    pemx_bar_ctl.s.bar2_cax = 0;
1146    cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1147    sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1148    sli_ctl_portx.s.ptlp_ro = 1;
1149    sli_ctl_portx.s.ctlp_ro = 1;
1150    sli_ctl_portx.s.wait_com = 0;
1151    sli_ctl_portx.s.waitl_com = 0;
1152    cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1153
1154    /* BAR1 follows BAR2 */
1155    cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1156
1157    bar1_index.u64 = 0;
1158    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1159    bar1_index.s.ca = 1;       /* Not Cached */
1160    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
1161    bar1_index.s.addr_v = 1;   /* Valid entry */
1162
1163    for (i = 0; i < 16; i++) {
1164        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1165        /* 256MB / 16 >> 22 == 4 */
1166        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1167    }
1168
1169    /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1170        clock */
1171    pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1172    pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1173    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1174
1175    /* Display the link status */
1176    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1177    cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1178
1179    return 0;
1180}
1181
1182/**
1183 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1184 *
1185 * @param pcie_port PCIe port to initialize
1186 *
1187 * @return Zero on success
1188 */
1189int cvmx_pcie_rc_initialize(int pcie_port)
1190{
1191    int result;
1192    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1193        result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1194    else
1195        result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1196#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1197    if (result == 0)
1198        cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1199#endif
1200    return result;
1201}
1202
1203
1204/**
1205 * Shutdown a PCIe port and put it in reset
1206 *
1207 * @param pcie_port PCIe port to shutdown
1208 *
1209 * @return Zero on success
1210 */
1211int cvmx_pcie_rc_shutdown(int pcie_port)
1212{
1213#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1214    cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1215#endif
1216    /* Wait for all pending operations to complete */
1217    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1218    {
1219        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1220            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1221    }
1222    else
1223    {
1224        if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1225            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1226    }
1227
1228    /* Force reset */
1229    if (pcie_port)
1230    {
1231        cvmx_ciu_soft_prst_t ciu_soft_prst;
1232        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1233        ciu_soft_prst.s.soft_prst = 1;
1234        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1235    }
1236    else
1237    {
1238        cvmx_ciu_soft_prst_t ciu_soft_prst;
1239        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1240        ciu_soft_prst.s.soft_prst = 1;
1241        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1242    }
1243    return 0;
1244}
1245
1246
1247/**
1248 * @INTERNAL
1249 * Build a PCIe config space request address for a device
1250 *
1251 * @param pcie_port PCIe port to access
1252 * @param bus       Sub bus
1253 * @param dev       Device ID
1254 * @param fn        Device sub function
1255 * @param reg       Register to access
1256 *
1257 * @return 64bit Octeon IO address
1258 */
1259static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1260{
1261    cvmx_pcie_address_t pcie_addr;
1262    cvmx_pciercx_cfg006_t pciercx_cfg006;
1263
1264    pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1265    if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1266        return 0;
1267
1268    pcie_addr.u64 = 0;
1269    pcie_addr.config.upper = 2;
1270    pcie_addr.config.io = 1;
1271    pcie_addr.config.did = 3;
1272    pcie_addr.config.subdid = 1;
1273    pcie_addr.config.es = 1;
1274    pcie_addr.config.port = pcie_port;
1275    pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1276    pcie_addr.config.bus = bus;
1277    pcie_addr.config.dev = dev;
1278    pcie_addr.config.func = fn;
1279    pcie_addr.config.reg = reg;
1280    return pcie_addr.u64;
1281}
1282
1283
1284/**
1285 * Read 8bits from a Device's config space
1286 *
1287 * @param pcie_port PCIe port the device is on
1288 * @param bus       Sub bus
1289 * @param dev       Device ID
1290 * @param fn        Device sub function
1291 * @param reg       Register to access
1292 *
1293 * @return Result of the read
1294 */
1295uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
1296{
1297    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1298    if (address)
1299        return cvmx_read64_uint8(address);
1300    else
1301        return 0xff;
1302}
1303
1304
1305/**
1306 * Read 16bits from a Device's config space
1307 *
1308 * @param pcie_port PCIe port the device is on
1309 * @param bus       Sub bus
1310 * @param dev       Device ID
1311 * @param fn        Device sub function
1312 * @param reg       Register to access
1313 *
1314 * @return Result of the read
1315 */
1316uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
1317{
1318    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1319    if (address)
1320        return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
1321    else
1322        return 0xffff;
1323}
1324
1325
1326/**
1327 * Read 32bits from a Device's config space
1328 *
1329 * @param pcie_port PCIe port the device is on
1330 * @param bus       Sub bus
1331 * @param dev       Device ID
1332 * @param fn        Device sub function
1333 * @param reg       Register to access
1334 *
1335 * @return Result of the read
1336 */
1337uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
1338{
1339    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1340    if (address)
1341        return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
1342    else
1343        return 0xffffffff;
1344}
1345
1346
1347/**
1348 * Write 8bits to a Device's config space
1349 *
1350 * @param pcie_port PCIe port the device is on
1351 * @param bus       Sub bus
1352 * @param dev       Device ID
1353 * @param fn        Device sub function
1354 * @param reg       Register to access
1355 * @param val       Value to write
1356 */
1357void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
1358{
1359    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1360    if (address)
1361        cvmx_write64_uint8(address, val);
1362}
1363
1364
1365/**
1366 * Write 16bits to a Device's config space
1367 *
1368 * @param pcie_port PCIe port the device is on
1369 * @param bus       Sub bus
1370 * @param dev       Device ID
1371 * @param fn        Device sub function
1372 * @param reg       Register to access
1373 * @param val       Value to write
1374 */
1375void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
1376{
1377    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1378    if (address)
1379        cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
1380}
1381
1382
1383/**
1384 * Write 32bits to a Device's config space
1385 *
1386 * @param pcie_port PCIe port the device is on
1387 * @param bus       Sub bus
1388 * @param dev       Device ID
1389 * @param fn        Device sub function
1390 * @param reg       Register to access
1391 * @param val       Value to write
1392 */
1393void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
1394{
1395    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1396    if (address)
1397        cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
1398}
1399
1400
1401/**
1402 * Read a PCIe config space register indirectly. This is used for
1403 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1404 *
1405 * @param pcie_port  PCIe port to read from
1406 * @param cfg_offset Address to read
1407 *
1408 * @return Value read
1409 */
1410uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1411{
1412    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1413    {
1414        cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1415        pescx_cfg_rd.u64 = 0;
1416        pescx_cfg_rd.s.addr = cfg_offset;
1417        cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1418        pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1419        return pescx_cfg_rd.s.data;
1420    }
1421    else
1422    {
1423        cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1424        pemx_cfg_rd.u64 = 0;
1425        pemx_cfg_rd.s.addr = cfg_offset;
1426        cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1427        pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1428        return pemx_cfg_rd.s.data;
1429    }
1430}
1431
1432
1433/**
1434 * Write a PCIe config space register indirectly. This is used for
1435 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1436 *
1437 * @param pcie_port  PCIe port to write to
1438 * @param cfg_offset Address to write
1439 * @param val        Value to write
1440 */
1441void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1442{
1443    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1444    {
1445        cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1446        pescx_cfg_wr.u64 = 0;
1447        pescx_cfg_wr.s.addr = cfg_offset;
1448        pescx_cfg_wr.s.data = val;
1449        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1450    }
1451    else
1452    {
1453        cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1454        pemx_cfg_wr.u64 = 0;
1455        pemx_cfg_wr.s.addr = cfg_offset;
1456        pemx_cfg_wr.s.data = val;
1457        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1458    }
1459}
1460
1461
1462/**
1463 * Initialize a PCIe port for use in target(EP) mode.
1464 *
1465 * @param pcie_port PCIe port to initialize
1466 *
1467 * @return Zero on success
1468 */
1469int cvmx_pcie_ep_initialize(int pcie_port)
1470{
1471    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1472    {
1473        cvmx_npei_ctl_status_t npei_ctl_status;
1474        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
1475        if (npei_ctl_status.s.host_mode)
1476            return -1;
1477    }
1478    else
1479    {
1480        cvmx_mio_rst_ctlx_t mio_rst_ctl;
1481        int ep_mode;
1482        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
1483        ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX) ? (mio_rst_ctl.s.prtmode != 0) : mio_rst_ctl.s.host_mode);
1484        if (ep_mode)
1485            return -1;
1486    }
1487
1488    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
1489    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
1490    {
1491        if (pcie_port)
1492        {
1493            cvmx_ciu_qlm1_t ciu_qlm;
1494            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
1495            ciu_qlm.s.txbypass = 1;
1496            ciu_qlm.s.txdeemph = 5;
1497            ciu_qlm.s.txmargin = 0x17;
1498            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1499        }
1500        else
1501        {
1502            cvmx_ciu_qlm0_t ciu_qlm;
1503            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1504            ciu_qlm.s.txbypass = 1;
1505            ciu_qlm.s.txdeemph = 5;
1506            ciu_qlm.s.txmargin = 0x17;
1507            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1508        }
1509    }
1510
1511    /* Enable bus master and memory */
1512    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
1513
1514    /* Max Payload Size (PCIE*_CFG030[MPS]) */
1515    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
1516    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
1517    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
1518    {
1519        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
1520        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
1521        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
1522        {
1523            pcieepx_cfg030.s.mps = MPS_CN5XXX;
1524            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
1525        }
1526        else
1527        {
1528            pcieepx_cfg030.s.mps = MPS_CN6XXX;
1529            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
1530        }
1531        pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
1532        pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
1533        pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
1534        pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
1535        pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
1536        pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
1537        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
1538    }
1539
1540    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1541    {
1542        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
1543        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1544        cvmx_npei_ctl_status2_t npei_ctl_status2;
1545        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
1546        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
1547        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
1548        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
1549    }
1550    else
1551    {
1552        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
1553        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1554        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
1555        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
1556        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
1557        prt_cfg.s.mps = MPS_CN6XXX;
1558        prt_cfg.s.mrrs = MRRS_CN6XXX;
1559        /* Max outstanding load request. */
1560        prt_cfg.s.molr = 32;
1561        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
1562
1563        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
1564        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
1565        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
1566    }
1567
1568    /* Setup Mem access SubDID 12 to access Host memory */
1569    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1570    {
1571        cvmx_npei_mem_access_subidx_t mem_access_subid;
1572        mem_access_subid.u64 = 0;
1573        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1574        mem_access_subid.s.nmerge = 1;  /* Merging is not allowed in this window. */
1575        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
1576        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
1577        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
1578        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
1579        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
1580        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
1581        mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
1582        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
1583    }
1584    else
1585    {
1586        cvmx_sli_mem_access_subidx_t mem_access_subid;
1587        mem_access_subid.u64 = 0;
1588        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1589        mem_access_subid.s.nmerge = 0;  /* Merging is allowed in this window. */
1590        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
1591        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
1592        mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1593        mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        /* PCIe Address Bits <63:34>. */
1595        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1596            mem_access_subid.cn68xx.ba = 0;
1597        else
1598            mem_access_subid.cn63xx.ba = 0;
1599        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
1600    }
1601    return 0;
1602}
1603
1604
1605/**
1606 * Wait for posted PCIe read/writes to reach the other side of
1607 * the internal PCIe switch. This will insure that core
1608 * read/writes are posted before anything after this function
1609 * is called. This may be necessary when writing to memory that
1610 * will later be read using the DMA/PKT engines.
1611 *
1612 * @param pcie_port PCIe port to wait for
1613 */
1614void cvmx_pcie_wait_for_pending(int pcie_port)
1615{
1616    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1617    {
1618        cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1619        int a;
1620        int b;
1621        int c;
1622
1623        /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1624            description of how this code works */
1625        npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1626        if (pcie_port)
1627        {
1628            if (!npei_data_out_cnt.s.p1_fcnt)
1629                return;
1630            a = npei_data_out_cnt.s.p1_ucnt;
1631            b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1632        }
1633        else
1634        {
1635            if (!npei_data_out_cnt.s.p0_fcnt)
1636                return;
1637            a = npei_data_out_cnt.s.p0_ucnt;
1638            b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1639        }
1640
1641        while (1)
1642        {
1643            npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1644            c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1645            if (a<=b)
1646            {
1647                if ((c<a) || (c>b))
1648                    return;
1649            }
1650            else
1651            {
1652                if ((c>b) && (c<a))
1653                    return;
1654            }
1655        }
1656    }
1657    else
1658    {
1659        cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1660        int a;
1661        int b;
1662        int c;
1663
1664        sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1665        if (pcie_port)
1666        {
1667            if (!sli_data_out_cnt.s.p1_fcnt)
1668                return;
1669            a = sli_data_out_cnt.s.p1_ucnt;
1670            b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1671        }
1672        else
1673        {
1674            if (!sli_data_out_cnt.s.p0_fcnt)
1675                return;
1676            a = sli_data_out_cnt.s.p0_ucnt;
1677            b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1678        }
1679
1680        while (1)
1681        {
1682            sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1683            c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
1684            if (a<=b)
1685            {
1686                if ((c<a) || (c>b))
1687                    return;
1688            }
1689            else
1690            {
1691                if ((c>b) && (c<a))
1692                    return;
1693            }
1694        }
1695    }
1696}
1697