cvmx-pcie.c revision 232816
1/***********************license start***************
2 * Copyright (c) 2003-2011  Cavium, Inc. <support@cavium.com>.  All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Inc. nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40/**
41 * @file
42 *
43 * Interface to PCIe as a host(RC) or target(EP)
44 *
45 * <hr>$Revision: 70030 $<hr>
46 */
47#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
48#include <asm/octeon/cvmx.h>
49#include <asm/octeon/cvmx-config.h>
50#include <asm/octeon/cvmx-clock.h>
51#include <asm/octeon/cvmx-ciu-defs.h>
52#include <asm/octeon/cvmx-dpi-defs.h>
53#include <asm/octeon/cvmx-mio-defs.h>
54#include <asm/octeon/cvmx-npi-defs.h>
55#include <asm/octeon/cvmx-npei-defs.h>
56#include <asm/octeon/cvmx-pci-defs.h>
57#include <asm/octeon/cvmx-pcieepx-defs.h>
58#include <asm/octeon/cvmx-pciercx-defs.h>
59#include <asm/octeon/cvmx-pemx-defs.h>
60#include <asm/octeon/cvmx-pexp-defs.h>
61#include <asm/octeon/cvmx-pescx-defs.h>
62#include <asm/octeon/cvmx-sli-defs.h>
63#include <asm/octeon/cvmx-sriox-defs.h>
64#include <asm/octeon/cvmx-helper-jtag.h>
65
66#ifdef CONFIG_CAVIUM_DECODE_RSL
67#include <asm/octeon/cvmx-error.h>
68#endif
69#include <asm/octeon/cvmx-helper.h>
70#include <asm/octeon/cvmx-helper-board.h>
71#include <asm/octeon/cvmx-helper-errata.h>
72#include <asm/octeon/cvmx-qlm.h>
73#include <asm/octeon/cvmx-pcie.h>
74#include <asm/octeon/cvmx-sysinfo.h>
75#include <asm/octeon/cvmx-swap.h>
76#include <asm/octeon/cvmx-wqe.h>
77#else
78#include "cvmx.h"
79#include "cvmx-csr-db.h"
80#include "cvmx-pcie.h"
81#include "cvmx-sysinfo.h"
82#include "cvmx-swap.h"
83#include "cvmx-wqe.h"
84#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
85#include "cvmx-error.h"
86#endif
87#include "cvmx-helper-errata.h"
88#include "cvmx-qlm.h"
89#endif
90
91#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
92#define MPS_CN5XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
93#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
94#define MPS_CN6XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
95
96/**
97 * Return the Core virtual base address for PCIe IO access. IOs are
98 * read/written as an offset from this address.
99 *
100 * @param pcie_port PCIe port the IO is for
101 *
102 * @return 64bit Octeon IO base address for read/write
103 */
104uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
105{
106    cvmx_pcie_address_t pcie_addr;
107    pcie_addr.u64 = 0;
108    pcie_addr.io.upper = 0;
109    pcie_addr.io.io = 1;
110    pcie_addr.io.did = 3;
111    pcie_addr.io.subdid = 2;
112    pcie_addr.io.es = 1;
113    pcie_addr.io.port = pcie_port;
114    return pcie_addr.u64;
115}
116
117
118/**
119 * Size of the IO address region returned at address
120 * cvmx_pcie_get_io_base_address()
121 *
122 * @param pcie_port PCIe port the IO is for
123 *
124 * @return Size of the IO window
125 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Every PCIe port exposes the same fixed 4GB IO window */
    (void)pcie_port;
    return 0x100000000ull;
}
130
131
132/**
133 * Return the Core virtual base address for PCIe MEM access. Memory is
134 * read/written as an offset from this address.
135 *
136 * @param pcie_port PCIe port the IO is for
137 *
138 * @return 64bit Octeon IO base address for read/write
139 */
140uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
141{
142    cvmx_pcie_address_t pcie_addr;
143    pcie_addr.u64 = 0;
144    pcie_addr.mem.upper = 0;
145    pcie_addr.mem.io = 1;
146    pcie_addr.mem.did = 3;
147    pcie_addr.mem.subdid = 3 + pcie_port;
148    return pcie_addr.u64;
149}
150
151
152/**
153 * Size of the Mem address region returned at address
154 * cvmx_pcie_get_mem_base_address()
155 *
156 * @param pcie_port PCIe port the IO is for
157 *
158 * @return Size of the Mem window
159 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Every PCIe port exposes the same fixed 64GB memory window */
    (void)pcie_port;
    return 0x1000000000ull;
}
164
165
166/**
167 * @INTERNAL
168 * Initialize the RC config space CSRs
169 *
170 * @param pcie_port PCIe port to initialize
171 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    /* NOTE: the MPS/MRRS chosen here must agree with the NPEI/DPI
       settings programmed further below (see the comments there). */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* CN5XXX-style chips use the NPEI block */
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* CN6XXX-style chips use the DPI/SLI blocks instead of NPEI */
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        /* Max outstanding load request. */
        prt_cfg.s.molr = 32;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        /* The SLI S2M MRRS must track the DPI setting above */
        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
        /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }


    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    /* NOTE(review): CFG066/CFG069 are written to zero here (presumably to
       mask these message sources); confirm intended values against the HRM. */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100;
        pciercx_cfg008.s.ml_addr = 0;   /* limit < base => window disabled */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100;
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
349
350/**
351 * @INTERNAL
352 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
353 * port from reset to a link up state. Software can then begin
354 * configuring the rest of the link.
355 *
356 * @param pcie_port PCIe port to initialize
357 *
358 * @return Zero on success
359 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    /* (Lanner MR730 boards are wired straight through, so they skip the swap) */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      switch (cvmx_sysinfo_get()->board_type)
      {
#if defined(OCTEON_VENDOR_LANNER)
	case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
	  break;
#endif
	default:
	  pescx_ctl_status.s.lane_swp = 1;
	  break;
      }
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up: poll CFG032[DLLA] (data link layer
       active) until set, giving up after 2*core-clock-rate cycles
       (presumably ~2 seconds of core clocks — TODO confirm cvmx_get_cycle()
       counts core clocks on all models). */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
        /* Any other negotiated width leaves RTL at its current value */
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
460
461static inline void __cvmx_increment_ba(cvmx_sli_mem_access_subidx_t *pmas)
462{
463    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
464        pmas->cn68xx.ba++;
465    else
466        pmas->cn63xx.ba++;
467}
468
469/**
470 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
471 * the bus.
472 *
473 * @param pcie_port PCIe port to initialize
474 *
475 * @return Zero on success
476 */
477static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
478{
479    int i;
480    int base;
481    uint64_t addr_swizzle;
482    cvmx_ciu_soft_prst_t ciu_soft_prst;
483    cvmx_pescx_bist_status_t pescx_bist_status;
484    cvmx_pescx_bist_status2_t pescx_bist_status2;
485    cvmx_npei_ctl_status_t npei_ctl_status;
486    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
487    cvmx_npei_mem_access_subidx_t mem_access_subid;
488    cvmx_npei_dbg_data_t npei_dbg_data;
489    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
490    cvmx_pciercx_cfg032_t pciercx_cfg032;
491    cvmx_npei_bar1_indexx_t bar1_index;
492
493retry:
494    /* Make sure we aren't trying to setup a target mode interface in host mode */
495    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
496    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
497    {
498        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
499        return -1;
500    }
501
502    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
503    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
504    {
505        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
506        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
507        {
508            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
509            return -1;
510        }
511    }
512
513    /* Make sure a CN56XX pass 1 isn't trying to do anything; errata for PASS 1 */
514    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
515        cvmx_dprintf ("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
516        return -1;
517    }
518
519    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
520    npei_ctl_status.s.arb = 1;
521    /* Allow up to 0x20 config retries */
522    npei_ctl_status.s.cfg_rtry = 0x20;
523    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
524    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
525    {
526        npei_ctl_status.s.p0_ntags = 0x20;
527        npei_ctl_status.s.p1_ntags = 0x20;
528    }
529    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
530
531    /* Bring the PCIe out of reset */
532    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
533    {
534        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
535            workaround for this bug, we bring both PCIe ports out of reset at
536            the same time instead of on separate calls. So for port 0, we bring
537            both out of reset and do nothing on port 1 */
538        if (pcie_port == 0)
539        {
540            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
541            /* After a chip reset the PCIe will also be in reset. If it isn't,
542                most likely someone is trying to init it again without a proper
543                PCIe reset */
544            if (ciu_soft_prst.s.soft_prst == 0)
545            {
546		/* Reset the ports */
547		ciu_soft_prst.s.soft_prst = 1;
548		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
549		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
550		ciu_soft_prst.s.soft_prst = 1;
551		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
552		/* Wait until pcie resets the ports. */
553		cvmx_wait_usec(2000);
554            }
555            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
556            ciu_soft_prst.s.soft_prst = 0;
557            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
558            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
559            ciu_soft_prst.s.soft_prst = 0;
560            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
561        }
562    }
563    else
564    {
565        /* The normal case: The PCIe ports are completely separate and can be
566            brought out of reset independently */
567        if (pcie_port)
568            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
569        else
570            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
571        /* After a chip reset the PCIe will also be in reset. If it isn't,
572            most likely someone is trying to init it again without a proper
573            PCIe reset */
574        if (ciu_soft_prst.s.soft_prst == 0)
575        {
576	    /* Reset the port */
577	    ciu_soft_prst.s.soft_prst = 1;
578	    if (pcie_port)
579		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
580 	    else
581		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
582	    /* Wait until pcie resets the ports. */
583	    cvmx_wait_usec(2000);
584        }
585        if (pcie_port)
586        {
587            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
588            ciu_soft_prst.s.soft_prst = 0;
589            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
590        }
591        else
592        {
593            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
594            ciu_soft_prst.s.soft_prst = 0;
595            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
596        }
597    }
598
599    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
600       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
601    cvmx_wait(400000);
602
603    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
604        CN52XX, so we only probe it on newer chips */
605    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
606    {
607        /* Clear PCLK_RUN so we can check if the clock is running */
608        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
609        pescx_ctl_status2.s.pclk_run = 1;
610        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
611        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
612            us the clock is running */
613        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
614            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
615        {
616            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
617            return -1;
618        }
619    }
620
621    /* Check and make sure PCIe came out of reset. If it doesn't the board
622        probably hasn't wired the clocks up and the interface should be
623        skipped */
624    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
625    if (pescx_ctl_status2.s.pcierst)
626    {
627        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
628        return -1;
629    }
630
631    /* Check BIST2 status. If any bits are set skip this interface. This
632        is an attempt to catch PCIE-813 on pass 1 parts */
633    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
634    if (pescx_bist_status2.u64)
635    {
636        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
637        return -1;
638    }
639
640    /* Check BIST status */
641    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
642    if (pescx_bist_status.u64)
643        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
644
645    /* Initialize the config space CSRs */
646    __cvmx_pcie_rc_initialize_config_space(pcie_port);
647
648    /* Bring the link up */
649    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
650    {
651        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
652        return -1;
653    }
654
655    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
656    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
657    npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
658    npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
659    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
660
661    /* Setup Mem access SubDIDs */
662    mem_access_subid.u64 = 0;
663    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
664    mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
665    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
666    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
667    mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
668    mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
669    mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
670    mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
671    mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
672
673    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
674    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
675    {
676        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
677        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
678    }
679
680    /* Disable the peer to peer forwarding register. This must be setup
681        by the OS after it enumerates the bus and assigns addresses to the
682        PCIe busses */
683    for (i=0; i<4; i++)
684    {
685        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
686        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
687    }
688
689    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
690    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
691
692    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
693    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
694
695    bar1_index.u32 = 0;
696    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
697    bar1_index.s.ca = 1;       /* Not Cached */
698    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
699    bar1_index.s.addr_v = 1;   /* Valid entry */
700
701    base = pcie_port ? 16 : 0;
702
703    /* Big endian swizzle for 32-bit PEXP_NCB register. */
704#ifdef __MIPSEB__
705    addr_swizzle = 4;
706#else
707    addr_swizzle = 0;
708#endif
709    for (i = 0; i < 16; i++) {
710        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
711        base++;
712        /* 256MB / 16 >> 22 == 4 */
713        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
714    }
715
716    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
717        where they overlap. It also overlaps with the device addresses, so
718        make sure the peer to peer forwarding is set right */
719    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
720
721    /* Setup BAR2 attributes */
722    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
723    /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
724    /* - WAIT_COM=0 will likely work for all applications. */
725    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
726    if (pcie_port)
727    {
728        cvmx_npei_ctl_port1_t npei_ctl_port;
729        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
730        npei_ctl_port.s.bar2_enb = 1;
731        npei_ctl_port.s.bar2_esx = 1;
732        npei_ctl_port.s.bar2_cax = 0;
733        npei_ctl_port.s.ptlp_ro = 1;
734        npei_ctl_port.s.ctlp_ro = 1;
735        npei_ctl_port.s.wait_com = 0;
736        npei_ctl_port.s.waitl_com = 0;
737        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
738    }
739    else
740    {
741        cvmx_npei_ctl_port0_t npei_ctl_port;
742        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
743        npei_ctl_port.s.bar2_enb = 1;
744        npei_ctl_port.s.bar2_esx = 1;
745        npei_ctl_port.s.bar2_cax = 0;
746        npei_ctl_port.s.ptlp_ro = 1;
747        npei_ctl_port.s.ctlp_ro = 1;
748        npei_ctl_port.s.wait_com = 0;
749        npei_ctl_port.s.waitl_com = 0;
750        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
751    }
752
753    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
754        TLP ordering to not be preserved after multiple PCIe port resets. This
755        code detects this fault and corrects it by aligning the TLP counters
756        properly. Another link reset is then performed. See PCIE-13340 */
757    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
758        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
759    {
760        cvmx_npei_dbg_data_t dbg_data;
761        int old_in_fif_p_count;
762        int in_fif_p_count;
763        int out_p_count;
764        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
765        int i;
766
767        /* Choose a write address of 1MB. It should be harmless as all bars
768            haven't been setup */
769        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
770
771        /* Make sure at least in_p_offset have been executed before we try and
772            read in_fif_p_count */
773        i = in_p_offset;
774        while (i--)
775        {
776            cvmx_write64_uint32(write_address, 0);
777            cvmx_wait(10000);
778        }
779
780        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
781            unstable sometimes so read it twice with a write between the reads.
782            This way we can tell the value is good as it will increment by one
783            due to the write */
784        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
785        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
786        do
787        {
788            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
789            old_in_fif_p_count = dbg_data.s.data & 0xff;
790            cvmx_write64_uint32(write_address, 0);
791            cvmx_wait(10000);
792            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
793            in_fif_p_count = dbg_data.s.data & 0xff;
794        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
795
796        /* Update in_fif_p_count for it's offset with respect to out_p_count */
797        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
798
799        /* Read the OUT_P_COUNT from the debug select */
800        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
801        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
802        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
803        out_p_count = (dbg_data.s.data>>1) & 0xff;
804
805        /* Check that the two counters are aligned */
806        if (out_p_count != in_fif_p_count)
807        {
808            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
809            while (in_fif_p_count != 0)
810            {
811                cvmx_write64_uint32(write_address, 0);
812                cvmx_wait(10000);
813                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
814            }
815            /* The EBH5200 board swapped the PCIe reset lines on the board. This
816                means we must bring both links down and up, which will cause the
817                PCIe0 to need alignment again. Lots of messages will be displayed,
818                but everything should work */
819            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
820                (pcie_port == 1))
821                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
823            goto retry;
824        }
825    }
826
827    /* Display the link status */
828    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
829    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
830
831    return 0;
832}
833
834/**
835 * @INTERNAL
836 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
837 * port from reset to a link up state. Software can then begin
838 * configuring the rest of the link.
839 *
840 * @param pcie_port PCIe port to initialize
841 *
842 * @return Zero on success
843 */
844static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
845{
846    uint64_t start_cycle;
847    cvmx_pemx_ctl_status_t pem_ctl_status;
848    cvmx_pciercx_cfg032_t pciercx_cfg032;
849    cvmx_pciercx_cfg448_t pciercx_cfg448;
850
851    /* Bring up the link */
852    pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
853    pem_ctl_status.s.lnk_enb = 1;
854    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
855
856    /* Wait for the link to come up */
857    start_cycle = cvmx_get_cycle();
858    do
859    {
860        if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
861            return -1;
862        cvmx_wait(10000);
863        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
864    } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
865
866    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
867        little longer to respond than expected under load. As a workaround for
868        this we configure the Replay Time Limit to the value expected for a 512
869        byte MPS instead of our actual 256 byte MPS. The numbers below are
870        directly from the PCIe spec table 3-4 */
871    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
872    switch (pciercx_cfg032.s.nlw)
873    {
874        case 1: /* 1 lane */
875            pciercx_cfg448.s.rtl = 1677;
876            break;
877        case 2: /* 2 lanes */
878            pciercx_cfg448.s.rtl = 867;
879            break;
880        case 4: /* 4 lanes */
881            pciercx_cfg448.s.rtl = 462;
882            break;
883        case 8: /* 8 lanes */
884            pciercx_cfg448.s.rtl = 258;
885            break;
886    }
887    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
888
889    return 0;
890}
891
892
893/**
894 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
895 * the bus.
896 *
897 * @param pcie_port PCIe port to initialize
898 *
899 * @return Zero on success
900 */
901static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
902{
903    int i;
904    cvmx_ciu_soft_prst_t ciu_soft_prst;
905    cvmx_mio_rst_ctlx_t mio_rst_ctl;
906    cvmx_pemx_bar_ctl_t pemx_bar_ctl;
907    cvmx_pemx_ctl_status_t pemx_ctl_status;
908    cvmx_pemx_bist_status_t pemx_bist_status;
909    cvmx_pemx_bist_status2_t pemx_bist_status2;
910    cvmx_pciercx_cfg032_t pciercx_cfg032;
911    cvmx_pciercx_cfg515_t pciercx_cfg515;
912    cvmx_sli_ctl_portx_t sli_ctl_portx;
913    cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
914    cvmx_sli_mem_access_subidx_t mem_access_subid;
915    cvmx_pemx_bar1_indexx_t bar1_index;
916    int ep_mode;
917
918    /* Make sure this interface is PCIe */
919    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
920    {
921        /* Requires reading the MIO_QLMX_CFG register to figure
922           out the port type. */
923        int qlm = pcie_port;
924        int status;
925        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
926            qlm = 3 - (pcie_port * 2);
927        else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
928        {
929            cvmx_mio_qlmx_cfg_t qlm_cfg;
930            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
931            if (qlm_cfg.s.qlm_cfg == 1)
932                qlm = 1;
933        }
934        /* PCIe is allowed only in QLM1, 1 PCIe port in x2 or
935           2 PCIe ports in x1 */
936        else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
937            qlm = 1;
938        status = cvmx_qlm_get_status(qlm);
939        if (status == 4 || status == 5)
940        {
941            cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
942            return -1;
943        }
944        if (status == 1)
945        {
946            cvmx_dprintf("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
947            return -1;
948        }
949        if (status == 2)
950        {
951            cvmx_dprintf("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
952            return -1;
953        }
954        if (status == -1)
955        {
956            cvmx_dprintf("PCIe: Port %d is unknown, skipping.\n", pcie_port);
957            return -1;
958        }
959    }
960
961#if 0
962    /* This code is so that the PCIe analyzer is able to see 63XX traffic */
963    cvmx_dprintf("PCIE : init for pcie analyzer.\n");
964    cvmx_helper_qlm_jtag_init();
965    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
966    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
967    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
968    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
969    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
970    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
971    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
972    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
973    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
974    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
975    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
976    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
977    cvmx_helper_qlm_jtag_update(pcie_port);
978#endif
979
980    /* Make sure we aren't trying to setup a target mode interface in host mode */
981    mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
982    ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX || OCTEON_IS_MODEL(OCTEON_CNF71XX)) ? (mio_rst_ctl.s.prtmode != 1) : (!mio_rst_ctl.s.host_mode));
983    if (ep_mode)
984    {
985        cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
986        return -1;
987    }
988
989    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
990    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
991    {
992        if (pcie_port)
993        {
994            cvmx_ciu_qlm1_t ciu_qlm;
995            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
996            ciu_qlm.s.txbypass = 1;
997            ciu_qlm.s.txdeemph = 5;
998            ciu_qlm.s.txmargin = 0x17;
999            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1000        }
1001        else
1002        {
1003            cvmx_ciu_qlm0_t ciu_qlm;
1004            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1005            ciu_qlm.s.txbypass = 1;
1006            ciu_qlm.s.txdeemph = 5;
1007            ciu_qlm.s.txmargin = 0x17;
1008            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1009        }
1010    }
1011    /* Bring the PCIe out of reset */
1012    if (pcie_port)
1013        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1014    else
1015        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1016    /* After a chip reset the PCIe will also be in reset. If it isn't,
1017        most likely someone is trying to init it again without a proper
1018        PCIe reset */
1019    if (ciu_soft_prst.s.soft_prst == 0)
1020    {
1021        /* Reset the port */
1022        ciu_soft_prst.s.soft_prst = 1;
1023        if (pcie_port)
1024            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1025        else
1026            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1027        /* Wait until pcie resets the ports. */
1028        cvmx_wait_usec(2000);
1029    }
1030    if (pcie_port)
1031    {
1032        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1033        ciu_soft_prst.s.soft_prst = 0;
1034        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1035    }
1036    else
1037    {
1038        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1039        ciu_soft_prst.s.soft_prst = 0;
1040        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1041    }
1042
1043    /* Wait for PCIe reset to complete */
1044    cvmx_wait_usec(1000);
1045
1046    /* Check and make sure PCIe came out of reset. If it doesn't the board
1047        probably hasn't wired the clocks up and the interface should be
1048        skipped */
1049    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), cvmx_mio_rst_ctlx_t, rst_done, ==, 1, 10000))
1050    {
1051        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
1052        return -1;
1053    }
1054
1055    /* Check BIST status */
1056    pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
1057    if (pemx_bist_status.u64)
1058        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
1059    pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
1060    /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
1061    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
1062        pemx_bist_status2.u64 &= ~0x3full;
1063    if (pemx_bist_status2.u64)
1064        cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
1065
1066    /* Initialize the config space CSRs */
1067    __cvmx_pcie_rc_initialize_config_space(pcie_port);
1068
1069    /* Enable gen2 speed selection */
1070    pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1071    pciercx_cfg515.s.dsc = 1;
1072    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
1073
1074    /* Bring the link up */
1075    if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1076    {
1077        /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1078            gen2 and try again with only gen1 */
1079        cvmx_pciercx_cfg031_t pciercx_cfg031;
1080        pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1081        pciercx_cfg031.s.mls = 1;
1082        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
1083        if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1084        {
1085            cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1086            return -1;
1087        }
1088    }
1089
1090    /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1091    sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1092    sli_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
1093    sli_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
1094    cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1095
1096    /* Setup Mem access SubDIDs */
1097    mem_access_subid.u64 = 0;
1098    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1099    mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
1100    mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
1101    mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
1102    mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1103    mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1104    /* PCIe Adddress Bits <63:34>. */
1105    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1106        mem_access_subid.cn68xx.ba = 0;
1107    else
1108        mem_access_subid.cn63xx.ba = 0;
1109
1110    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1111    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1112    {
1113        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1114        /* Set each SUBID to extend the addressable range */
1115	__cvmx_increment_ba(&mem_access_subid);
1116    }
1117
1118    if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
1119    {
1120        /* Disable the peer to peer forwarding register. This must be setup
1121            by the OS after it enumerates the bus and assigns addresses to the
1122            PCIe busses */
1123        for (i=0; i<4; i++)
1124        {
1125            cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1126            cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1127        }
1128    }
1129
1130    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1131    cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1132
1133    /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1134        where they overlap. It also overlaps with the device addresses, so
1135        make sure the peer to peer forwarding is set right */
1136    cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1137
1138    /* Setup BAR2 attributes */
1139    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1140    /* � PTLP_RO,CTLP_RO should normally be set (except for debug). */
1141    /* � WAIT_COM=0 will likely work for all applications. */
1142    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1143    pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1144    pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1*/
1145    pemx_bar_ctl.s.bar2_enb = 1;
1146    pemx_bar_ctl.s.bar2_esx = 1;
1147    pemx_bar_ctl.s.bar2_cax = 0;
1148    cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1149    sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1150    sli_ctl_portx.s.ptlp_ro = 1;
1151    sli_ctl_portx.s.ctlp_ro = 1;
1152    sli_ctl_portx.s.wait_com = 0;
1153    sli_ctl_portx.s.waitl_com = 0;
1154    cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1155
1156    /* BAR1 follows BAR2 */
1157    cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1158
1159    bar1_index.u64 = 0;
1160    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1161    bar1_index.s.ca = 1;       /* Not Cached */
1162    bar1_index.s.end_swp = 1;  /* Endian Swap mode */
1163    bar1_index.s.addr_v = 1;   /* Valid entry */
1164
1165    for (i = 0; i < 16; i++) {
1166        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1167        /* 256MB / 16 >> 22 == 4 */
1168        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1169    }
1170
1171    /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1172        clock */
1173    pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1174    pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1175    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1176
1177    /* Display the link status */
1178    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1179    cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1180
1181    return 0;
1182}
1183
1184/**
1185 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1186 *
1187 * @param pcie_port PCIe port to initialize
1188 *
1189 * @return Zero on success
1190 */
1191int cvmx_pcie_rc_initialize(int pcie_port)
1192{
1193    int result;
1194    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1195        result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1196    else
1197        result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1198#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
1199    if (result == 0)
1200        cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1201#endif
1202    return result;
1203}
1204
1205
1206/**
1207 * Shutdown a PCIe port and put it in reset
1208 *
1209 * @param pcie_port PCIe port to shutdown
1210 *
1211 * @return Zero on success
1212 */
1213int cvmx_pcie_rc_shutdown(int pcie_port)
1214{
1215#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
1216    cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1217#endif
1218    /* Wait for all pending operations to complete */
1219    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1220    {
1221        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1222            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1223    }
1224    else
1225    {
1226        if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1227            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1228    }
1229
1230    /* Force reset */
1231    if (pcie_port)
1232    {
1233        cvmx_ciu_soft_prst_t ciu_soft_prst;
1234        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1235        ciu_soft_prst.s.soft_prst = 1;
1236        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1237    }
1238    else
1239    {
1240        cvmx_ciu_soft_prst_t ciu_soft_prst;
1241        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1242        ciu_soft_prst.s.soft_prst = 1;
1243        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1244    }
1245    return 0;
1246}
1247
1248
1249/**
1250 * @INTERNAL
1251 * Build a PCIe config space request address for a device
1252 *
1253 * @param pcie_port PCIe port to access
1254 * @param bus       Sub bus
1255 * @param dev       Device ID
1256 * @param fn        Device sub function
1257 * @param reg       Register to access
1258 *
1259 * @return 64bit Octeon IO address
1260 */
1261static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1262{
1263    cvmx_pcie_address_t pcie_addr;
1264    cvmx_pciercx_cfg006_t pciercx_cfg006;
1265
1266    pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1267    if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1268        return 0;
1269
1270    pcie_addr.u64 = 0;
1271    pcie_addr.config.upper = 2;
1272    pcie_addr.config.io = 1;
1273    pcie_addr.config.did = 3;
1274    pcie_addr.config.subdid = 1;
1275    pcie_addr.config.es = 1;
1276    pcie_addr.config.port = pcie_port;
1277    pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1278    pcie_addr.config.bus = bus;
1279    pcie_addr.config.dev = dev;
1280    pcie_addr.config.func = fn;
1281    pcie_addr.config.reg = reg;
1282    return pcie_addr.u64;
1283}
1284
1285
1286/**
1287 * Read 8bits from a Device's config space
1288 *
1289 * @param pcie_port PCIe port the device is on
1290 * @param bus       Sub bus
1291 * @param dev       Device ID
1292 * @param fn        Device sub function
1293 * @param reg       Register to access
1294 *
1295 * @return Result of the read
1296 */
1297uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
1298{
1299    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1300    if (address)
1301        return cvmx_read64_uint8(address);
1302    else
1303        return 0xff;
1304}
1305
1306
1307/**
1308 * Read 16bits from a Device's config space
1309 *
1310 * @param pcie_port PCIe port the device is on
1311 * @param bus       Sub bus
1312 * @param dev       Device ID
1313 * @param fn        Device sub function
1314 * @param reg       Register to access
1315 *
1316 * @return Result of the read
1317 */
1318uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
1319{
1320    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1321    if (address)
1322        return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
1323    else
1324        return 0xffff;
1325}
1326
1327
1328/**
1329 * Read 32bits from a Device's config space
1330 *
1331 * @param pcie_port PCIe port the device is on
1332 * @param bus       Sub bus
1333 * @param dev       Device ID
1334 * @param fn        Device sub function
1335 * @param reg       Register to access
1336 *
1337 * @return Result of the read
1338 */
1339uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
1340{
1341    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1342    if (address)
1343        return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
1344    else
1345        return 0xffffffff;
1346}
1347
1348
1349/**
1350 * Write 8bits to a Device's config space
1351 *
1352 * @param pcie_port PCIe port the device is on
1353 * @param bus       Sub bus
1354 * @param dev       Device ID
1355 * @param fn        Device sub function
1356 * @param reg       Register to access
1357 * @param val       Value to write
1358 */
1359void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
1360{
1361    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1362    if (address)
1363        cvmx_write64_uint8(address, val);
1364}
1365
1366
1367/**
1368 * Write 16bits to a Device's config space
1369 *
1370 * @param pcie_port PCIe port the device is on
1371 * @param bus       Sub bus
1372 * @param dev       Device ID
1373 * @param fn        Device sub function
1374 * @param reg       Register to access
1375 * @param val       Value to write
1376 */
1377void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
1378{
1379    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1380    if (address)
1381        cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
1382}
1383
1384
1385/**
1386 * Write 32bits to a Device's config space
1387 *
1388 * @param pcie_port PCIe port the device is on
1389 * @param bus       Sub bus
1390 * @param dev       Device ID
1391 * @param fn        Device sub function
1392 * @param reg       Register to access
1393 * @param val       Value to write
1394 */
1395void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
1396{
1397    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
1398    if (address)
1399        cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
1400}
1401
1402
1403/**
1404 * Read a PCIe config space register indirectly. This is used for
1405 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1406 *
1407 * @param pcie_port  PCIe port to read from
1408 * @param cfg_offset Address to read
1409 *
1410 * @return Value read
1411 */
1412uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1413{
1414    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1415    {
1416        cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1417        pescx_cfg_rd.u64 = 0;
1418        pescx_cfg_rd.s.addr = cfg_offset;
1419        cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1420        pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1421        return pescx_cfg_rd.s.data;
1422    }
1423    else
1424    {
1425        cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1426        pemx_cfg_rd.u64 = 0;
1427        pemx_cfg_rd.s.addr = cfg_offset;
1428        cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1429        pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1430        return pemx_cfg_rd.s.data;
1431    }
1432}
1433
1434
1435/**
1436 * Write a PCIe config space register indirectly. This is used for
1437 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1438 *
1439 * @param pcie_port  PCIe port to write to
1440 * @param cfg_offset Address to write
1441 * @param val        Value to write
1442 */
1443void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1444{
1445    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1446    {
1447        cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1448        pescx_cfg_wr.u64 = 0;
1449        pescx_cfg_wr.s.addr = cfg_offset;
1450        pescx_cfg_wr.s.data = val;
1451        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1452    }
1453    else
1454    {
1455        cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1456        pemx_cfg_wr.u64 = 0;
1457        pemx_cfg_wr.s.addr = cfg_offset;
1458        pemx_cfg_wr.s.data = val;
1459        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1460    }
1461}
1462
1463
1464/**
1465 * Initialize a PCIe port for use in target(EP) mode.
1466 *
1467 * @param pcie_port PCIe port to initialize
1468 *
1469 * @return Zero on success
1470 */
1471int cvmx_pcie_ep_initialize(int pcie_port)
1472{
1473    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1474    {
1475        cvmx_npei_ctl_status_t npei_ctl_status;
1476        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
1477        if (npei_ctl_status.s.host_mode)
1478            return -1;
1479    }
1480    else
1481    {
1482        cvmx_mio_rst_ctlx_t mio_rst_ctl;
1483        int ep_mode;
1484        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
1485        ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX) ? (mio_rst_ctl.s.prtmode != 0) : mio_rst_ctl.s.host_mode);
1486        if (ep_mode)
1487            return -1;
1488    }
1489
1490    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
1491    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
1492    {
1493        if (pcie_port)
1494        {
1495            cvmx_ciu_qlm1_t ciu_qlm;
1496            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
1497            ciu_qlm.s.txbypass = 1;
1498            ciu_qlm.s.txdeemph = 5;
1499            ciu_qlm.s.txmargin = 0x17;
1500            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1501        }
1502        else
1503        {
1504            cvmx_ciu_qlm0_t ciu_qlm;
1505            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1506            ciu_qlm.s.txbypass = 1;
1507            ciu_qlm.s.txdeemph = 5;
1508            ciu_qlm.s.txmargin = 0x17;
1509            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1510        }
1511    }
1512
1513    /* Enable bus master and memory */
1514    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
1515
1516    /* Max Payload Size (PCIE*_CFG030[MPS]) */
1517    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
1518    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
1519    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
1520    {
1521        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
1522        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
1523        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
1524        {
1525            pcieepx_cfg030.s.mps = MPS_CN5XXX;
1526            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
1527        }
1528        else
1529        {
1530            pcieepx_cfg030.s.mps = MPS_CN6XXX;
1531            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
1532        }
1533        pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
1534        pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
1535        pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
1536        pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
1537        pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
1538        pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
1539        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
1540    }
1541
1542    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1543    {
1544        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
1545        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1546        cvmx_npei_ctl_status2_t npei_ctl_status2;
1547        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
1548        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
1549        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
1550        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
1551    }
1552    else
1553    {
1554        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
1555        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1556        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
1557        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
1558        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
1559        prt_cfg.s.mps = MPS_CN6XXX;
1560        prt_cfg.s.mrrs = MRRS_CN6XXX;
1561        /* Max outstanding load request. */
1562        prt_cfg.s.molr = 32;
1563        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
1564
1565        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
1566        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
1567        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
1568    }
1569
1570    /* Setup Mem access SubDID 12 to access Host memory */
1571    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1572    {
1573        cvmx_npei_mem_access_subidx_t mem_access_subid;
1574        mem_access_subid.u64 = 0;
1575        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1576        mem_access_subid.s.nmerge = 1;  /* Merging is not allowed in this window. */
1577        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
1578        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
1579        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
1580        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
1581        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
1582        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
1583        mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
1584        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
1585    }
1586    else
1587    {
1588        cvmx_sli_mem_access_subidx_t mem_access_subid;
1589        mem_access_subid.u64 = 0;
1590        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1591        mem_access_subid.s.nmerge = 0;  /* Merging is allowed in this window. */
1592        mem_access_subid.s.esr = 0;     /* Endian-swap for Reads. */
1593        mem_access_subid.s.esw = 0;     /* Endian-swap for Writes. */
1594        mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1595        mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
1596        /* PCIe Adddress Bits <63:34>. */
1597        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1598            mem_access_subid.cn68xx.ba = 0;
1599        else
1600            mem_access_subid.cn63xx.ba = 0;
1601        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
1602    }
1603    return 0;
1604}
1605
1606
1607/**
1608 * Wait for posted PCIe read/writes to reach the other side of
1609 * the internal PCIe switch. This will insure that core
1610 * read/writes are posted before anything after this function
1611 * is called. This may be necessary when writing to memory that
1612 * will later be read using the DMA/PKT engines.
1613 *
1614 * @param pcie_port PCIe port to wait for
1615 */
1616void cvmx_pcie_wait_for_pending(int pcie_port)
1617{
1618    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1619    {
1620        cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1621        int a;
1622        int b;
1623        int c;
1624
1625        /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1626            description of how this code works */
1627        npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1628        if (pcie_port)
1629        {
1630            if (!npei_data_out_cnt.s.p1_fcnt)
1631                return;
1632            a = npei_data_out_cnt.s.p1_ucnt;
1633            b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1634        }
1635        else
1636        {
1637            if (!npei_data_out_cnt.s.p0_fcnt)
1638                return;
1639            a = npei_data_out_cnt.s.p0_ucnt;
1640            b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1641        }
1642
1643        while (1)
1644        {
1645            npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1646            c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1647            if (a<=b)
1648            {
1649                if ((c<a) || (c>b))
1650                    return;
1651            }
1652            else
1653            {
1654                if ((c>b) && (c<a))
1655                    return;
1656            }
1657        }
1658    }
1659    else
1660    {
1661        cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1662        int a;
1663        int b;
1664        int c;
1665
1666        sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1667        if (pcie_port)
1668        {
1669            if (!sli_data_out_cnt.s.p1_fcnt)
1670                return;
1671            a = sli_data_out_cnt.s.p1_ucnt;
1672            b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1673        }
1674        else
1675        {
1676            if (!sli_data_out_cnt.s.p0_fcnt)
1677                return;
1678            a = sli_data_out_cnt.s.p0_ucnt;
1679            b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1680        }
1681
1682        while (1)
1683        {
1684            sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1685            c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
1686            if (a<=b)
1687            {
1688                if ((c<a) || (c>b))
1689                    return;
1690            }
1691            else
1692            {
1693                if ((c>b) && (c<a))
1694                    return;
1695            }
1696        }
1697    }
1698}
1699