1/***********************license start*************** 2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Inc. nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
 ***********************license end**************************************/


/**
 * cvmx-ilk-defs.h
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon ilk.
 *
 * This file is auto generated. Do not edit.
 *
 * <hr>$Revision$<hr>
 *
 */
#ifndef __CVMX_ILK_DEFS_H__
#define __CVMX_ILK_DEFS_H__

/*
 * CSR address accessors. When CVMX_ENABLE_CSR_ADDRESS_CHECKING is set,
 * each address is produced by an inline function that calls cvmx_warn()
 * if the CSR (or the requested index) is not valid for the running chip
 * model -- every ILK CSR below exists only on CN68XX. Otherwise, plain
 * macros expand directly to the CSR address. Indexed RX/TX CSRs take a
 * link number (0-1, stride 16384); per-lane CSRs take a lane (0-7,
 * stride 1024). The index is masked, not range-checked, in the address
 * computation itself.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_BIST_SUM CVMX_ILK_BIST_SUM_FUNC()
static inline uint64_t CVMX_ILK_BIST_SUM_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_BIST_SUM not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000038ull);
}
#else
#define CVMX_ILK_BIST_SUM (CVMX_ADD_IO_SEG(0x0001180014000038ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_GBL_CFG CVMX_ILK_GBL_CFG_FUNC()
static inline uint64_t CVMX_ILK_GBL_CFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_GBL_CFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000000ull);
}
#else
#define CVMX_ILK_GBL_CFG (CVMX_ADD_IO_SEG(0x0001180014000000ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_GBL_INT CVMX_ILK_GBL_INT_FUNC()
static inline uint64_t CVMX_ILK_GBL_INT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_GBL_INT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000008ull);
}
#else
#define CVMX_ILK_GBL_INT (CVMX_ADD_IO_SEG(0x0001180014000008ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_GBL_INT_EN CVMX_ILK_GBL_INT_EN_FUNC()
static inline uint64_t CVMX_ILK_GBL_INT_EN_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_GBL_INT_EN not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000010ull);
}
#else
#define CVMX_ILK_GBL_INT_EN (CVMX_ADD_IO_SEG(0x0001180014000010ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_INT_SUM CVMX_ILK_INT_SUM_FUNC()
static inline uint64_t CVMX_ILK_INT_SUM_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_INT_SUM not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000030ull);
}
#else
#define CVMX_ILK_INT_SUM (CVMX_ADD_IO_SEG(0x0001180014000030ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_LNE_DBG CVMX_ILK_LNE_DBG_FUNC()
static inline uint64_t CVMX_ILK_LNE_DBG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_LNE_DBG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014030008ull);
}
#else
#define CVMX_ILK_LNE_DBG (CVMX_ADD_IO_SEG(0x0001180014030008ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_LNE_STS_MSG CVMX_ILK_LNE_STS_MSG_FUNC()
static inline uint64_t CVMX_ILK_LNE_STS_MSG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_LNE_STS_MSG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014030000ull);
}
#else
#define CVMX_ILK_LNE_STS_MSG (CVMX_ADD_IO_SEG(0x0001180014030000ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_RXF_IDX_PMAP CVMX_ILK_RXF_IDX_PMAP_FUNC()
static inline uint64_t CVMX_ILK_RXF_IDX_PMAP_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_RXF_IDX_PMAP not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000020ull);
}
#else
#define CVMX_ILK_RXF_IDX_PMAP (CVMX_ADD_IO_SEG(0x0001180014000020ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_RXF_MEM_PMAP CVMX_ILK_RXF_MEM_PMAP_FUNC()
static inline uint64_t CVMX_ILK_RXF_MEM_PMAP_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_RXF_MEM_PMAP not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000028ull);
}
#else
#define CVMX_ILK_RXF_MEM_PMAP (CVMX_ADD_IO_SEG(0x0001180014000028ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_CFG0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_CFG0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020000ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_CFG0(offset) (CVMX_ADD_IO_SEG(0x0001180014020000ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_CFG1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_CFG1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020008ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_CFG1(offset) (CVMX_ADD_IO_SEG(0x0001180014020008ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_FLOW_CTL0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_FLOW_CTL0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020090ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_FLOW_CTL0(offset) (CVMX_ADD_IO_SEG(0x0001180014020090ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_FLOW_CTL1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_FLOW_CTL1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020098ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_FLOW_CTL1(offset) (CVMX_ADD_IO_SEG(0x0001180014020098ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_IDX_CAL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_IDX_CAL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800140200A0ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_IDX_CAL(offset) (CVMX_ADD_IO_SEG(0x00011800140200A0ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_IDX_STAT0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_IDX_STAT0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020070ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_IDX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014020070ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_IDX_STAT1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_IDX_STAT1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020078ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_IDX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014020078ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_INT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_INT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020010ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180014020010ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_INT_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020018ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x0001180014020018ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_JABBER(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_JABBER(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800140200B8ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800140200B8ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_MEM_CAL0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_MEM_CAL0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800140200A8ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_MEM_CAL0(offset) (CVMX_ADD_IO_SEG(0x00011800140200A8ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_MEM_CAL1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_MEM_CAL1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800140200B0ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_MEM_CAL1(offset) (CVMX_ADD_IO_SEG(0x00011800140200B0ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_MEM_STAT0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_MEM_STAT0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020080ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_MEM_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014020080ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_MEM_STAT1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_MEM_STAT1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020088ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_MEM_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014020088ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_RID(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_RID(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800140200C0ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_RID(offset) (CVMX_ADD_IO_SEG(0x00011800140200C0ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020020ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014020020ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020028ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014020028ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT2(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT2(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020030ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x0001180014020030ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT3(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT3(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020038ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x0001180014020038ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT4(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT4(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020040ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x0001180014020040ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT5(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT5(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020048ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x0001180014020048ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT6(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT6(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020050ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x0001180014020050ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT7(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT7(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020058ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x0001180014020058ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT8(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT8(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020060ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x0001180014020060ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RXX_STAT9(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_RXX_STAT9(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014020068ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_RXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x0001180014020068ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_CFG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_CFG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038000ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001180014038000ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_INT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_INT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038008ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180014038008ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_INT_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_INT_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038010ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x0001180014038010ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038018ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014038018ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038020ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014038020ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT2(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT2(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038028ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT2(offset) (CVMX_ADD_IO_SEG(0x0001180014038028ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT3(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT3(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038030ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT3(offset) (CVMX_ADD_IO_SEG(0x0001180014038030ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT4(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT4(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038038ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT4(offset) (CVMX_ADD_IO_SEG(0x0001180014038038ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT5(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT5(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038040ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT5(offset) (CVMX_ADD_IO_SEG(0x0001180014038040ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT6(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT6(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038048ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT6(offset) (CVMX_ADD_IO_SEG(0x0001180014038048ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT7(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT7(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038050ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT7(offset) (CVMX_ADD_IO_SEG(0x0001180014038050ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT8(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT8(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038058ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT8(offset) (CVMX_ADD_IO_SEG(0x0001180014038058ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_RX_LNEX_STAT9(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_ILK_RX_LNEX_STAT9(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014038060ull) + ((offset) & 7) * 1024;
}
#else
#define CVMX_ILK_RX_LNEX_STAT9(offset) (CVMX_ADD_IO_SEG(0x0001180014038060ull) + ((offset) & 7) * 1024)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ILK_SER_CFG CVMX_ILK_SER_CFG_FUNC()
static inline uint64_t CVMX_ILK_SER_CFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_ILK_SER_CFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180014000018ull);
}
#else
#define CVMX_ILK_SER_CFG (CVMX_ADD_IO_SEG(0x0001180014000018ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_CFG0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_CFG0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010000ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_CFG0(offset) (CVMX_ADD_IO_SEG(0x0001180014010000ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_CFG1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_CFG1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010008ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_CFG1(offset) (CVMX_ADD_IO_SEG(0x0001180014010008ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_DBG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_DBG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010070ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_DBG(offset) (CVMX_ADD_IO_SEG(0x0001180014010070ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_FLOW_CTL0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_FLOW_CTL0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010048ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_FLOW_CTL0(offset) (CVMX_ADD_IO_SEG(0x0001180014010048ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_FLOW_CTL1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_FLOW_CTL1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010050ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_FLOW_CTL1(offset) (CVMX_ADD_IO_SEG(0x0001180014010050ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_IDX_CAL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_IDX_CAL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010058ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_IDX_CAL(offset) (CVMX_ADD_IO_SEG(0x0001180014010058ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_IDX_PMAP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_IDX_PMAP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010010ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_IDX_PMAP(offset) (CVMX_ADD_IO_SEG(0x0001180014010010ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_IDX_STAT0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_IDX_STAT0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010020ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_IDX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014010020ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_IDX_STAT1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_IDX_STAT1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010028ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_IDX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014010028ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_INT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_INT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010078ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180014010078ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_INT_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_INT_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010080ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x0001180014010080ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_MEM_CAL0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_MEM_CAL0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010060ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_MEM_CAL0(offset) (CVMX_ADD_IO_SEG(0x0001180014010060ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_MEM_CAL1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_MEM_CAL1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010068ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_MEM_CAL1(offset) (CVMX_ADD_IO_SEG(0x0001180014010068ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_MEM_PMAP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_MEM_PMAP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010018ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_MEM_PMAP(offset) (CVMX_ADD_IO_SEG(0x0001180014010018ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_MEM_STAT0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_MEM_STAT0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010030ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_MEM_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014010030ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_MEM_STAT1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_MEM_STAT1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010038ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_MEM_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014010038ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_PIPE(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_PIPE(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010088ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_PIPE(offset) (CVMX_ADD_IO_SEG(0x0001180014010088ull) + ((offset) & 1) * 16384)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_ILK_TXX_RMATCH(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_ILK_TXX_RMATCH(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180014010040ull) + ((offset) & 1) * 16384;
}
#else
#define CVMX_ILK_TXX_RMATCH(offset) (CVMX_ADD_IO_SEG(0x0001180014010040ull) + ((offset) & 1) * 16384)
#endif

/**
 * cvmx_ilk_bist_sum
 */
union cvmx_ilk_bist_sum {
	uint64_t u64;
	struct cvmx_ilk_bist_sum_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_58_63               : 6;
	uint64_t rxf_x2p1                     : 1;  /**< Bist status of rxf.x2p_fif_mem1 */
	uint64_t rxf_x2p0                     : 1;  /**< Bist status of rxf.x2p_fif_mem0 */
	uint64_t rxf_pmap                     : 1;  /**< Bist status of rxf.rx_map_mem */
	uint64_t rxf_mem2                     : 1;  /**< Bist status of rxf.rx_fif_mem2 */
	uint64_t rxf_mem1                     : 1;  /**< Bist status of rxf.rx_fif_mem1 */
	uint64_t rxf_mem0                     : 1;  /**< Bist status of rxf.rx_fif_mem0 */
	uint64_t reserved_36_51               : 16;
	uint64_t rle7_dsk1                    : 1;  /**< Bist status of lne.rle7.dsk.dsk_fif_mem1 */
	uint64_t rle7_dsk0                    : 1;  /**< Bist status of lne.rle7.dsk.dsk_fif_mem0 */
	uint64_t rle6_dsk1                    : 1;  /**< Bist status of lne.rle6.dsk.dsk_fif_mem1 */
	uint64_t rle6_dsk0                    : 1;  /**< Bist status of lne.rle6.dsk.dsk_fif_mem0 */
	uint64_t rle5_dsk1                    : 1;  /**< Bist status of lne.rle5.dsk.dsk_fif_mem1 */
	uint64_t rle5_dsk0                    : 1;  /**< Bist status of lne.rle5.dsk.dsk_fif_mem0 */
	uint64_t rle4_dsk1                    : 1;  /**< Bist status of lne.rle4.dsk.dsk_fif_mem1 */
	uint64_t rle4_dsk0                    : 1;  /**< Bist status of lne.rle4.dsk.dsk_fif_mem0 */
	uint64_t rle3_dsk1                    : 1;  /**< Bist status of lne.rle3.dsk.dsk_fif_mem1 */
uint64_t rle3_dsk0 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem0 */ 807 uint64_t rle2_dsk1 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem1 */ 808 uint64_t rle2_dsk0 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem0 */ 809 uint64_t rle1_dsk1 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem1 */ 810 uint64_t rle1_dsk0 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem0 */ 811 uint64_t rle0_dsk1 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem1 */ 812 uint64_t rle0_dsk0 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem0 */ 813 uint64_t reserved_19_19 : 1; 814 uint64_t rlk1_stat1 : 1; /**< Bist status of rlk1.csr.stat_mem1 ***NOTE: Added in pass 2.0 */ 815 uint64_t rlk1_fwc : 1; /**< Bist status of rlk1.fwc.cal_chan_ram */ 816 uint64_t rlk1_stat : 1; /**< Bist status of rlk1.csr.stat_mem */ 817 uint64_t reserved_15_15 : 1; 818 uint64_t rlk0_stat1 : 1; /**< Bist status of rlk0.csr.stat_mem1 ***NOTE: Added in pass 2.0 */ 819 uint64_t rlk0_fwc : 1; /**< Bist status of rlk0.fwc.cal_chan_ram */ 820 uint64_t rlk0_stat : 1; /**< Bist status of rlk0.csr.stat_mem */ 821 uint64_t tlk1_stat1 : 1; /**< Bist status of tlk1.csr.stat_mem1 */ 822 uint64_t tlk1_fwc : 1; /**< Bist status of tlk1.fwc.cal_chan_ram */ 823 uint64_t reserved_9_9 : 1; 824 uint64_t tlk1_txf2 : 1; /**< Bist status of tlk1.txf.tx_map_mem */ 825 uint64_t tlk1_txf1 : 1; /**< Bist status of tlk1.txf.tx_fif_mem1 */ 826 uint64_t tlk1_txf0 : 1; /**< Bist status of tlk1.txf.tx_fif_mem0 */ 827 uint64_t tlk0_stat1 : 1; /**< Bist status of tlk0.csr.stat_mem1 */ 828 uint64_t tlk0_fwc : 1; /**< Bist status of tlk0.fwc.cal_chan_ram */ 829 uint64_t reserved_3_3 : 1; 830 uint64_t tlk0_txf2 : 1; /**< Bist status of tlk0.txf.tx_map_mem */ 831 uint64_t tlk0_txf1 : 1; /**< Bist status of tlk0.txf.tx_fif_mem1 */ 832 uint64_t tlk0_txf0 : 1; /**< Bist status of tlk0.txf.tx_fif_mem0 */ 833#else 834 uint64_t tlk0_txf0 : 1; 835 uint64_t tlk0_txf1 : 1; 836 uint64_t tlk0_txf2 : 1; 837 uint64_t reserved_3_3 : 1; 838 uint64_t 
tlk0_fwc : 1; 839 uint64_t tlk0_stat1 : 1; 840 uint64_t tlk1_txf0 : 1; 841 uint64_t tlk1_txf1 : 1; 842 uint64_t tlk1_txf2 : 1; 843 uint64_t reserved_9_9 : 1; 844 uint64_t tlk1_fwc : 1; 845 uint64_t tlk1_stat1 : 1; 846 uint64_t rlk0_stat : 1; 847 uint64_t rlk0_fwc : 1; 848 uint64_t rlk0_stat1 : 1; 849 uint64_t reserved_15_15 : 1; 850 uint64_t rlk1_stat : 1; 851 uint64_t rlk1_fwc : 1; 852 uint64_t rlk1_stat1 : 1; 853 uint64_t reserved_19_19 : 1; 854 uint64_t rle0_dsk0 : 1; 855 uint64_t rle0_dsk1 : 1; 856 uint64_t rle1_dsk0 : 1; 857 uint64_t rle1_dsk1 : 1; 858 uint64_t rle2_dsk0 : 1; 859 uint64_t rle2_dsk1 : 1; 860 uint64_t rle3_dsk0 : 1; 861 uint64_t rle3_dsk1 : 1; 862 uint64_t rle4_dsk0 : 1; 863 uint64_t rle4_dsk1 : 1; 864 uint64_t rle5_dsk0 : 1; 865 uint64_t rle5_dsk1 : 1; 866 uint64_t rle6_dsk0 : 1; 867 uint64_t rle6_dsk1 : 1; 868 uint64_t rle7_dsk0 : 1; 869 uint64_t rle7_dsk1 : 1; 870 uint64_t reserved_36_51 : 16; 871 uint64_t rxf_mem0 : 1; 872 uint64_t rxf_mem1 : 1; 873 uint64_t rxf_mem2 : 1; 874 uint64_t rxf_pmap : 1; 875 uint64_t rxf_x2p0 : 1; 876 uint64_t rxf_x2p1 : 1; 877 uint64_t reserved_58_63 : 6; 878#endif 879 } s; 880 struct cvmx_ilk_bist_sum_cn68xx { 881#ifdef __BIG_ENDIAN_BITFIELD 882 uint64_t reserved_58_63 : 6; 883 uint64_t rxf_x2p1 : 1; /**< Bist status of rxf.x2p_fif_mem1 */ 884 uint64_t rxf_x2p0 : 1; /**< Bist status of rxf.x2p_fif_mem0 */ 885 uint64_t rxf_pmap : 1; /**< Bist status of rxf.rx_map_mem */ 886 uint64_t rxf_mem2 : 1; /**< Bist status of rxf.rx_fif_mem2 */ 887 uint64_t rxf_mem1 : 1; /**< Bist status of rxf.rx_fif_mem1 */ 888 uint64_t rxf_mem0 : 1; /**< Bist status of rxf.rx_fif_mem0 */ 889 uint64_t reserved_36_51 : 16; 890 uint64_t rle7_dsk1 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem1 */ 891 uint64_t rle7_dsk0 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem0 */ 892 uint64_t rle6_dsk1 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem1 */ 893 uint64_t rle6_dsk0 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem0 */ 894 uint64_t 
rle5_dsk1 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem1 */ 895 uint64_t rle5_dsk0 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem0 */ 896 uint64_t rle4_dsk1 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem1 */ 897 uint64_t rle4_dsk0 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem0 */ 898 uint64_t rle3_dsk1 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem1 */ 899 uint64_t rle3_dsk0 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem0 */ 900 uint64_t rle2_dsk1 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem1 */ 901 uint64_t rle2_dsk0 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem0 */ 902 uint64_t rle1_dsk1 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem1 */ 903 uint64_t rle1_dsk0 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem0 */ 904 uint64_t rle0_dsk1 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem1 */ 905 uint64_t rle0_dsk0 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem0 */ 906 uint64_t reserved_19_19 : 1; 907 uint64_t rlk1_stat1 : 1; /**< Bist status of rlk1.csr.stat_mem1 ***NOTE: Added in pass 2.0 */ 908 uint64_t rlk1_fwc : 1; /**< Bist status of rlk1.fwc.cal_chan_ram */ 909 uint64_t rlk1_stat : 1; /**< Bist status of rlk1.csr.stat_mem0 */ 910 uint64_t reserved_15_15 : 1; 911 uint64_t rlk0_stat1 : 1; /**< Bist status of rlk0.csr.stat_mem1 ***NOTE: Added in pass 2.0 */ 912 uint64_t rlk0_fwc : 1; /**< Bist status of rlk0.fwc.cal_chan_ram */ 913 uint64_t rlk0_stat : 1; /**< Bist status of rlk0.csr.stat_mem0 */ 914 uint64_t tlk1_stat1 : 1; /**< Bist status of tlk1.csr.stat_mem1 */ 915 uint64_t tlk1_fwc : 1; /**< Bist status of tlk1.fwc.cal_chan_ram */ 916 uint64_t tlk1_stat0 : 1; /**< Bist status of tlk1.csr.stat_mem0 */ 917 uint64_t tlk1_txf2 : 1; /**< Bist status of tlk1.txf.tx_map_mem */ 918 uint64_t tlk1_txf1 : 1; /**< Bist status of tlk1.txf.tx_fif_mem1 */ 919 uint64_t tlk1_txf0 : 1; /**< Bist status of tlk1.txf.tx_fif_mem0 */ 920 uint64_t tlk0_stat1 : 1; /**< Bist status of tlk0.csr.stat_mem1 */ 921 uint64_t tlk0_fwc : 1; /**< Bist 
status of tlk0.fwc.cal_chan_ram */ 922 uint64_t tlk0_stat0 : 1; /**< Bist status of tlk0.csr.stat_mem0 */ 923 uint64_t tlk0_txf2 : 1; /**< Bist status of tlk0.txf.tx_map_mem */ 924 uint64_t tlk0_txf1 : 1; /**< Bist status of tlk0.txf.tx_fif_mem1 */ 925 uint64_t tlk0_txf0 : 1; /**< Bist status of tlk0.txf.tx_fif_mem0 */ 926#else 927 uint64_t tlk0_txf0 : 1; 928 uint64_t tlk0_txf1 : 1; 929 uint64_t tlk0_txf2 : 1; 930 uint64_t tlk0_stat0 : 1; 931 uint64_t tlk0_fwc : 1; 932 uint64_t tlk0_stat1 : 1; 933 uint64_t tlk1_txf0 : 1; 934 uint64_t tlk1_txf1 : 1; 935 uint64_t tlk1_txf2 : 1; 936 uint64_t tlk1_stat0 : 1; 937 uint64_t tlk1_fwc : 1; 938 uint64_t tlk1_stat1 : 1; 939 uint64_t rlk0_stat : 1; 940 uint64_t rlk0_fwc : 1; 941 uint64_t rlk0_stat1 : 1; 942 uint64_t reserved_15_15 : 1; 943 uint64_t rlk1_stat : 1; 944 uint64_t rlk1_fwc : 1; 945 uint64_t rlk1_stat1 : 1; 946 uint64_t reserved_19_19 : 1; 947 uint64_t rle0_dsk0 : 1; 948 uint64_t rle0_dsk1 : 1; 949 uint64_t rle1_dsk0 : 1; 950 uint64_t rle1_dsk1 : 1; 951 uint64_t rle2_dsk0 : 1; 952 uint64_t rle2_dsk1 : 1; 953 uint64_t rle3_dsk0 : 1; 954 uint64_t rle3_dsk1 : 1; 955 uint64_t rle4_dsk0 : 1; 956 uint64_t rle4_dsk1 : 1; 957 uint64_t rle5_dsk0 : 1; 958 uint64_t rle5_dsk1 : 1; 959 uint64_t rle6_dsk0 : 1; 960 uint64_t rle6_dsk1 : 1; 961 uint64_t rle7_dsk0 : 1; 962 uint64_t rle7_dsk1 : 1; 963 uint64_t reserved_36_51 : 16; 964 uint64_t rxf_mem0 : 1; 965 uint64_t rxf_mem1 : 1; 966 uint64_t rxf_mem2 : 1; 967 uint64_t rxf_pmap : 1; 968 uint64_t rxf_x2p0 : 1; 969 uint64_t rxf_x2p1 : 1; 970 uint64_t reserved_58_63 : 6; 971#endif 972 } cn68xx; 973 struct cvmx_ilk_bist_sum_cn68xxp1 { 974#ifdef __BIG_ENDIAN_BITFIELD 975 uint64_t reserved_58_63 : 6; 976 uint64_t rxf_x2p1 : 1; /**< Bist status of rxf.x2p_fif_mem1 */ 977 uint64_t rxf_x2p0 : 1; /**< Bist status of rxf.x2p_fif_mem0 */ 978 uint64_t rxf_pmap : 1; /**< Bist status of rxf.rx_map_mem */ 979 uint64_t rxf_mem2 : 1; /**< Bist status of rxf.rx_fif_mem2 */ 980 uint64_t rxf_mem1 : 1; 
/**< Bist status of rxf.rx_fif_mem1 */ 981 uint64_t rxf_mem0 : 1; /**< Bist status of rxf.rx_fif_mem0 */ 982 uint64_t reserved_36_51 : 16; 983 uint64_t rle7_dsk1 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem1 */ 984 uint64_t rle7_dsk0 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem0 */ 985 uint64_t rle6_dsk1 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem1 */ 986 uint64_t rle6_dsk0 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem0 */ 987 uint64_t rle5_dsk1 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem1 */ 988 uint64_t rle5_dsk0 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem0 */ 989 uint64_t rle4_dsk1 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem1 */ 990 uint64_t rle4_dsk0 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem0 */ 991 uint64_t rle3_dsk1 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem1 */ 992 uint64_t rle3_dsk0 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem0 */ 993 uint64_t rle2_dsk1 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem1 */ 994 uint64_t rle2_dsk0 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem0 */ 995 uint64_t rle1_dsk1 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem1 */ 996 uint64_t rle1_dsk0 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem0 */ 997 uint64_t rle0_dsk1 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem1 */ 998 uint64_t rle0_dsk0 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem0 */ 999 uint64_t reserved_18_19 : 2; 1000 uint64_t rlk1_fwc : 1; /**< Bist status of rlk1.fwc.cal_chan_ram */ 1001 uint64_t rlk1_stat : 1; /**< Bist status of rlk1.csr.stat_mem */ 1002 uint64_t reserved_14_15 : 2; 1003 uint64_t rlk0_fwc : 1; /**< Bist status of rlk0.fwc.cal_chan_ram */ 1004 uint64_t rlk0_stat : 1; /**< Bist status of rlk0.csr.stat_mem */ 1005 uint64_t reserved_11_11 : 1; 1006 uint64_t tlk1_fwc : 1; /**< Bist status of tlk1.fwc.cal_chan_ram */ 1007 uint64_t tlk1_stat : 1; /**< Bist status of tlk1.csr.stat_mem */ 1008 uint64_t tlk1_txf2 : 1; /**< Bist status of tlk1.txf.tx_map_mem */ 1009 uint64_t tlk1_txf1 : 
1; /**< Bist status of tlk1.txf.tx_fif_mem1 */ 1010 uint64_t tlk1_txf0 : 1; /**< Bist status of tlk1.txf.tx_fif_mem0 */ 1011 uint64_t reserved_5_5 : 1; 1012 uint64_t tlk0_fwc : 1; /**< Bist status of tlk0.fwc.cal_chan_ram */ 1013 uint64_t tlk0_stat : 1; /**< Bist status of tlk0.csr.stat_mem */ 1014 uint64_t tlk0_txf2 : 1; /**< Bist status of tlk0.txf.tx_map_mem */ 1015 uint64_t tlk0_txf1 : 1; /**< Bist status of tlk0.txf.tx_fif_mem1 */ 1016 uint64_t tlk0_txf0 : 1; /**< Bist status of tlk0.txf.tx_fif_mem0 */ 1017#else 1018 uint64_t tlk0_txf0 : 1; 1019 uint64_t tlk0_txf1 : 1; 1020 uint64_t tlk0_txf2 : 1; 1021 uint64_t tlk0_stat : 1; 1022 uint64_t tlk0_fwc : 1; 1023 uint64_t reserved_5_5 : 1; 1024 uint64_t tlk1_txf0 : 1; 1025 uint64_t tlk1_txf1 : 1; 1026 uint64_t tlk1_txf2 : 1; 1027 uint64_t tlk1_stat : 1; 1028 uint64_t tlk1_fwc : 1; 1029 uint64_t reserved_11_11 : 1; 1030 uint64_t rlk0_stat : 1; 1031 uint64_t rlk0_fwc : 1; 1032 uint64_t reserved_14_15 : 2; 1033 uint64_t rlk1_stat : 1; 1034 uint64_t rlk1_fwc : 1; 1035 uint64_t reserved_18_19 : 2; 1036 uint64_t rle0_dsk0 : 1; 1037 uint64_t rle0_dsk1 : 1; 1038 uint64_t rle1_dsk0 : 1; 1039 uint64_t rle1_dsk1 : 1; 1040 uint64_t rle2_dsk0 : 1; 1041 uint64_t rle2_dsk1 : 1; 1042 uint64_t rle3_dsk0 : 1; 1043 uint64_t rle3_dsk1 : 1; 1044 uint64_t rle4_dsk0 : 1; 1045 uint64_t rle4_dsk1 : 1; 1046 uint64_t rle5_dsk0 : 1; 1047 uint64_t rle5_dsk1 : 1; 1048 uint64_t rle6_dsk0 : 1; 1049 uint64_t rle6_dsk1 : 1; 1050 uint64_t rle7_dsk0 : 1; 1051 uint64_t rle7_dsk1 : 1; 1052 uint64_t reserved_36_51 : 16; 1053 uint64_t rxf_mem0 : 1; 1054 uint64_t rxf_mem1 : 1; 1055 uint64_t rxf_mem2 : 1; 1056 uint64_t rxf_pmap : 1; 1057 uint64_t rxf_x2p0 : 1; 1058 uint64_t rxf_x2p1 : 1; 1059 uint64_t reserved_58_63 : 6; 1060#endif 1061 } cn68xxp1; 1062}; 1063typedef union cvmx_ilk_bist_sum cvmx_ilk_bist_sum_t; 1064 1065/** 1066 * cvmx_ilk_gbl_cfg 1067 */ 1068union cvmx_ilk_gbl_cfg { 1069 uint64_t u64; 1070 struct cvmx_ilk_gbl_cfg_s { 1071#ifdef 
__BIG_ENDIAN_BITFIELD 1072 uint64_t reserved_4_63 : 60; 1073 uint64_t rid_rstdis : 1; /**< Disable automatic reassembly-id error recovery. For diagnostic 1074 use only. 1075 1076 ***NOTE: Added in pass 2.0 */ 1077 uint64_t reset : 1; /**< Reset ILK. For diagnostic use only. 1078 1079 ***NOTE: Added in pass 2.0 */ 1080 uint64_t cclk_dis : 1; /**< Disable ILK conditional clocking. For diagnostic use only. */ 1081 uint64_t rxf_xlink : 1; /**< Causes external loopback traffic to switch links. Enabling 1082 this allow simultaneous use of external and internal loopback. */ 1083#else 1084 uint64_t rxf_xlink : 1; 1085 uint64_t cclk_dis : 1; 1086 uint64_t reset : 1; 1087 uint64_t rid_rstdis : 1; 1088 uint64_t reserved_4_63 : 60; 1089#endif 1090 } s; 1091 struct cvmx_ilk_gbl_cfg_s cn68xx; 1092 struct cvmx_ilk_gbl_cfg_cn68xxp1 { 1093#ifdef __BIG_ENDIAN_BITFIELD 1094 uint64_t reserved_2_63 : 62; 1095 uint64_t cclk_dis : 1; /**< Disable ILK conditional clocking. For diagnostic use only. */ 1096 uint64_t rxf_xlink : 1; /**< Causes external loopback traffic to switch links. Enabling 1097 this allow simultaneous use of external and internal loopback. */ 1098#else 1099 uint64_t rxf_xlink : 1; 1100 uint64_t cclk_dis : 1; 1101 uint64_t reserved_2_63 : 62; 1102#endif 1103 } cn68xxp1; 1104}; 1105typedef union cvmx_ilk_gbl_cfg cvmx_ilk_gbl_cfg_t; 1106 1107/** 1108 * cvmx_ilk_gbl_int 1109 */ 1110union cvmx_ilk_gbl_int { 1111 uint64_t u64; 1112 struct cvmx_ilk_gbl_int_s { 1113#ifdef __BIG_ENDIAN_BITFIELD 1114 uint64_t reserved_5_63 : 59; 1115 uint64_t rxf_push_full : 1; /**< RXF overflow */ 1116 uint64_t rxf_pop_empty : 1; /**< RXF underflow */ 1117 uint64_t rxf_ctl_perr : 1; /**< RXF parity error occurred on sideband control signals. Data 1118 cycle will be dropped. */ 1119 uint64_t rxf_lnk1_perr : 1; /**< RXF parity error occurred on RxLink1 packet data 1120 Packet will be marked with error at eop */ 1121 uint64_t rxf_lnk0_perr : 1; /**< RXF parity error occurred on RxLink0 packet data. 
Packet will 1122 be marked with error at eop */ 1123#else 1124 uint64_t rxf_lnk0_perr : 1; 1125 uint64_t rxf_lnk1_perr : 1; 1126 uint64_t rxf_ctl_perr : 1; 1127 uint64_t rxf_pop_empty : 1; 1128 uint64_t rxf_push_full : 1; 1129 uint64_t reserved_5_63 : 59; 1130#endif 1131 } s; 1132 struct cvmx_ilk_gbl_int_s cn68xx; 1133 struct cvmx_ilk_gbl_int_s cn68xxp1; 1134}; 1135typedef union cvmx_ilk_gbl_int cvmx_ilk_gbl_int_t; 1136 1137/** 1138 * cvmx_ilk_gbl_int_en 1139 */ 1140union cvmx_ilk_gbl_int_en { 1141 uint64_t u64; 1142 struct cvmx_ilk_gbl_int_en_s { 1143#ifdef __BIG_ENDIAN_BITFIELD 1144 uint64_t reserved_5_63 : 59; 1145 uint64_t rxf_push_full : 1; /**< RXF overflow */ 1146 uint64_t rxf_pop_empty : 1; /**< RXF underflow */ 1147 uint64_t rxf_ctl_perr : 1; /**< RXF parity error occurred on sideband control signals. Data 1148 cycle will be dropped. */ 1149 uint64_t rxf_lnk1_perr : 1; /**< RXF parity error occurred on RxLink1 packet data 1150 Packet will be marked with error at eop */ 1151 uint64_t rxf_lnk0_perr : 1; /**< RXF parity error occurred on RxLink0 packet data 1152 Packet will be marked with error at eop */ 1153#else 1154 uint64_t rxf_lnk0_perr : 1; 1155 uint64_t rxf_lnk1_perr : 1; 1156 uint64_t rxf_ctl_perr : 1; 1157 uint64_t rxf_pop_empty : 1; 1158 uint64_t rxf_push_full : 1; 1159 uint64_t reserved_5_63 : 59; 1160#endif 1161 } s; 1162 struct cvmx_ilk_gbl_int_en_s cn68xx; 1163 struct cvmx_ilk_gbl_int_en_s cn68xxp1; 1164}; 1165typedef union cvmx_ilk_gbl_int_en cvmx_ilk_gbl_int_en_t; 1166 1167/** 1168 * cvmx_ilk_int_sum 1169 */ 1170union cvmx_ilk_int_sum { 1171 uint64_t u64; 1172 struct cvmx_ilk_int_sum_s { 1173#ifdef __BIG_ENDIAN_BITFIELD 1174 uint64_t reserved_13_63 : 51; 1175 uint64_t rle7_int : 1; /**< RxLane7 interrupt status. See ILK_RX_LNE7_INT */ 1176 uint64_t rle6_int : 1; /**< RxLane6 interrupt status. See ILK_RX_LNE6_INT */ 1177 uint64_t rle5_int : 1; /**< RxLane5 interrupt status. 
See ILK_RX_LNE5_INT */ 1178 uint64_t rle4_int : 1; /**< RxLane4 interrupt status. See ILK_RX_LNE4_INT */ 1179 uint64_t rle3_int : 1; /**< RxLane3 interrupt status. See ILK_RX_LNE3_INT */ 1180 uint64_t rle2_int : 1; /**< RxLane2 interrupt status. See ILK_RX_LNE2_INT */ 1181 uint64_t rle1_int : 1; /**< RxLane1 interrupt status. See ILK_RX_LNE1_INT */ 1182 uint64_t rle0_int : 1; /**< RxLane0 interrupt status. See ILK_RX_LNE0_INT */ 1183 uint64_t rlk1_int : 1; /**< RxLink1 interrupt status. See ILK_RX1_INT */ 1184 uint64_t rlk0_int : 1; /**< RxLink0 interrupt status. See ILK_RX0_INT */ 1185 uint64_t tlk1_int : 1; /**< TxLink1 interrupt status. See ILK_TX1_INT */ 1186 uint64_t tlk0_int : 1; /**< TxLink0 interrupt status. See ILK_TX0_INT */ 1187 uint64_t gbl_int : 1; /**< Global interrupt status. See ILK_GBL_INT */ 1188#else 1189 uint64_t gbl_int : 1; 1190 uint64_t tlk0_int : 1; 1191 uint64_t tlk1_int : 1; 1192 uint64_t rlk0_int : 1; 1193 uint64_t rlk1_int : 1; 1194 uint64_t rle0_int : 1; 1195 uint64_t rle1_int : 1; 1196 uint64_t rle2_int : 1; 1197 uint64_t rle3_int : 1; 1198 uint64_t rle4_int : 1; 1199 uint64_t rle5_int : 1; 1200 uint64_t rle6_int : 1; 1201 uint64_t rle7_int : 1; 1202 uint64_t reserved_13_63 : 51; 1203#endif 1204 } s; 1205 struct cvmx_ilk_int_sum_s cn68xx; 1206 struct cvmx_ilk_int_sum_s cn68xxp1; 1207}; 1208typedef union cvmx_ilk_int_sum cvmx_ilk_int_sum_t; 1209 1210/** 1211 * cvmx_ilk_lne_dbg 1212 */ 1213union cvmx_ilk_lne_dbg { 1214 uint64_t u64; 1215 struct cvmx_ilk_lne_dbg_s { 1216#ifdef __BIG_ENDIAN_BITFIELD 1217 uint64_t reserved_60_63 : 4; 1218 uint64_t tx_bad_crc32 : 1; /**< Send 1 diagnostic word with bad CRC32 to the selected lane. 
1219 Note: injects just once */ 1220 uint64_t tx_bad_6467_cnt : 5; /**< Send N bad 64B/67B codewords on selected lane */ 1221 uint64_t tx_bad_sync_cnt : 3; /**< Send N bad sync words on selected lane */ 1222 uint64_t tx_bad_scram_cnt : 3; /**< Send N bad scram state on selected lane */ 1223 uint64_t reserved_40_47 : 8; 1224 uint64_t tx_bad_lane_sel : 8; /**< Select lane to apply error injection counts */ 1225 uint64_t reserved_24_31 : 8; 1226 uint64_t tx_dis_dispr : 8; /**< Per-lane disparity disable */ 1227 uint64_t reserved_8_15 : 8; 1228 uint64_t tx_dis_scram : 8; /**< Per-lane scrambler disable */ 1229#else 1230 uint64_t tx_dis_scram : 8; 1231 uint64_t reserved_8_15 : 8; 1232 uint64_t tx_dis_dispr : 8; 1233 uint64_t reserved_24_31 : 8; 1234 uint64_t tx_bad_lane_sel : 8; 1235 uint64_t reserved_40_47 : 8; 1236 uint64_t tx_bad_scram_cnt : 3; 1237 uint64_t tx_bad_sync_cnt : 3; 1238 uint64_t tx_bad_6467_cnt : 5; 1239 uint64_t tx_bad_crc32 : 1; 1240 uint64_t reserved_60_63 : 4; 1241#endif 1242 } s; 1243 struct cvmx_ilk_lne_dbg_s cn68xx; 1244 struct cvmx_ilk_lne_dbg_s cn68xxp1; 1245}; 1246typedef union cvmx_ilk_lne_dbg cvmx_ilk_lne_dbg_t; 1247 1248/** 1249 * cvmx_ilk_lne_sts_msg 1250 */ 1251union cvmx_ilk_lne_sts_msg { 1252 uint64_t u64; 1253 struct cvmx_ilk_lne_sts_msg_s { 1254#ifdef __BIG_ENDIAN_BITFIELD 1255 uint64_t reserved_56_63 : 8; 1256 uint64_t rx_lnk_stat : 8; /**< Link status received in the diagnostic word (per-lane) */ 1257 uint64_t reserved_40_47 : 8; 1258 uint64_t rx_lne_stat : 8; /**< Lane status received in the diagnostic word (per-lane) */ 1259 uint64_t reserved_24_31 : 8; 1260 uint64_t tx_lnk_stat : 8; /**< Link status transmitted in the diagnostic word (per-lane) */ 1261 uint64_t reserved_8_15 : 8; 1262 uint64_t tx_lne_stat : 8; /**< Lane status transmitted in the diagnostic word (per-lane) */ 1263#else 1264 uint64_t tx_lne_stat : 8; 1265 uint64_t reserved_8_15 : 8; 1266 uint64_t tx_lnk_stat : 8; 1267 uint64_t reserved_24_31 : 8; 1268 uint64_t 
rx_lne_stat : 8; 1269 uint64_t reserved_40_47 : 8; 1270 uint64_t rx_lnk_stat : 8; 1271 uint64_t reserved_56_63 : 8; 1272#endif 1273 } s; 1274 struct cvmx_ilk_lne_sts_msg_s cn68xx; 1275 struct cvmx_ilk_lne_sts_msg_s cn68xxp1; 1276}; 1277typedef union cvmx_ilk_lne_sts_msg cvmx_ilk_lne_sts_msg_t; 1278 1279/** 1280 * cvmx_ilk_rx#_cfg0 1281 */ 1282union cvmx_ilk_rxx_cfg0 { 1283 uint64_t u64; 1284 struct cvmx_ilk_rxx_cfg0_s { 1285#ifdef __BIG_ENDIAN_BITFIELD 1286 uint64_t ext_lpbk_fc : 1; /**< Enable Rx-Tx flowcontrol loopback (external) */ 1287 uint64_t ext_lpbk : 1; /**< Enable Rx-Tx data loopback (external). Note that with differing 1288 transmit & receive clocks, skip word are inserted/deleted */ 1289 uint64_t reserved_60_61 : 2; 1290 uint64_t lnk_stats_wrap : 1; /**< Upon overflow, a statistics counter should wrap instead of 1291 saturating. 1292 1293 ***NOTE: Added in pass 2.0 */ 1294 uint64_t bcw_push : 1; /**< The 8 byte burst control word containing the SOP will be 1295 prepended to the corresponding packet. 1296 1297 ***NOTE: Added in pass 2.0 */ 1298 uint64_t mproto_ign : 1; /**< When LA_MODE=1 and MPROTO_IGN=0, the multi-protocol bit of the 1299 LA control word is used to determine if the burst is an LA or 1300 non-LA burst. When LA_MODE=1 and MPROTO_IGN=1, all bursts 1301 are treated LA. When LA_MODE=0, this field is ignored 1302 1303 ***NOTE: Added in pass 2.0 */ 1304 uint64_t ptrn_mode : 1; /**< Enable programmable test pattern mode */ 1305 uint64_t lnk_stats_rdclr : 1; /**< CSR read to ILK_RXx_STAT* clears the counter after returning 1306 its current value. */ 1307 uint64_t lnk_stats_ena : 1; /**< Enable link statistics counters */ 1308 uint64_t mltuse_fc_ena : 1; /**< Use multi-use field for calendar */ 1309 uint64_t cal_ena : 1; /**< Enable Rx calendar. When the calendar table is disabled, all 1310 port-pipes receive XON. 
*/ 1311 uint64_t mfrm_len : 13; /**< The quantity of data sent on each lane including one sync word, 1312 scrambler state, diag word, zero or more skip words, and the 1313 data payload. Must be large than ILK_RXX_CFG1[SKIP_CNT]+9. 1314 Supported range:ILK_RXX_CFG1[SKIP_CNT]+9 < MFRM_LEN <= 4096) */ 1315 uint64_t brst_shrt : 7; /**< Minimum interval between burst control words, as a multiple of 1316 8 bytes. Supported range from 8 bytes to 512 (ie. 0 < 1317 BRST_SHRT <= 64) 1318 This field affects the ILK_RX*_STAT4[BRST_SHRT_ERR_CNT] 1319 counter. It does not affect correct operation of the link. */ 1320 uint64_t lane_rev : 1; /**< Lane reversal. When enabled, lane de-striping is performed 1321 from most significant lane enabled to least significant lane 1322 enabled. LANE_ENA must be zero before changing LANE_REV. */ 1323 uint64_t brst_max : 5; /**< Maximum size of a data burst, as a multiple of 64 byte blocks. 1324 Supported range is from 64 bytes to 1024 bytes. (ie. 0 < 1325 BRST_MAX <= 16) 1326 This field affects the ILK_RX*_STAT2[BRST_NOT_FULL_CNT] and 1327 ILK_RX*_STAT3[BRST_MAX_ERR_CNT] counters. It does not affect 1328 correct operation of the link. */ 1329 uint64_t reserved_25_25 : 1; 1330 uint64_t cal_depth : 9; /**< Number of valid entries in the calendar. Supported range from 1331 1 to 288. */ 1332 uint64_t reserved_8_15 : 8; 1333 uint64_t lane_ena : 8; /**< Lane enable mask. Link is enabled if any lane is enabled. The 1334 same lane should not be enabled in multiple ILK_RXx_CFG0. Each 1335 bit of LANE_ENA maps to a RX lane (RLE) and a QLM lane. NOTE: 1336 LANE_REV has no effect on this mapping. 
1337 1338 LANE_ENA[0] = RLE0 = QLM1 lane 0 1339 LANE_ENA[1] = RLE1 = QLM1 lane 1 1340 LANE_ENA[2] = RLE2 = QLM1 lane 2 1341 LANE_ENA[3] = RLE3 = QLM1 lane 3 1342 LANE_ENA[4] = RLE4 = QLM2 lane 0 1343 LANE_ENA[5] = RLE5 = QLM2 lane 1 1344 LANE_ENA[6] = RLE6 = QLM2 lane 2 1345 LANE_ENA[7] = RLE7 = QLM2 lane 3 */ 1346#else 1347 uint64_t lane_ena : 8; 1348 uint64_t reserved_8_15 : 8; 1349 uint64_t cal_depth : 9; 1350 uint64_t reserved_25_25 : 1; 1351 uint64_t brst_max : 5; 1352 uint64_t lane_rev : 1; 1353 uint64_t brst_shrt : 7; 1354 uint64_t mfrm_len : 13; 1355 uint64_t cal_ena : 1; 1356 uint64_t mltuse_fc_ena : 1; 1357 uint64_t lnk_stats_ena : 1; 1358 uint64_t lnk_stats_rdclr : 1; 1359 uint64_t ptrn_mode : 1; 1360 uint64_t mproto_ign : 1; 1361 uint64_t bcw_push : 1; 1362 uint64_t lnk_stats_wrap : 1; 1363 uint64_t reserved_60_61 : 2; 1364 uint64_t ext_lpbk : 1; 1365 uint64_t ext_lpbk_fc : 1; 1366#endif 1367 } s; 1368 struct cvmx_ilk_rxx_cfg0_s cn68xx; 1369 struct cvmx_ilk_rxx_cfg0_cn68xxp1 { 1370#ifdef __BIG_ENDIAN_BITFIELD 1371 uint64_t ext_lpbk_fc : 1; /**< Enable Rx-Tx flowcontrol loopback (external) */ 1372 uint64_t ext_lpbk : 1; /**< Enable Rx-Tx data loopback (external). Note that with differing 1373 transmit & receive clocks, skip word are inserted/deleted */ 1374 uint64_t reserved_57_61 : 5; 1375 uint64_t ptrn_mode : 1; /**< Enable programmable test pattern mode */ 1376 uint64_t lnk_stats_rdclr : 1; /**< CSR read to ILK_RXx_STAT* clears the counter after returning 1377 its current value. */ 1378 uint64_t lnk_stats_ena : 1; /**< Enable link statistics counters */ 1379 uint64_t mltuse_fc_ena : 1; /**< Use multi-use field for calendar */ 1380 uint64_t cal_ena : 1; /**< Enable Rx calendar. When the calendar table is disabled, all 1381 port-pipes receive XON. */ 1382 uint64_t mfrm_len : 13; /**< The quantity of data sent on each lane including one sync word, 1383 scrambler state, diag word, zero or more skip words, and the 1384 data payload. 
Must be large than ILK_RXX_CFG1[SKIP_CNT]+9. 1385 Supported range:ILK_RXX_CFG1[SKIP_CNT]+9 < MFRM_LEN <= 4096) */ 1386 uint64_t brst_shrt : 7; /**< Minimum interval between burst control words, as a multiple of 1387 8 bytes. Supported range from 8 bytes to 512 (ie. 0 < 1388 BRST_SHRT <= 64) 1389 This field affects the ILK_RX*_STAT4[BRST_SHRT_ERR_CNT] 1390 counter. It does not affect correct operation of the link. */ 1391 uint64_t lane_rev : 1; /**< Lane reversal. When enabled, lane de-striping is performed 1392 from most significant lane enabled to least significant lane 1393 enabled. LANE_ENA must be zero before changing LANE_REV. */ 1394 uint64_t brst_max : 5; /**< Maximum size of a data burst, as a multiple of 64 byte blocks. 1395 Supported range is from 64 bytes to 1024 bytes. (ie. 0 < 1396 BRST_MAX <= 16) 1397 This field affects the ILK_RX*_STAT2[BRST_NOT_FULL_CNT] and 1398 ILK_RX*_STAT3[BRST_MAX_ERR_CNT] counters. It does not affect 1399 correct operation of the link. */ 1400 uint64_t reserved_25_25 : 1; 1401 uint64_t cal_depth : 9; /**< Number of valid entries in the calendar. Supported range from 1402 1 to 288. */ 1403 uint64_t reserved_8_15 : 8; 1404 uint64_t lane_ena : 8; /**< Lane enable mask. Link is enabled if any lane is enabled. The 1405 same lane should not be enabled in multiple ILK_RXx_CFG0. Each 1406 bit of LANE_ENA maps to a RX lane (RLE) and a QLM lane. NOTE: 1407 LANE_REV has no effect on this mapping. 
1408 1409 LANE_ENA[0] = RLE0 = QLM1 lane 0 1410 LANE_ENA[1] = RLE1 = QLM1 lane 1 1411 LANE_ENA[2] = RLE2 = QLM1 lane 2 1412 LANE_ENA[3] = RLE3 = QLM1 lane 3 1413 LANE_ENA[4] = RLE4 = QLM2 lane 0 1414 LANE_ENA[5] = RLE5 = QLM2 lane 1 1415 LANE_ENA[6] = RLE6 = QLM2 lane 2 1416 LANE_ENA[7] = RLE7 = QLM2 lane 3 */ 1417#else 1418 uint64_t lane_ena : 8; 1419 uint64_t reserved_8_15 : 8; 1420 uint64_t cal_depth : 9; 1421 uint64_t reserved_25_25 : 1; 1422 uint64_t brst_max : 5; 1423 uint64_t lane_rev : 1; 1424 uint64_t brst_shrt : 7; 1425 uint64_t mfrm_len : 13; 1426 uint64_t cal_ena : 1; 1427 uint64_t mltuse_fc_ena : 1; 1428 uint64_t lnk_stats_ena : 1; 1429 uint64_t lnk_stats_rdclr : 1; 1430 uint64_t ptrn_mode : 1; 1431 uint64_t reserved_57_61 : 5; 1432 uint64_t ext_lpbk : 1; 1433 uint64_t ext_lpbk_fc : 1; 1434#endif 1435 } cn68xxp1; 1436}; 1437typedef union cvmx_ilk_rxx_cfg0 cvmx_ilk_rxx_cfg0_t; 1438 1439/** 1440 * cvmx_ilk_rx#_cfg1 1441 */ 1442union cvmx_ilk_rxx_cfg1 { 1443 uint64_t u64; 1444 struct cvmx_ilk_rxx_cfg1_s { 1445#ifdef __BIG_ENDIAN_BITFIELD 1446 uint64_t reserved_62_63 : 2; 1447 uint64_t rx_fifo_cnt : 12; /**< Number of 64-bit words currently consumed by this link in the 1448 RX fifo. */ 1449 uint64_t reserved_48_49 : 2; 1450 uint64_t rx_fifo_hwm : 12; /**< Number of 64-bit words consumed by this link before switch 1451 transmitted link flow control status from XON to XOFF. 1452 1453 XON = RX_FIFO_CNT < RX_FIFO_HWM 1454 XOFF = RX_FIFO_CNT >= RX_FIFO_HWM. */ 1455 uint64_t reserved_34_35 : 2; 1456 uint64_t rx_fifo_max : 12; /**< Maximum number of 64-bit words consumed by this link in the RX 1457 fifo. The sum of all links should be equal to 2048 (16KB) */ 1458 uint64_t pkt_flush : 1; /**< Packet receive flush. Writing PKT_FLUSH=1 will cause all open 1459 packets to be error-out, just as though the link went down. */ 1460 uint64_t pkt_ena : 1; /**< Packet receive enable. When PKT_ENA=0, any received SOP causes 1461 the entire packet to be dropped. 
*/ 1462 uint64_t la_mode : 1; /**< 0 = Interlaken 1463 1 = Interlaken Look-Aside */ 1464 uint64_t tx_link_fc : 1; /**< Link flow control status transmitted by the Tx-Link 1465 XON when RX_FIFO_CNT <= RX_FIFO_HWM and lane alignment is done */ 1466 uint64_t rx_link_fc : 1; /**< Link flow control status received in burst/idle control words. 1467 XOFF will cause Tx-Link to stop transmitting on all channels. */ 1468 uint64_t rx_align_ena : 1; /**< Enable the lane alignment. This should only be done after all 1469 enabled lanes have achieved word boundary lock and scrambler 1470 synchronization. Note: Hardware will clear this when any 1471 participating lane loses either word boundary lock or scrambler 1472 synchronization */ 1473 uint64_t reserved_8_15 : 8; 1474 uint64_t rx_bdry_lock_ena : 8; /**< Enable word boundary lock. While disabled, received data is 1475 tossed. Once enabled, received data is searched for legal 1476 2bit patterns. Automatically cleared for disabled lanes. */ 1477#else 1478 uint64_t rx_bdry_lock_ena : 8; 1479 uint64_t reserved_8_15 : 8; 1480 uint64_t rx_align_ena : 1; 1481 uint64_t rx_link_fc : 1; 1482 uint64_t tx_link_fc : 1; 1483 uint64_t la_mode : 1; 1484 uint64_t pkt_ena : 1; 1485 uint64_t pkt_flush : 1; 1486 uint64_t rx_fifo_max : 12; 1487 uint64_t reserved_34_35 : 2; 1488 uint64_t rx_fifo_hwm : 12; 1489 uint64_t reserved_48_49 : 2; 1490 uint64_t rx_fifo_cnt : 12; 1491 uint64_t reserved_62_63 : 2; 1492#endif 1493 } s; 1494 struct cvmx_ilk_rxx_cfg1_s cn68xx; 1495 struct cvmx_ilk_rxx_cfg1_s cn68xxp1; 1496}; 1497typedef union cvmx_ilk_rxx_cfg1 cvmx_ilk_rxx_cfg1_t; 1498 1499/** 1500 * cvmx_ilk_rx#_flow_ctl0 1501 */ 1502union cvmx_ilk_rxx_flow_ctl0 { 1503 uint64_t u64; 1504 struct cvmx_ilk_rxx_flow_ctl0_s { 1505#ifdef __BIG_ENDIAN_BITFIELD 1506 uint64_t status : 64; /**< Flow control status for port-pipes 63-0, where a 1 indicates 1507 the presence of backpressure (ie. XOFF) and 0 indicates the 1508 absence of backpressure (ie. 
XON) */ 1509#else 1510 uint64_t status : 64; 1511#endif 1512 } s; 1513 struct cvmx_ilk_rxx_flow_ctl0_s cn68xx; 1514 struct cvmx_ilk_rxx_flow_ctl0_s cn68xxp1; 1515}; 1516typedef union cvmx_ilk_rxx_flow_ctl0 cvmx_ilk_rxx_flow_ctl0_t; 1517 1518/** 1519 * cvmx_ilk_rx#_flow_ctl1 1520 */ 1521union cvmx_ilk_rxx_flow_ctl1 { 1522 uint64_t u64; 1523 struct cvmx_ilk_rxx_flow_ctl1_s { 1524#ifdef __BIG_ENDIAN_BITFIELD 1525 uint64_t status : 64; /**< Flow control status for port-pipes 127-64, where a 1 indicates 1526 the presence of backpressure (ie. XOFF) and 0 indicates the 1527 absence of backpressure (ie. XON) */ 1528#else 1529 uint64_t status : 64; 1530#endif 1531 } s; 1532 struct cvmx_ilk_rxx_flow_ctl1_s cn68xx; 1533 struct cvmx_ilk_rxx_flow_ctl1_s cn68xxp1; 1534}; 1535typedef union cvmx_ilk_rxx_flow_ctl1 cvmx_ilk_rxx_flow_ctl1_t; 1536 1537/** 1538 * cvmx_ilk_rx#_idx_cal 1539 */ 1540union cvmx_ilk_rxx_idx_cal { 1541 uint64_t u64; 1542 struct cvmx_ilk_rxx_idx_cal_s { 1543#ifdef __BIG_ENDIAN_BITFIELD 1544 uint64_t reserved_14_63 : 50; 1545 uint64_t inc : 6; /**< Increment to add to current index for next index. NOTE: 1546 Increment performed after access to ILK_RXx_MEM_CAL1 */ 1547 uint64_t reserved_6_7 : 2; 1548 uint64_t index : 6; /**< Specify the group of 8 entries accessed by the next CSR 1549 read/write to calendar table memory. 
Software must never write 1550 IDX >= 36 */ 1551#else 1552 uint64_t index : 6; 1553 uint64_t reserved_6_7 : 2; 1554 uint64_t inc : 6; 1555 uint64_t reserved_14_63 : 50; 1556#endif 1557 } s; 1558 struct cvmx_ilk_rxx_idx_cal_s cn68xx; 1559 struct cvmx_ilk_rxx_idx_cal_s cn68xxp1; 1560}; 1561typedef union cvmx_ilk_rxx_idx_cal cvmx_ilk_rxx_idx_cal_t; 1562 1563/** 1564 * cvmx_ilk_rx#_idx_stat0 1565 */ 1566union cvmx_ilk_rxx_idx_stat0 { 1567 uint64_t u64; 1568 struct cvmx_ilk_rxx_idx_stat0_s { 1569#ifdef __BIG_ENDIAN_BITFIELD 1570 uint64_t reserved_32_63 : 32; 1571 uint64_t clr : 1; /**< CSR read to ILK_RXx_MEM_STAT0 clears the selected counter after 1572 returning its current value. */ 1573 uint64_t reserved_24_30 : 7; 1574 uint64_t inc : 8; /**< Increment to add to current index for next index */ 1575 uint64_t reserved_8_15 : 8; 1576 uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the 1577 ILK_RXx_MEM_STAT0 */ 1578#else 1579 uint64_t index : 8; 1580 uint64_t reserved_8_15 : 8; 1581 uint64_t inc : 8; 1582 uint64_t reserved_24_30 : 7; 1583 uint64_t clr : 1; 1584 uint64_t reserved_32_63 : 32; 1585#endif 1586 } s; 1587 struct cvmx_ilk_rxx_idx_stat0_s cn68xx; 1588 struct cvmx_ilk_rxx_idx_stat0_s cn68xxp1; 1589}; 1590typedef union cvmx_ilk_rxx_idx_stat0 cvmx_ilk_rxx_idx_stat0_t; 1591 1592/** 1593 * cvmx_ilk_rx#_idx_stat1 1594 */ 1595union cvmx_ilk_rxx_idx_stat1 { 1596 uint64_t u64; 1597 struct cvmx_ilk_rxx_idx_stat1_s { 1598#ifdef __BIG_ENDIAN_BITFIELD 1599 uint64_t reserved_32_63 : 32; 1600 uint64_t clr : 1; /**< CSR read to ILK_RXx_MEM_STAT1 clears the selected counter after 1601 returning its current value. 
*/ 1602 uint64_t reserved_24_30 : 7; 1603 uint64_t inc : 8; /**< Increment to add to current index for next index */ 1604 uint64_t reserved_8_15 : 8; 1605 uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the 1606 ILK_RXx_MEM_STAT1 */ 1607#else 1608 uint64_t index : 8; 1609 uint64_t reserved_8_15 : 8; 1610 uint64_t inc : 8; 1611 uint64_t reserved_24_30 : 7; 1612 uint64_t clr : 1; 1613 uint64_t reserved_32_63 : 32; 1614#endif 1615 } s; 1616 struct cvmx_ilk_rxx_idx_stat1_s cn68xx; 1617 struct cvmx_ilk_rxx_idx_stat1_s cn68xxp1; 1618}; 1619typedef union cvmx_ilk_rxx_idx_stat1 cvmx_ilk_rxx_idx_stat1_t; 1620 1621/** 1622 * cvmx_ilk_rx#_int 1623 */ 1624union cvmx_ilk_rxx_int { 1625 uint64_t u64; 1626 struct cvmx_ilk_rxx_int_s { 1627#ifdef __BIG_ENDIAN_BITFIELD 1628 uint64_t reserved_9_63 : 55; 1629 uint64_t pkt_drop_sop : 1; /**< Entire packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX, 1630 lack of reassembly-ids or because ILK_RXX_CFG1[PKT_ENA]=0 | $RW 1631 because ILK_RXX_CFG1[PKT_ENA]=0 1632 1633 ***NOTE: Added in pass 2.0 */ 1634 uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or 1635 because ILK_RXX_CFG1[PKT_ENA]=0 */ 1636 uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */ 1637 uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown 1638 control word type. */ 1639 uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */ 1640 uint64_t lane_align_done : 1; /**< Lane alignment successful */ 1641 uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and 1642 scrambler synchronization. Lane alignment may now be enabled. */ 1643 uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will be receive an error. */ 1644 uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries). 
Hardware will repeat lane 1645 alignment until is succeeds or until ILK_RXx_CFG1[RX_ALIGN_ENA] 1646 is cleared. */ 1647#else 1648 uint64_t lane_align_fail : 1; 1649 uint64_t crc24_err : 1; 1650 uint64_t word_sync_done : 1; 1651 uint64_t lane_align_done : 1; 1652 uint64_t stat_cnt_ovfl : 1; 1653 uint64_t lane_bad_word : 1; 1654 uint64_t pkt_drop_rxf : 1; 1655 uint64_t pkt_drop_rid : 1; 1656 uint64_t pkt_drop_sop : 1; 1657 uint64_t reserved_9_63 : 55; 1658#endif 1659 } s; 1660 struct cvmx_ilk_rxx_int_s cn68xx; 1661 struct cvmx_ilk_rxx_int_cn68xxp1 { 1662#ifdef __BIG_ENDIAN_BITFIELD 1663 uint64_t reserved_8_63 : 56; 1664 uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or 1665 because ILK_RXX_CFG1[PKT_ENA]=0 */ 1666 uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */ 1667 uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown 1668 control word type. */ 1669 uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */ 1670 uint64_t lane_align_done : 1; /**< Lane alignment successful */ 1671 uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and 1672 scrambler synchronization. Lane alignment may now be enabled. */ 1673 uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will be receive an error. */ 1674 uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries). Hardware will repeat lane 1675 alignment until is succeeds or until ILK_RXx_CFG1[RX_ALIGN_ENA] 1676 is cleared. 
*/ 1677#else 1678 uint64_t lane_align_fail : 1; 1679 uint64_t crc24_err : 1; 1680 uint64_t word_sync_done : 1; 1681 uint64_t lane_align_done : 1; 1682 uint64_t stat_cnt_ovfl : 1; 1683 uint64_t lane_bad_word : 1; 1684 uint64_t pkt_drop_rxf : 1; 1685 uint64_t pkt_drop_rid : 1; 1686 uint64_t reserved_8_63 : 56; 1687#endif 1688 } cn68xxp1; 1689}; 1690typedef union cvmx_ilk_rxx_int cvmx_ilk_rxx_int_t; 1691 1692/** 1693 * cvmx_ilk_rx#_int_en 1694 */ 1695union cvmx_ilk_rxx_int_en { 1696 uint64_t u64; 1697 struct cvmx_ilk_rxx_int_en_s { 1698#ifdef __BIG_ENDIAN_BITFIELD 1699 uint64_t reserved_9_63 : 55; 1700 uint64_t pkt_drop_sop : 1; /**< Entire packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX, 1701 lack of reassembly-ids or because ILK_RXX_CFG1[PKT_ENA]=0 | $PRW 1702 because ILK_RXX_CFG1[PKT_ENA]=0 1703 1704 ***NOTE: Added in pass 2.0 */ 1705 uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or 1706 because ILK_RXX_CFG1[PKT_ENA]=0 */ 1707 uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */ 1708 uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown 1709 control word type. */ 1710 uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */ 1711 uint64_t lane_align_done : 1; /**< Lane alignment successful */ 1712 uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and 1713 scrambler synchronization. Lane alignment may now be enabled. */ 1714 uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will be receive an error. 
*/ 1715 uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries) */ 1716#else 1717 uint64_t lane_align_fail : 1; 1718 uint64_t crc24_err : 1; 1719 uint64_t word_sync_done : 1; 1720 uint64_t lane_align_done : 1; 1721 uint64_t stat_cnt_ovfl : 1; 1722 uint64_t lane_bad_word : 1; 1723 uint64_t pkt_drop_rxf : 1; 1724 uint64_t pkt_drop_rid : 1; 1725 uint64_t pkt_drop_sop : 1; 1726 uint64_t reserved_9_63 : 55; 1727#endif 1728 } s; 1729 struct cvmx_ilk_rxx_int_en_s cn68xx; 1730 struct cvmx_ilk_rxx_int_en_cn68xxp1 { 1731#ifdef __BIG_ENDIAN_BITFIELD 1732 uint64_t reserved_8_63 : 56; 1733 uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or 1734 because ILK_RXX_CFG1[PKT_ENA]=0 */ 1735 uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */ 1736 uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown 1737 control word type. */ 1738 uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */ 1739 uint64_t lane_align_done : 1; /**< Lane alignment successful */ 1740 uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and 1741 scrambler synchronization. Lane alignment may now be enabled. */ 1742 uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will be receive an error. 
*/ 1743 uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries) */ 1744#else 1745 uint64_t lane_align_fail : 1; 1746 uint64_t crc24_err : 1; 1747 uint64_t word_sync_done : 1; 1748 uint64_t lane_align_done : 1; 1749 uint64_t stat_cnt_ovfl : 1; 1750 uint64_t lane_bad_word : 1; 1751 uint64_t pkt_drop_rxf : 1; 1752 uint64_t pkt_drop_rid : 1; 1753 uint64_t reserved_8_63 : 56; 1754#endif 1755 } cn68xxp1; 1756}; 1757typedef union cvmx_ilk_rxx_int_en cvmx_ilk_rxx_int_en_t; 1758 1759/** 1760 * cvmx_ilk_rx#_jabber 1761 */ 1762union cvmx_ilk_rxx_jabber { 1763 uint64_t u64; 1764 struct cvmx_ilk_rxx_jabber_s { 1765#ifdef __BIG_ENDIAN_BITFIELD 1766 uint64_t reserved_16_63 : 48; 1767 uint64_t cnt : 16; /**< Byte count for jabber check. Failing packets will be 1768 truncated to CNT bytes. 1769 1770 NOTE: Hardware tracks the size of up to two concurrent packet 1771 per link. If using segment mode with more than 2 channels, 1772 some large packets may not be flagged or truncated. 1773 1774 NOTE: CNT must be 8-byte aligned such that CNT[2:0] == 0 */ 1775#else 1776 uint64_t cnt : 16; 1777 uint64_t reserved_16_63 : 48; 1778#endif 1779 } s; 1780 struct cvmx_ilk_rxx_jabber_s cn68xx; 1781 struct cvmx_ilk_rxx_jabber_s cn68xxp1; 1782}; 1783typedef union cvmx_ilk_rxx_jabber cvmx_ilk_rxx_jabber_t; 1784 1785/** 1786 * cvmx_ilk_rx#_mem_cal0 1787 * 1788 * Notes: 1789 * Software must program the calendar table prior to enabling the 1790 * link. 1791 * 1792 * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. 1793 * Software must never write them in reverse order or write one without 1794 * writing the other. 1795 * 1796 * A given calendar table entry has no effect on PKO pipe 1797 * backpressure when either: 1798 * - ENTRY_CTLx=Link (1), or 1799 * - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP]. 
1800 * 1801 * Within the 8 calendar table entries of one IDX value, if more 1802 * than one affects the same PKO pipe, XOFF always wins over XON, 1803 * regardless of the calendar table order. 1804 * 1805 * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. Software 1806 * must never read them in reverse order or read one without reading the 1807 * other. 1808 */ 1809union cvmx_ilk_rxx_mem_cal0 { 1810 uint64_t u64; 1811 struct cvmx_ilk_rxx_mem_cal0_s { 1812#ifdef __BIG_ENDIAN_BITFIELD 1813 uint64_t reserved_36_63 : 28; 1814 uint64_t entry_ctl3 : 2; /**< XON/XOFF destination for entry (IDX*8)+3 1815 1816 - 0: PKO port-pipe Apply backpressure received from the 1817 remote tranmitter to the PKO pipe selected 1818 by PORT_PIPE3. 1819 1820 - 1: Link Apply the backpressure received from the 1821 remote transmitter to link backpressure. 1822 PORT_PIPE3 is unused. 1823 1824 - 2: XOFF Apply XOFF to the PKO pipe selected by 1825 PORT_PIPE3. 1826 1827 - 3: XON Apply XON to the PKO pipe selected by 1828 PORT_PIPE3. The calendar table entry is 1829 effectively unused if PORT_PIPE3 is out of 1830 range of ILK_TXx_PIPE[BASE/NUMP]. */ 1831 uint64_t port_pipe3 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+3 1832 1833 PORT_PIPE3 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 1834 when ENTRY_CTL3 is "XOFF" (2) or "PKO port-pipe" (0). */ 1835 uint64_t entry_ctl2 : 2; /**< XON/XOFF destination for entry (IDX*8)+2 1836 1837 - 0: PKO port-pipe Apply backpressure received from the 1838 remote tranmitter to the PKO pipe selected 1839 by PORT_PIPE2. 1840 1841 - 1: Link Apply the backpressure received from the 1842 remote transmitter to link backpressure. 1843 PORT_PIPE2 is unused. 1844 1845 - 2: XOFF Apply XOFF to the PKO pipe selected by 1846 PORT_PIPE2. 1847 1848 - 3: XON Apply XON to the PKO pipe selected by 1849 PORT_PIPE2. The calendar table entry is 1850 effectively unused if PORT_PIPE2 is out of 1851 range of ILK_TXx_PIPE[BASE/NUMP]. 
*/ 1852 uint64_t port_pipe2 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+2 1853 1854 PORT_PIPE2 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 1855 when ENTRY_CTL2 is "XOFF" (2) or "PKO port-pipe" (0). */ 1856 uint64_t entry_ctl1 : 2; /**< XON/XOFF destination for entry (IDX*8)+1 1857 1858 - 0: PKO port-pipe Apply backpressure received from the 1859 remote tranmitter to the PKO pipe selected 1860 by PORT_PIPE1. 1861 1862 - 1: Link Apply the backpressure received from the 1863 remote transmitter to link backpressure. 1864 PORT_PIPE1 is unused. 1865 1866 - 2: XOFF Apply XOFF to the PKO pipe selected by 1867 PORT_PIPE1. 1868 1869 - 3: XON Apply XON to the PKO pipe selected by 1870 PORT_PIPE1. The calendar table entry is 1871 effectively unused if PORT_PIPE1 is out of 1872 range of ILK_TXx_PIPE[BASE/NUMP]. */ 1873 uint64_t port_pipe1 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+1 1874 1875 PORT_PIPE1 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 1876 when ENTRY_CTL1 is "XOFF" (2) or "PKO port-pipe" (0). */ 1877 uint64_t entry_ctl0 : 2; /**< XON/XOFF destination for entry (IDX*8)+0 1878 1879 - 0: PKO port-pipe Apply backpressure received from the 1880 remote tranmitter to the PKO pipe selected 1881 by PORT_PIPE0. 1882 1883 - 1: Link Apply the backpressure received from the 1884 remote transmitter to link backpressure. 1885 PORT_PIPE0 is unused. 1886 1887 - 2: XOFF Apply XOFF to the PKO pipe selected by 1888 PORT_PIPE0. 1889 1890 - 3: XON Apply XON to the PKO pipe selected by 1891 PORT_PIPE0. The calendar table entry is 1892 effectively unused if PORT_PIPEx is out of 1893 range of ILK_TXx_PIPE[BASE/NUMP]. */ 1894 uint64_t port_pipe0 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+0 1895 1896 PORT_PIPE0 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 1897 when ENTRY_CTL0 is "XOFF" (2) or "PKO port-pipe" (0). 
*/ 1898#else 1899 uint64_t port_pipe0 : 7; 1900 uint64_t entry_ctl0 : 2; 1901 uint64_t port_pipe1 : 7; 1902 uint64_t entry_ctl1 : 2; 1903 uint64_t port_pipe2 : 7; 1904 uint64_t entry_ctl2 : 2; 1905 uint64_t port_pipe3 : 7; 1906 uint64_t entry_ctl3 : 2; 1907 uint64_t reserved_36_63 : 28; 1908#endif 1909 } s; 1910 struct cvmx_ilk_rxx_mem_cal0_s cn68xx; 1911 struct cvmx_ilk_rxx_mem_cal0_s cn68xxp1; 1912}; 1913typedef union cvmx_ilk_rxx_mem_cal0 cvmx_ilk_rxx_mem_cal0_t; 1914 1915/** 1916 * cvmx_ilk_rx#_mem_cal1 1917 * 1918 * Notes: 1919 * Software must program the calendar table prior to enabling the 1920 * link. 1921 * 1922 * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. 1923 * Software must never write them in reverse order or write one without 1924 * writing the other. 1925 * 1926 * A given calendar table entry has no effect on PKO pipe 1927 * backpressure when either: 1928 * - ENTRY_CTLx=Link (1), or 1929 * - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP]. 1930 * 1931 * Within the 8 calendar table entries of one IDX value, if more 1932 * than one affects the same PKO pipe, XOFF always wins over XON, 1933 * regardless of the calendar table order. 1934 * 1935 * Software must always read ILK_RXx_MEM_CAL0 then ILK_Rx_MEM_CAL1. Software 1936 * must never read them in reverse order or read one without reading the 1937 * other. 1938 */ 1939union cvmx_ilk_rxx_mem_cal1 { 1940 uint64_t u64; 1941 struct cvmx_ilk_rxx_mem_cal1_s { 1942#ifdef __BIG_ENDIAN_BITFIELD 1943 uint64_t reserved_36_63 : 28; 1944 uint64_t entry_ctl7 : 2; /**< XON/XOFF destination for entry (IDX*8)+7 1945 1946 - 0: PKO port-pipe Apply backpressure received from the 1947 remote tranmitter to the PKO pipe selected 1948 by PORT_PIPE7. 1949 1950 - 1: Link Apply the backpressure received from the 1951 remote transmitter to link backpressure. 1952 PORT_PIPE7 is unused. 1953 1954 - 2: XOFF Apply XOFF to the PKO pipe selected by 1955 PORT_PIPE7. 
1956 1957 - 3: XON Apply XON to the PKO pipe selected by 1958 PORT_PIPE7. The calendar table entry is 1959 effectively unused if PORT_PIPE3 is out of 1960 range of ILK_TXx_PIPE[BASE/NUMP]. */ 1961 uint64_t port_pipe7 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+7 1962 1963 PORT_PIPE7 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 1964 when ENTRY_CTL7 is "XOFF" (2) or "PKO port-pipe" (0). */ 1965 uint64_t entry_ctl6 : 2; /**< XON/XOFF destination for entry (IDX*8)+6 1966 1967 - 0: PKO port-pipe Apply backpressure received from the 1968 remote tranmitter to the PKO pipe selected 1969 by PORT_PIPE6. 1970 1971 - 1: Link Apply the backpressure received from the 1972 remote transmitter to link backpressure. 1973 PORT_PIPE6 is unused. 1974 1975 - 2: XOFF Apply XOFF to the PKO pipe selected by 1976 PORT_PIPE6. 1977 1978 - 3: XON Apply XON to the PKO pipe selected by 1979 PORT_PIPE6. The calendar table entry is 1980 effectively unused if PORT_PIPE6 is out of 1981 range of ILK_TXx_PIPE[BASE/NUMP]. */ 1982 uint64_t port_pipe6 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+6 1983 1984 PORT_PIPE6 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 1985 when ENTRY_CTL6 is "XOFF" (2) or "PKO port-pipe" (0). */ 1986 uint64_t entry_ctl5 : 2; /**< XON/XOFF destination for entry (IDX*8)+5 1987 1988 - 0: PKO port-pipe Apply backpressure received from the 1989 remote tranmitter to the PKO pipe selected 1990 by PORT_PIPE5. 1991 1992 - 1: Link Apply the backpressure received from the 1993 remote transmitter to link backpressure. 1994 PORT_PIPE5 is unused. 1995 1996 - 2: XOFF Apply XOFF to the PKO pipe selected by 1997 PORT_PIPE5. 1998 1999 - 3: XON Apply XON to the PKO pipe selected by 2000 PORT_PIPE5. The calendar table entry is 2001 effectively unused if PORT_PIPE5 is out of 2002 range of ILK_TXx_PIPE[BASE/NUMP]. 
*/ 2003 uint64_t port_pipe5 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+5 2004 2005 PORT_PIPE5 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 2006 when ENTRY_CTL5 is "XOFF" (2) or "PKO port-pipe" (0). */ 2007 uint64_t entry_ctl4 : 2; /**< XON/XOFF destination for entry (IDX*8)+4 2008 2009 - 0: PKO port-pipe Apply backpressure received from the 2010 remote tranmitter to the PKO pipe selected 2011 by PORT_PIPE4. 2012 2013 - 1: Link Apply the backpressure received from the 2014 remote transmitter to link backpressure. 2015 PORT_PIPE4 is unused. 2016 2017 - 2: XOFF Apply XOFF to the PKO pipe selected by 2018 PORT_PIPE4. 2019 2020 - 3: XON Apply XON to the PKO pipe selected by 2021 PORT_PIPE4. The calendar table entry is 2022 effectively unused if PORT_PIPE4 is out of 2023 range of ILK_TXx_PIPE[BASE/NUMP]. */ 2024 uint64_t port_pipe4 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+4 2025 2026 PORT_PIPE4 must reside in the range of ILK_TXx_PIPE[BASE/NUMP] 2027 when ENTRY_CTL4 is "XOFF" (2) or "PKO port-pipe" (0). */ 2028#else 2029 uint64_t port_pipe4 : 7; 2030 uint64_t entry_ctl4 : 2; 2031 uint64_t port_pipe5 : 7; 2032 uint64_t entry_ctl5 : 2; 2033 uint64_t port_pipe6 : 7; 2034 uint64_t entry_ctl6 : 2; 2035 uint64_t port_pipe7 : 7; 2036 uint64_t entry_ctl7 : 2; 2037 uint64_t reserved_36_63 : 28; 2038#endif 2039 } s; 2040 struct cvmx_ilk_rxx_mem_cal1_s cn68xx; 2041 struct cvmx_ilk_rxx_mem_cal1_s cn68xxp1; 2042}; 2043typedef union cvmx_ilk_rxx_mem_cal1 cvmx_ilk_rxx_mem_cal1_t; 2044 2045/** 2046 * cvmx_ilk_rx#_mem_stat0 2047 */ 2048union cvmx_ilk_rxx_mem_stat0 { 2049 uint64_t u64; 2050 struct cvmx_ilk_rxx_mem_stat0_s { 2051#ifdef __BIG_ENDIAN_BITFIELD 2052 uint64_t reserved_28_63 : 36; 2053 uint64_t rx_pkt : 28; /**< Number of packets received (256M) 2054 Channel selected by ILK_RXx_IDX_STAT0[IDX]. Saturates. 2055 Interrupt on saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. 
*/ 2056#else 2057 uint64_t rx_pkt : 28; 2058 uint64_t reserved_28_63 : 36; 2059#endif 2060 } s; 2061 struct cvmx_ilk_rxx_mem_stat0_s cn68xx; 2062 struct cvmx_ilk_rxx_mem_stat0_s cn68xxp1; 2063}; 2064typedef union cvmx_ilk_rxx_mem_stat0 cvmx_ilk_rxx_mem_stat0_t; 2065 2066/** 2067 * cvmx_ilk_rx#_mem_stat1 2068 */ 2069union cvmx_ilk_rxx_mem_stat1 { 2070 uint64_t u64; 2071 struct cvmx_ilk_rxx_mem_stat1_s { 2072#ifdef __BIG_ENDIAN_BITFIELD 2073 uint64_t reserved_36_63 : 28; 2074 uint64_t rx_bytes : 36; /**< Number of bytes received (64GB) 2075 Channel selected by ILK_RXx_IDX_STAT1[IDX]. Saturates. 2076 Interrupt on saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2077#else 2078 uint64_t rx_bytes : 36; 2079 uint64_t reserved_36_63 : 28; 2080#endif 2081 } s; 2082 struct cvmx_ilk_rxx_mem_stat1_s cn68xx; 2083 struct cvmx_ilk_rxx_mem_stat1_s cn68xxp1; 2084}; 2085typedef union cvmx_ilk_rxx_mem_stat1 cvmx_ilk_rxx_mem_stat1_t; 2086 2087/** 2088 * cvmx_ilk_rx#_rid 2089 */ 2090union cvmx_ilk_rxx_rid { 2091 uint64_t u64; 2092 struct cvmx_ilk_rxx_rid_s { 2093#ifdef __BIG_ENDIAN_BITFIELD 2094 uint64_t reserved_6_63 : 58; 2095 uint64_t max_cnt : 6; /**< Maximum number of reassembly-ids allowed for a given link. If 2096 an SOP arrives and the link has already allocated at least 2097 MAX_CNT reassembly-ids, the packet will be dropped. 2098 2099 Note: An an SOP allocates a reassembly-ids. 2100 Note: An an EOP frees a reassembly-ids. 2101 2102 ***NOTE: Added in pass 2.0 */ 2103#else 2104 uint64_t max_cnt : 6; 2105 uint64_t reserved_6_63 : 58; 2106#endif 2107 } s; 2108 struct cvmx_ilk_rxx_rid_s cn68xx; 2109}; 2110typedef union cvmx_ilk_rxx_rid cvmx_ilk_rxx_rid_t; 2111 2112/** 2113 * cvmx_ilk_rx#_stat0 2114 */ 2115union cvmx_ilk_rxx_stat0 { 2116 uint64_t u64; 2117 struct cvmx_ilk_rxx_stat0_s { 2118#ifdef __BIG_ENDIAN_BITFIELD 2119 uint64_t reserved_33_63 : 31; 2120 uint64_t crc24_match_cnt : 33; /**< Number of CRC24 matches received. Saturates. 
Interrupt on 2121 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2122#else 2123 uint64_t crc24_match_cnt : 33; 2124 uint64_t reserved_33_63 : 31; 2125#endif 2126 } s; 2127 struct cvmx_ilk_rxx_stat0_s cn68xx; 2128 struct cvmx_ilk_rxx_stat0_cn68xxp1 { 2129#ifdef __BIG_ENDIAN_BITFIELD 2130 uint64_t reserved_27_63 : 37; 2131 uint64_t crc24_match_cnt : 27; /**< Number of CRC24 matches received. Saturates. Interrupt on 2132 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2133#else 2134 uint64_t crc24_match_cnt : 27; 2135 uint64_t reserved_27_63 : 37; 2136#endif 2137 } cn68xxp1; 2138}; 2139typedef union cvmx_ilk_rxx_stat0 cvmx_ilk_rxx_stat0_t; 2140 2141/** 2142 * cvmx_ilk_rx#_stat1 2143 */ 2144union cvmx_ilk_rxx_stat1 { 2145 uint64_t u64; 2146 struct cvmx_ilk_rxx_stat1_s { 2147#ifdef __BIG_ENDIAN_BITFIELD 2148 uint64_t reserved_18_63 : 46; 2149 uint64_t crc24_err_cnt : 18; /**< Number of bursts with a detected CRC error. Saturates. 2150 Interrupt on saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2151#else 2152 uint64_t crc24_err_cnt : 18; 2153 uint64_t reserved_18_63 : 46; 2154#endif 2155 } s; 2156 struct cvmx_ilk_rxx_stat1_s cn68xx; 2157 struct cvmx_ilk_rxx_stat1_s cn68xxp1; 2158}; 2159typedef union cvmx_ilk_rxx_stat1 cvmx_ilk_rxx_stat1_t; 2160 2161/** 2162 * cvmx_ilk_rx#_stat2 2163 */ 2164union cvmx_ilk_rxx_stat2 { 2165 uint64_t u64; 2166 struct cvmx_ilk_rxx_stat2_s { 2167#ifdef __BIG_ENDIAN_BITFIELD 2168 uint64_t reserved_48_63 : 16; 2169 uint64_t brst_not_full_cnt : 16; /**< Number of bursts received which terminated without an eop and 2170 contained fewer than BurstMax words. Saturates. Interrupt on 2171 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2172 uint64_t reserved_28_31 : 4; 2173 uint64_t brst_cnt : 28; /**< Number of bursts correctly received. (ie. 
good CRC24, not in 2174 violation of BurstMax or BurstShort) */ 2175#else 2176 uint64_t brst_cnt : 28; 2177 uint64_t reserved_28_31 : 4; 2178 uint64_t brst_not_full_cnt : 16; 2179 uint64_t reserved_48_63 : 16; 2180#endif 2181 } s; 2182 struct cvmx_ilk_rxx_stat2_s cn68xx; 2183 struct cvmx_ilk_rxx_stat2_cn68xxp1 { 2184#ifdef __BIG_ENDIAN_BITFIELD 2185 uint64_t reserved_48_63 : 16; 2186 uint64_t brst_not_full_cnt : 16; /**< Number of bursts received which terminated without an eop and 2187 contained fewer than BurstMax words. Saturates. Interrupt on 2188 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2189 uint64_t reserved_16_31 : 16; 2190 uint64_t brst_cnt : 16; /**< Number of bursts correctly received. (ie. good CRC24, not in 2191 violation of BurstMax or BurstShort) */ 2192#else 2193 uint64_t brst_cnt : 16; 2194 uint64_t reserved_16_31 : 16; 2195 uint64_t brst_not_full_cnt : 16; 2196 uint64_t reserved_48_63 : 16; 2197#endif 2198 } cn68xxp1; 2199}; 2200typedef union cvmx_ilk_rxx_stat2 cvmx_ilk_rxx_stat2_t; 2201 2202/** 2203 * cvmx_ilk_rx#_stat3 2204 */ 2205union cvmx_ilk_rxx_stat3 { 2206 uint64_t u64; 2207 struct cvmx_ilk_rxx_stat3_s { 2208#ifdef __BIG_ENDIAN_BITFIELD 2209 uint64_t reserved_16_63 : 48; 2210 uint64_t brst_max_err_cnt : 16; /**< Number of bursts received longer than the BurstMax parameter */ 2211#else 2212 uint64_t brst_max_err_cnt : 16; 2213 uint64_t reserved_16_63 : 48; 2214#endif 2215 } s; 2216 struct cvmx_ilk_rxx_stat3_s cn68xx; 2217 struct cvmx_ilk_rxx_stat3_s cn68xxp1; 2218}; 2219typedef union cvmx_ilk_rxx_stat3 cvmx_ilk_rxx_stat3_t; 2220 2221/** 2222 * cvmx_ilk_rx#_stat4 2223 */ 2224union cvmx_ilk_rxx_stat4 { 2225 uint64_t u64; 2226 struct cvmx_ilk_rxx_stat4_s { 2227#ifdef __BIG_ENDIAN_BITFIELD 2228 uint64_t reserved_16_63 : 48; 2229 uint64_t brst_shrt_err_cnt : 16; /**< Number of bursts received that violate the BurstShort 2230 parameter. Saturates. Interrupt on saturation if 2231 ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. 
*/ 2232#else 2233 uint64_t brst_shrt_err_cnt : 16; 2234 uint64_t reserved_16_63 : 48; 2235#endif 2236 } s; 2237 struct cvmx_ilk_rxx_stat4_s cn68xx; 2238 struct cvmx_ilk_rxx_stat4_s cn68xxp1; 2239}; 2240typedef union cvmx_ilk_rxx_stat4 cvmx_ilk_rxx_stat4_t; 2241 2242/** 2243 * cvmx_ilk_rx#_stat5 2244 */ 2245union cvmx_ilk_rxx_stat5 { 2246 uint64_t u64; 2247 struct cvmx_ilk_rxx_stat5_s { 2248#ifdef __BIG_ENDIAN_BITFIELD 2249 uint64_t reserved_23_63 : 41; 2250 uint64_t align_cnt : 23; /**< Number of alignment sequences received (ie. those that do not 2251 violate the current alignment). Saturates. Interrupt on 2252 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2253#else 2254 uint64_t align_cnt : 23; 2255 uint64_t reserved_23_63 : 41; 2256#endif 2257 } s; 2258 struct cvmx_ilk_rxx_stat5_s cn68xx; 2259 struct cvmx_ilk_rxx_stat5_cn68xxp1 { 2260#ifdef __BIG_ENDIAN_BITFIELD 2261 uint64_t reserved_16_63 : 48; 2262 uint64_t align_cnt : 16; /**< Number of alignment sequences received (ie. those that do not 2263 violate the current alignment). Saturates. Interrupt on 2264 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2265#else 2266 uint64_t align_cnt : 16; 2267 uint64_t reserved_16_63 : 48; 2268#endif 2269 } cn68xxp1; 2270}; 2271typedef union cvmx_ilk_rxx_stat5 cvmx_ilk_rxx_stat5_t; 2272 2273/** 2274 * cvmx_ilk_rx#_stat6 2275 */ 2276union cvmx_ilk_rxx_stat6 { 2277 uint64_t u64; 2278 struct cvmx_ilk_rxx_stat6_s { 2279#ifdef __BIG_ENDIAN_BITFIELD 2280 uint64_t reserved_16_63 : 48; 2281 uint64_t align_err_cnt : 16; /**< Number of alignment sequences received in error (ie. those that 2282 violate the current alignment). Saturates. Interrupt on 2283 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. 
*/ 2284#else 2285 uint64_t align_err_cnt : 16; 2286 uint64_t reserved_16_63 : 48; 2287#endif 2288 } s; 2289 struct cvmx_ilk_rxx_stat6_s cn68xx; 2290 struct cvmx_ilk_rxx_stat6_s cn68xxp1; 2291}; 2292typedef union cvmx_ilk_rxx_stat6 cvmx_ilk_rxx_stat6_t; 2293 2294/** 2295 * cvmx_ilk_rx#_stat7 2296 */ 2297union cvmx_ilk_rxx_stat7 { 2298 uint64_t u64; 2299 struct cvmx_ilk_rxx_stat7_s { 2300#ifdef __BIG_ENDIAN_BITFIELD 2301 uint64_t reserved_16_63 : 48; 2302 uint64_t bad_64b67b_cnt : 16; /**< Number of bad 64B/67B codewords. Saturates. Interrupt on 2303 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2304#else 2305 uint64_t bad_64b67b_cnt : 16; 2306 uint64_t reserved_16_63 : 48; 2307#endif 2308 } s; 2309 struct cvmx_ilk_rxx_stat7_s cn68xx; 2310 struct cvmx_ilk_rxx_stat7_s cn68xxp1; 2311}; 2312typedef union cvmx_ilk_rxx_stat7 cvmx_ilk_rxx_stat7_t; 2313 2314/** 2315 * cvmx_ilk_rx#_stat8 2316 */ 2317union cvmx_ilk_rxx_stat8 { 2318 uint64_t u64; 2319 struct cvmx_ilk_rxx_stat8_s { 2320#ifdef __BIG_ENDIAN_BITFIELD 2321 uint64_t reserved_32_63 : 32; 2322 uint64_t pkt_drop_rid_cnt : 16; /**< Number of packets dropped due to the lack of reassembly-ids or 2323 because ILK_RXX_CFG1[PKT_ENA]=0. Saturates. Interrupt on 2324 saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */ 2325 uint64_t pkt_drop_rxf_cnt : 16; /**< Number of packets dropped due to RX_FIFO_CNT >= RX_FIFO_MAX. 2326 Saturates. Interrupt on saturation if 2327 ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. 
*/ 2328#else 2329 uint64_t pkt_drop_rxf_cnt : 16; 2330 uint64_t pkt_drop_rid_cnt : 16; 2331 uint64_t reserved_32_63 : 32; 2332#endif 2333 } s; 2334 struct cvmx_ilk_rxx_stat8_s cn68xx; 2335 struct cvmx_ilk_rxx_stat8_s cn68xxp1; 2336}; 2337typedef union cvmx_ilk_rxx_stat8 cvmx_ilk_rxx_stat8_t; 2338 2339/** 2340 * cvmx_ilk_rx#_stat9 2341 */ 2342union cvmx_ilk_rxx_stat9 { 2343 uint64_t u64; 2344 struct cvmx_ilk_rxx_stat9_s { 2345#ifdef __BIG_ENDIAN_BITFIELD 2346 uint64_t reserved_0_63 : 64; 2347#else 2348 uint64_t reserved_0_63 : 64; 2349#endif 2350 } s; 2351 struct cvmx_ilk_rxx_stat9_s cn68xx; 2352 struct cvmx_ilk_rxx_stat9_s cn68xxp1; 2353}; 2354typedef union cvmx_ilk_rxx_stat9 cvmx_ilk_rxx_stat9_t; 2355 2356/** 2357 * cvmx_ilk_rx_lne#_cfg 2358 */ 2359union cvmx_ilk_rx_lnex_cfg { 2360 uint64_t u64; 2361 struct cvmx_ilk_rx_lnex_cfg_s { 2362#ifdef __BIG_ENDIAN_BITFIELD 2363 uint64_t reserved_9_63 : 55; 2364 uint64_t rx_dis_psh_skip : 1; /**< When RX_DIS_PSH_SKIP=0, skip words are de-stripped. 2365 When RX_DIS_PSH_SKIP=1, skip words are discarded in the lane 2366 logic. 2367 2368 If the lane is in internal loopback mode, RX_DIS_PSH_SKIP 2369 is ignored and skip words are always discarded in the lane 2370 logic. 2371 2372 ***NOTE: Added in pass 2.0 */ 2373 uint64_t reserved_6_7 : 2; 2374 uint64_t rx_scrm_sync : 1; /**< Rx scrambler synchronization status 2375 2376 ***NOTE: Added in pass 2.0 */ 2377 uint64_t rx_bdry_sync : 1; /**< Rx word boundary sync status */ 2378 uint64_t rx_dis_ukwn : 1; /**< Disable normal response to unknown words. They are still 2379 logged but do not cause an error to all open channels */ 2380 uint64_t rx_dis_scram : 1; /**< Disable lane scrambler (debug) */ 2381 uint64_t stat_rdclr : 1; /**< CSR read to ILK_RX_LNEx_STAT* clears the selected counter after 2382 returning its current value. 
*/ 2383 uint64_t stat_ena : 1; /**< Enable RX lane statistics counters */ 2384#else 2385 uint64_t stat_ena : 1; 2386 uint64_t stat_rdclr : 1; 2387 uint64_t rx_dis_scram : 1; 2388 uint64_t rx_dis_ukwn : 1; 2389 uint64_t rx_bdry_sync : 1; 2390 uint64_t rx_scrm_sync : 1; 2391 uint64_t reserved_6_7 : 2; 2392 uint64_t rx_dis_psh_skip : 1; 2393 uint64_t reserved_9_63 : 55; 2394#endif 2395 } s; 2396 struct cvmx_ilk_rx_lnex_cfg_s cn68xx; 2397 struct cvmx_ilk_rx_lnex_cfg_cn68xxp1 { 2398#ifdef __BIG_ENDIAN_BITFIELD 2399 uint64_t reserved_5_63 : 59; 2400 uint64_t rx_bdry_sync : 1; /**< Rx word boundary sync status */ 2401 uint64_t rx_dis_ukwn : 1; /**< Disable normal response to unknown words. They are still 2402 logged but do not cause an error to all open channels */ 2403 uint64_t rx_dis_scram : 1; /**< Disable lane scrambler (debug) */ 2404 uint64_t stat_rdclr : 1; /**< CSR read to ILK_RX_LNEx_STAT* clears the selected counter after 2405 returning its current value. */ 2406 uint64_t stat_ena : 1; /**< Enable RX lane statistics counters */ 2407#else 2408 uint64_t stat_ena : 1; 2409 uint64_t stat_rdclr : 1; 2410 uint64_t rx_dis_scram : 1; 2411 uint64_t rx_dis_ukwn : 1; 2412 uint64_t rx_bdry_sync : 1; 2413 uint64_t reserved_5_63 : 59; 2414#endif 2415 } cn68xxp1; 2416}; 2417typedef union cvmx_ilk_rx_lnex_cfg cvmx_ilk_rx_lnex_cfg_t; 2418 2419/** 2420 * cvmx_ilk_rx_lne#_int 2421 */ 2422union cvmx_ilk_rx_lnex_int { 2423 uint64_t u64; 2424 struct cvmx_ilk_rx_lnex_int_s { 2425#ifdef __BIG_ENDIAN_BITFIELD 2426 uint64_t reserved_9_63 : 55; 2427 uint64_t bad_64b67b : 1; /**< Bad 64B/67B codeword encountered. Once the bad word reaches 2428 the burst control unit (as deonted by 2429 ILK_RXx_INT[LANE_BAD_WORD]) it will be tossed and all open 2430 packets will receive an error. 
*/ 2431 uint64_t stat_cnt_ovfl : 1; /**< Rx lane statistic counter overflow */ 2432 uint64_t stat_msg : 1; /**< Status bits for the link or a lane transitioned from a '1' 2433 (healthy) to a '0' (problem) */ 2434 uint64_t dskew_fifo_ovfl : 1; /**< Rx deskew fifo overflow occurred. */ 2435 uint64_t scrm_sync_loss : 1; /**< 4 consecutive bad sync words or 3 consecutive scramble state 2436 mismatches */ 2437 uint64_t ukwn_cntl_word : 1; /**< Unknown framing control word. Block type does not match any of 2438 (SYNC,SCRAM,SKIP,DIAG) */ 2439 uint64_t crc32_err : 1; /**< Diagnostic CRC32 errors */ 2440 uint64_t bdry_sync_loss : 1; /**< Rx logic loses word boundary sync (16 tries). Hardware will 2441 automatically attempt to regain word boundary sync */ 2442 uint64_t serdes_lock_loss : 1; /**< Rx SERDES loses lock */ 2443#else 2444 uint64_t serdes_lock_loss : 1; 2445 uint64_t bdry_sync_loss : 1; 2446 uint64_t crc32_err : 1; 2447 uint64_t ukwn_cntl_word : 1; 2448 uint64_t scrm_sync_loss : 1; 2449 uint64_t dskew_fifo_ovfl : 1; 2450 uint64_t stat_msg : 1; 2451 uint64_t stat_cnt_ovfl : 1; 2452 uint64_t bad_64b67b : 1; 2453 uint64_t reserved_9_63 : 55; 2454#endif 2455 } s; 2456 struct cvmx_ilk_rx_lnex_int_s cn68xx; 2457 struct cvmx_ilk_rx_lnex_int_s cn68xxp1; 2458}; 2459typedef union cvmx_ilk_rx_lnex_int cvmx_ilk_rx_lnex_int_t; 2460 2461/** 2462 * cvmx_ilk_rx_lne#_int_en 2463 */ 2464union cvmx_ilk_rx_lnex_int_en { 2465 uint64_t u64; 2466 struct cvmx_ilk_rx_lnex_int_en_s { 2467#ifdef __BIG_ENDIAN_BITFIELD 2468 uint64_t reserved_9_63 : 55; 2469 uint64_t bad_64b67b : 1; /**< Bad 64B/67B codeword encountered. Once the bad word reaches 2470 the burst control unit (as deonted by 2471 ILK_RXx_INT[LANE_BAD_WORD]) it will be tossed and all open 2472 packets will receive an error. 
*/ 2473 uint64_t stat_cnt_ovfl : 1; /**< Rx lane statistic counter overflow */ 2474 uint64_t stat_msg : 1; /**< Status bits for the link or a lane transitioned from a '1' 2475 (healthy) to a '0' (problem) */ 2476 uint64_t dskew_fifo_ovfl : 1; /**< Rx deskew fifo overflow occurred. */ 2477 uint64_t scrm_sync_loss : 1; /**< 4 consecutive bad sync words or 3 consecutive scramble state 2478 mismatches */ 2479 uint64_t ukwn_cntl_word : 1; /**< Unknown framing control word. Block type does not match any of 2480 (SYNC,SCRAM,SKIP,DIAG) */ 2481 uint64_t crc32_err : 1; /**< Diagnostic CRC32 error */ 2482 uint64_t bdry_sync_loss : 1; /**< Rx logic loses word boundary sync (16 tries). Hardware will 2483 automatically attempt to regain word boundary sync */ 2484 uint64_t serdes_lock_loss : 1; /**< Rx SERDES loses lock */ 2485#else 2486 uint64_t serdes_lock_loss : 1; 2487 uint64_t bdry_sync_loss : 1; 2488 uint64_t crc32_err : 1; 2489 uint64_t ukwn_cntl_word : 1; 2490 uint64_t scrm_sync_loss : 1; 2491 uint64_t dskew_fifo_ovfl : 1; 2492 uint64_t stat_msg : 1; 2493 uint64_t stat_cnt_ovfl : 1; 2494 uint64_t bad_64b67b : 1; 2495 uint64_t reserved_9_63 : 55; 2496#endif 2497 } s; 2498 struct cvmx_ilk_rx_lnex_int_en_s cn68xx; 2499 struct cvmx_ilk_rx_lnex_int_en_s cn68xxp1; 2500}; 2501typedef union cvmx_ilk_rx_lnex_int_en cvmx_ilk_rx_lnex_int_en_t; 2502 2503/** 2504 * cvmx_ilk_rx_lne#_stat0 2505 */ 2506union cvmx_ilk_rx_lnex_stat0 { 2507 uint64_t u64; 2508 struct cvmx_ilk_rx_lnex_stat0_s { 2509#ifdef __BIG_ENDIAN_BITFIELD 2510 uint64_t reserved_18_63 : 46; 2511 uint64_t ser_lock_loss_cnt : 18; /**< Number of times the lane lost clock-data-recovery. 2512 Saturates. 
Interrupt on saturation if 2513 ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2514#else 2515 uint64_t ser_lock_loss_cnt : 18; 2516 uint64_t reserved_18_63 : 46; 2517#endif 2518 } s; 2519 struct cvmx_ilk_rx_lnex_stat0_s cn68xx; 2520 struct cvmx_ilk_rx_lnex_stat0_s cn68xxp1; 2521}; 2522typedef union cvmx_ilk_rx_lnex_stat0 cvmx_ilk_rx_lnex_stat0_t; 2523 2524/** 2525 * cvmx_ilk_rx_lne#_stat1 2526 */ 2527union cvmx_ilk_rx_lnex_stat1 { 2528 uint64_t u64; 2529 struct cvmx_ilk_rx_lnex_stat1_s { 2530#ifdef __BIG_ENDIAN_BITFIELD 2531 uint64_t reserved_18_63 : 46; 2532 uint64_t bdry_sync_loss_cnt : 18; /**< Number of times a lane lost word boundary synchronization. 2533 Saturates. Interrupt on saturation if 2534 ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2535#else 2536 uint64_t bdry_sync_loss_cnt : 18; 2537 uint64_t reserved_18_63 : 46; 2538#endif 2539 } s; 2540 struct cvmx_ilk_rx_lnex_stat1_s cn68xx; 2541 struct cvmx_ilk_rx_lnex_stat1_s cn68xxp1; 2542}; 2543typedef union cvmx_ilk_rx_lnex_stat1 cvmx_ilk_rx_lnex_stat1_t; 2544 2545/** 2546 * cvmx_ilk_rx_lne#_stat2 2547 */ 2548union cvmx_ilk_rx_lnex_stat2 { 2549 uint64_t u64; 2550 struct cvmx_ilk_rx_lnex_stat2_s { 2551#ifdef __BIG_ENDIAN_BITFIELD 2552 uint64_t reserved_50_63 : 14; 2553 uint64_t syncw_good_cnt : 18; /**< Number of good synchronization words. Saturates. Interrupt on 2554 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2555 uint64_t reserved_18_31 : 14; 2556 uint64_t syncw_bad_cnt : 18; /**< Number of bad synchronization words. Saturates. 
Interrupt on 2557 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2558#else 2559 uint64_t syncw_bad_cnt : 18; 2560 uint64_t reserved_18_31 : 14; 2561 uint64_t syncw_good_cnt : 18; 2562 uint64_t reserved_50_63 : 14; 2563#endif 2564 } s; 2565 struct cvmx_ilk_rx_lnex_stat2_s cn68xx; 2566 struct cvmx_ilk_rx_lnex_stat2_s cn68xxp1; 2567}; 2568typedef union cvmx_ilk_rx_lnex_stat2 cvmx_ilk_rx_lnex_stat2_t; 2569 2570/** 2571 * cvmx_ilk_rx_lne#_stat3 2572 */ 2573union cvmx_ilk_rx_lnex_stat3 { 2574 uint64_t u64; 2575 struct cvmx_ilk_rx_lnex_stat3_s { 2576#ifdef __BIG_ENDIAN_BITFIELD 2577 uint64_t reserved_18_63 : 46; 2578 uint64_t bad_64b67b_cnt : 18; /**< Number of bad 64B/67B words, meaning bit 65 or 64 has been 2579 corrupted. Saturates. Interrupt on saturation if 2580 ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2581#else 2582 uint64_t bad_64b67b_cnt : 18; 2583 uint64_t reserved_18_63 : 46; 2584#endif 2585 } s; 2586 struct cvmx_ilk_rx_lnex_stat3_s cn68xx; 2587 struct cvmx_ilk_rx_lnex_stat3_s cn68xxp1; 2588}; 2589typedef union cvmx_ilk_rx_lnex_stat3 cvmx_ilk_rx_lnex_stat3_t; 2590 2591/** 2592 * cvmx_ilk_rx_lne#_stat4 2593 */ 2594union cvmx_ilk_rx_lnex_stat4 { 2595 uint64_t u64; 2596 struct cvmx_ilk_rx_lnex_stat4_s { 2597#ifdef __BIG_ENDIAN_BITFIELD 2598 uint64_t reserved_59_63 : 5; 2599 uint64_t cntl_word_cnt : 27; /**< Number of control words received. Saturates. Interrupt on 2600 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2601 uint64_t reserved_27_31 : 5; 2602 uint64_t data_word_cnt : 27; /**< Number of data words received. Saturates. 
Interrupt on 2603 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2604#else 2605 uint64_t data_word_cnt : 27; 2606 uint64_t reserved_27_31 : 5; 2607 uint64_t cntl_word_cnt : 27; 2608 uint64_t reserved_59_63 : 5; 2609#endif 2610 } s; 2611 struct cvmx_ilk_rx_lnex_stat4_s cn68xx; 2612 struct cvmx_ilk_rx_lnex_stat4_s cn68xxp1; 2613}; 2614typedef union cvmx_ilk_rx_lnex_stat4 cvmx_ilk_rx_lnex_stat4_t; 2615 2616/** 2617 * cvmx_ilk_rx_lne#_stat5 2618 */ 2619union cvmx_ilk_rx_lnex_stat5 { 2620 uint64_t u64; 2621 struct cvmx_ilk_rx_lnex_stat5_s { 2622#ifdef __BIG_ENDIAN_BITFIELD 2623 uint64_t reserved_18_63 : 46; 2624 uint64_t unkwn_word_cnt : 18; /**< Number of unknown control words. Saturates. Interrupt on 2625 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2626#else 2627 uint64_t unkwn_word_cnt : 18; 2628 uint64_t reserved_18_63 : 46; 2629#endif 2630 } s; 2631 struct cvmx_ilk_rx_lnex_stat5_s cn68xx; 2632 struct cvmx_ilk_rx_lnex_stat5_s cn68xxp1; 2633}; 2634typedef union cvmx_ilk_rx_lnex_stat5 cvmx_ilk_rx_lnex_stat5_t; 2635 2636/** 2637 * cvmx_ilk_rx_lne#_stat6 2638 */ 2639union cvmx_ilk_rx_lnex_stat6 { 2640 uint64_t u64; 2641 struct cvmx_ilk_rx_lnex_stat6_s { 2642#ifdef __BIG_ENDIAN_BITFIELD 2643 uint64_t reserved_18_63 : 46; 2644 uint64_t scrm_sync_loss_cnt : 18; /**< Number of times scrambler synchronization was lost (due to 2645 either 4 consecutive bad sync words or 3 consecutive scrambler 2646 state mismatches). Saturates. 
Interrupt on saturation if 2647 ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2648#else 2649 uint64_t scrm_sync_loss_cnt : 18; 2650 uint64_t reserved_18_63 : 46; 2651#endif 2652 } s; 2653 struct cvmx_ilk_rx_lnex_stat6_s cn68xx; 2654 struct cvmx_ilk_rx_lnex_stat6_s cn68xxp1; 2655}; 2656typedef union cvmx_ilk_rx_lnex_stat6 cvmx_ilk_rx_lnex_stat6_t; 2657 2658/** 2659 * cvmx_ilk_rx_lne#_stat7 2660 */ 2661union cvmx_ilk_rx_lnex_stat7 { 2662 uint64_t u64; 2663 struct cvmx_ilk_rx_lnex_stat7_s { 2664#ifdef __BIG_ENDIAN_BITFIELD 2665 uint64_t reserved_18_63 : 46; 2666 uint64_t scrm_match_cnt : 18; /**< Number of scrambler state matches received. Saturates. 2667 Interrupt on saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2668#else 2669 uint64_t scrm_match_cnt : 18; 2670 uint64_t reserved_18_63 : 46; 2671#endif 2672 } s; 2673 struct cvmx_ilk_rx_lnex_stat7_s cn68xx; 2674 struct cvmx_ilk_rx_lnex_stat7_s cn68xxp1; 2675}; 2676typedef union cvmx_ilk_rx_lnex_stat7 cvmx_ilk_rx_lnex_stat7_t; 2677 2678/** 2679 * cvmx_ilk_rx_lne#_stat8 2680 */ 2681union cvmx_ilk_rx_lnex_stat8 { 2682 uint64_t u64; 2683 struct cvmx_ilk_rx_lnex_stat8_s { 2684#ifdef __BIG_ENDIAN_BITFIELD 2685 uint64_t reserved_18_63 : 46; 2686 uint64_t skipw_good_cnt : 18; /**< Number of good skip words. Saturates. Interrupt on saturation 2687 if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2688#else 2689 uint64_t skipw_good_cnt : 18; 2690 uint64_t reserved_18_63 : 46; 2691#endif 2692 } s; 2693 struct cvmx_ilk_rx_lnex_stat8_s cn68xx; 2694 struct cvmx_ilk_rx_lnex_stat8_s cn68xxp1; 2695}; 2696typedef union cvmx_ilk_rx_lnex_stat8 cvmx_ilk_rx_lnex_stat8_t; 2697 2698/** 2699 * cvmx_ilk_rx_lne#_stat9 2700 */ 2701union cvmx_ilk_rx_lnex_stat9 { 2702 uint64_t u64; 2703 struct cvmx_ilk_rx_lnex_stat9_s { 2704#ifdef __BIG_ENDIAN_BITFIELD 2705 uint64_t reserved_50_63 : 14; 2706 uint64_t crc32_err_cnt : 18; /**< Number of errors in the lane CRC. Saturates. 
Interrupt on 2707 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2708 uint64_t reserved_27_31 : 5; 2709 uint64_t crc32_match_cnt : 27; /**< Number of CRC32 matches received. Saturates. Interrupt on 2710 saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */ 2711#else 2712 uint64_t crc32_match_cnt : 27; 2713 uint64_t reserved_27_31 : 5; 2714 uint64_t crc32_err_cnt : 18; 2715 uint64_t reserved_50_63 : 14; 2716#endif 2717 } s; 2718 struct cvmx_ilk_rx_lnex_stat9_s cn68xx; 2719 struct cvmx_ilk_rx_lnex_stat9_s cn68xxp1; 2720}; 2721typedef union cvmx_ilk_rx_lnex_stat9 cvmx_ilk_rx_lnex_stat9_t; 2722 2723/** 2724 * cvmx_ilk_rxf_idx_pmap 2725 */ 2726union cvmx_ilk_rxf_idx_pmap { 2727 uint64_t u64; 2728 struct cvmx_ilk_rxf_idx_pmap_s { 2729#ifdef __BIG_ENDIAN_BITFIELD 2730 uint64_t reserved_25_63 : 39; 2731 uint64_t inc : 9; /**< Increment to add to current index for next index. */ 2732 uint64_t reserved_9_15 : 7; 2733 uint64_t index : 9; /**< Specify the link/channel accessed by the next CSR read/write to 2734 port map memory. 
IDX[8]=link, IDX[7:0]=channel */ 2735#else 2736 uint64_t index : 9; 2737 uint64_t reserved_9_15 : 7; 2738 uint64_t inc : 9; 2739 uint64_t reserved_25_63 : 39; 2740#endif 2741 } s; 2742 struct cvmx_ilk_rxf_idx_pmap_s cn68xx; 2743 struct cvmx_ilk_rxf_idx_pmap_s cn68xxp1; 2744}; 2745typedef union cvmx_ilk_rxf_idx_pmap cvmx_ilk_rxf_idx_pmap_t; 2746 2747/** 2748 * cvmx_ilk_rxf_mem_pmap 2749 */ 2750union cvmx_ilk_rxf_mem_pmap { 2751 uint64_t u64; 2752 struct cvmx_ilk_rxf_mem_pmap_s { 2753#ifdef __BIG_ENDIAN_BITFIELD 2754 uint64_t reserved_6_63 : 58; 2755 uint64_t port_kind : 6; /**< Specify the port-kind for the link/channel selected by 2756 ILK_IDX_PMAP[IDX] */ 2757#else 2758 uint64_t port_kind : 6; 2759 uint64_t reserved_6_63 : 58; 2760#endif 2761 } s; 2762 struct cvmx_ilk_rxf_mem_pmap_s cn68xx; 2763 struct cvmx_ilk_rxf_mem_pmap_s cn68xxp1; 2764}; 2765typedef union cvmx_ilk_rxf_mem_pmap cvmx_ilk_rxf_mem_pmap_t; 2766 2767/** 2768 * cvmx_ilk_ser_cfg 2769 */ 2770union cvmx_ilk_ser_cfg { 2771 uint64_t u64; 2772 struct cvmx_ilk_ser_cfg_s { 2773#ifdef __BIG_ENDIAN_BITFIELD 2774 uint64_t reserved_57_63 : 7; 2775 uint64_t ser_rxpol_auto : 1; /**< Serdes lane receive polarity auto detection mode */ 2776 uint64_t reserved_48_55 : 8; 2777 uint64_t ser_rxpol : 8; /**< Serdes lane receive polarity 2778 - 0: rx without inversion 2779 - 1: rx with inversion */ 2780 uint64_t reserved_32_39 : 8; 2781 uint64_t ser_txpol : 8; /**< Serdes lane transmit polarity 2782 - 0: tx without inversion 2783 - 1: tx with inversion */ 2784 uint64_t reserved_16_23 : 8; 2785 uint64_t ser_reset_n : 8; /**< Serdes lane reset */ 2786 uint64_t reserved_6_7 : 2; 2787 uint64_t ser_pwrup : 2; /**< Serdes modules (QLM) power up. 
*/ 2788 uint64_t reserved_2_3 : 2; 2789 uint64_t ser_haul : 2; /**< Serdes module (QLM) haul mode */ 2790#else 2791 uint64_t ser_haul : 2; 2792 uint64_t reserved_2_3 : 2; 2793 uint64_t ser_pwrup : 2; 2794 uint64_t reserved_6_7 : 2; 2795 uint64_t ser_reset_n : 8; 2796 uint64_t reserved_16_23 : 8; 2797 uint64_t ser_txpol : 8; 2798 uint64_t reserved_32_39 : 8; 2799 uint64_t ser_rxpol : 8; 2800 uint64_t reserved_48_55 : 8; 2801 uint64_t ser_rxpol_auto : 1; 2802 uint64_t reserved_57_63 : 7; 2803#endif 2804 } s; 2805 struct cvmx_ilk_ser_cfg_s cn68xx; 2806 struct cvmx_ilk_ser_cfg_s cn68xxp1; 2807}; 2808typedef union cvmx_ilk_ser_cfg cvmx_ilk_ser_cfg_t; 2809 2810/** 2811 * cvmx_ilk_tx#_cfg0 2812 */ 2813union cvmx_ilk_txx_cfg0 { 2814 uint64_t u64; 2815 struct cvmx_ilk_txx_cfg0_s { 2816#ifdef __BIG_ENDIAN_BITFIELD 2817 uint64_t ext_lpbk_fc : 1; /**< Enable Rx-Tx flowcontrol loopback (external) */ 2818 uint64_t ext_lpbk : 1; /**< Enable Rx-Tx data loopback (external). Note that with differing 2819 transmit & receive clocks, skip word are inserted/deleted */ 2820 uint64_t int_lpbk : 1; /**< Enable Tx-Rx loopback (internal) */ 2821 uint64_t reserved_57_60 : 4; 2822 uint64_t ptrn_mode : 1; /**< Enable programmable test pattern mode. This mode allows 2823 software to send a packet containing a programmable pattern. 2824 While in this mode, the scramblers and disparity inversion will 2825 be disabled. In addition, no framing layer control words will 2826 be transmitted (ie. no SYNC, scrambler state, skip, or 2827 diagnostic words will be transmitted). 2828 2829 NOTE: Software must first write ILK_TXX_CFG0[LANE_ENA]=0 before 2830 enabling/disabling this mode. */ 2831 uint64_t reserved_55_55 : 1; 2832 uint64_t lnk_stats_ena : 1; /**< Enable link statistics counters */ 2833 uint64_t mltuse_fc_ena : 1; /**< When set, the multi-use field of control words will contain 2834 flow control status. 
Otherwise, the multi-use field will 2835 contain ILK_TXX_CFG1[TX_MLTUSE] */ 2836 uint64_t cal_ena : 1; /**< Enable Tx calendar, else default calendar used: 2837 First control word: 2838 Entry 0 = link 2839 Entry 1 = backpressue id 0 2840 Entry 2 = backpressue id 1 2841 ...etc. 2842 Second control word: 2843 Entry 15 = link 2844 Entry 16 = backpressue id 15 2845 Entry 17 = backpressue id 16 2846 ...etc. 2847 This continues until the status for all 64 backpressue ids gets 2848 transmitted (ie. 0-68 calendar table entries). The remaining 3 2849 calendar table entries (ie. 69-71) will always transmit XOFF. 2850 2851 To disable backpressure completely, enable the calendar table 2852 and program each calendar table entry to transmit XON */ 2853 uint64_t mfrm_len : 13; /**< The quantity of data sent on each lane including one sync word, 2854 scrambler state, diag word, zero or more skip words, and the 2855 data payload. Must be large than ILK_TXX_CFG1[SKIP_CNT]+9. 2856 Supported range:ILK_TXX_CFG1[SKIP_CNT]+9 < MFRM_LEN <= 4096) */ 2857 uint64_t brst_shrt : 7; /**< Minimum interval between burst control words, as a multiple of 2858 8 bytes. Supported range from 8 bytes to 512 (ie. 0 < 2859 BRST_SHRT <= 64) */ 2860 uint64_t lane_rev : 1; /**< Lane reversal. When enabled, lane striping is performed from 2861 most significant lane enabled to least significant lane 2862 enabled. LANE_ENA must be zero before changing LANE_REV. */ 2863 uint64_t brst_max : 5; /**< Maximum size of a data burst, as a multiple of 64 byte blocks. 2864 Supported range is from 64 bytes to 1024 bytes. (ie. 0 < 2865 BRST_MAX <= 16) */ 2866 uint64_t reserved_25_25 : 1; 2867 uint64_t cal_depth : 9; /**< Number of valid entries in the calendar. CAL_DEPTH[2:0] must 2868 be zero. Supported range from 8 to 288. If CAL_ENA is 0, 2869 this field has no effect and the calendar depth is 72 entries. */ 2870 uint64_t reserved_8_15 : 8; 2871 uint64_t lane_ena : 8; /**< Lane enable mask. 
Link is enabled if any lane is enabled. The 2872 same lane should not be enabled in multiple ILK_TXx_CFG0. Each 2873 bit of LANE_ENA maps to a TX lane (TLE) and a QLM lane. NOTE: 2874 LANE_REV has no effect on this mapping. 2875 2876 LANE_ENA[0] = TLE0 = QLM1 lane 0 2877 LANE_ENA[1] = TLE1 = QLM1 lane 1 2878 LANE_ENA[2] = TLE2 = QLM1 lane 2 2879 LANE_ENA[3] = TLE3 = QLM1 lane 3 2880 LANE_ENA[4] = TLE4 = QLM2 lane 0 2881 LANE_ENA[5] = TLE5 = QLM2 lane 1 2882 LANE_ENA[6] = TLE6 = QLM2 lane 2 2883 LANE_ENA[7] = TLE7 = QLM2 lane 3 */ 2884#else 2885 uint64_t lane_ena : 8; 2886 uint64_t reserved_8_15 : 8; 2887 uint64_t cal_depth : 9; 2888 uint64_t reserved_25_25 : 1; 2889 uint64_t brst_max : 5; 2890 uint64_t lane_rev : 1; 2891 uint64_t brst_shrt : 7; 2892 uint64_t mfrm_len : 13; 2893 uint64_t cal_ena : 1; 2894 uint64_t mltuse_fc_ena : 1; 2895 uint64_t lnk_stats_ena : 1; 2896 uint64_t reserved_55_55 : 1; 2897 uint64_t ptrn_mode : 1; 2898 uint64_t reserved_57_60 : 4; 2899 uint64_t int_lpbk : 1; 2900 uint64_t ext_lpbk : 1; 2901 uint64_t ext_lpbk_fc : 1; 2902#endif 2903 } s; 2904 struct cvmx_ilk_txx_cfg0_s cn68xx; 2905 struct cvmx_ilk_txx_cfg0_s cn68xxp1; 2906}; 2907typedef union cvmx_ilk_txx_cfg0 cvmx_ilk_txx_cfg0_t; 2908 2909/** 2910 * cvmx_ilk_tx#_cfg1 2911 */ 2912union cvmx_ilk_txx_cfg1 { 2913 uint64_t u64; 2914 struct cvmx_ilk_txx_cfg1_s { 2915#ifdef __BIG_ENDIAN_BITFIELD 2916 uint64_t reserved_33_63 : 31; 2917 uint64_t pkt_busy : 1; /**< Tx-Link is transmitting data. */ 2918 uint64_t pipe_crd_dis : 1; /**< Disable pipe credits. Should be set when PKO is configure to 2919 ignore pipe credits. */ 2920 uint64_t ptp_delay : 5; /**< Timestamp commit delay. Must not be zero. */ 2921 uint64_t skip_cnt : 4; /**< Number of skip words to insert after the scrambler state */ 2922 uint64_t pkt_flush : 1; /**< Packet transmit flush. While PKT_FLUSH=1, the TxFifo will 2923 continuously drain; all data will be dropped. 
Software should 2924 first write PKT_ENA=0 and wait packet transmission to stop. */ 2925 uint64_t pkt_ena : 1; /**< Packet transmit enable. When PKT_ENA=0, the Tx-Link will stop 2926 transmitting packets, as per RX_LINK_FC_PKT */ 2927 uint64_t la_mode : 1; /**< 0 = Interlaken 2928 1 = Interlaken Look-Aside */ 2929 uint64_t tx_link_fc : 1; /**< Link flow control status transmitted by the Tx-Link 2930 XON when RX_FIFO_CNT <= RX_FIFO_HWM and lane alignment is done */ 2931 uint64_t rx_link_fc : 1; /**< Link flow control status received in burst/idle control words. 2932 When RX_LINK_FC_IGN=0, XOFF will cause Tx-Link to stop 2933 transmitting on all channels. */ 2934 uint64_t reserved_12_16 : 5; 2935 uint64_t tx_link_fc_jam : 1; /**< All flow control transmitted in burst/idle control words will 2936 be XOFF whenever TX_LINK_FC is XOFF. Enable this to allow 2937 link XOFF to automatically XOFF all channels. */ 2938 uint64_t rx_link_fc_pkt : 1; /**< Link flow control received in burst/idle control words causes 2939 Tx-Link to stop transmitting at the end of a packet instead of 2940 the end of a burst */ 2941 uint64_t rx_link_fc_ign : 1; /**< Ignore the link flow control status received in burst/idle 2942 control words */ 2943 uint64_t rmatch : 1; /**< Enable rate matching circuitry */ 2944 uint64_t tx_mltuse : 8; /**< Multiple Use bits used when ILKx_TX_CFG[LA_MODE=0] and 2945 ILKx_TX_CFG[MLTUSE_FC_ENA] is zero */ 2946#else 2947 uint64_t tx_mltuse : 8; 2948 uint64_t rmatch : 1; 2949 uint64_t rx_link_fc_ign : 1; 2950 uint64_t rx_link_fc_pkt : 1; 2951 uint64_t tx_link_fc_jam : 1; 2952 uint64_t reserved_12_16 : 5; 2953 uint64_t rx_link_fc : 1; 2954 uint64_t tx_link_fc : 1; 2955 uint64_t la_mode : 1; 2956 uint64_t pkt_ena : 1; 2957 uint64_t pkt_flush : 1; 2958 uint64_t skip_cnt : 4; 2959 uint64_t ptp_delay : 5; 2960 uint64_t pipe_crd_dis : 1; 2961 uint64_t pkt_busy : 1; 2962 uint64_t reserved_33_63 : 31; 2963#endif 2964 } s; 2965 struct cvmx_ilk_txx_cfg1_s cn68xx; 2966 struct 
cvmx_ilk_txx_cfg1_cn68xxp1 { 2967#ifdef __BIG_ENDIAN_BITFIELD 2968 uint64_t reserved_32_63 : 32; 2969 uint64_t pipe_crd_dis : 1; /**< Disable pipe credits. Should be set when PKO is configure to 2970 ignore pipe credits. */ 2971 uint64_t ptp_delay : 5; /**< Timestamp commit delay. Must not be zero. */ 2972 uint64_t skip_cnt : 4; /**< Number of skip words to insert after the scrambler state */ 2973 uint64_t pkt_flush : 1; /**< Packet transmit flush. While PKT_FLUSH=1, the TxFifo will 2974 continuously drain; all data will be dropped. Software should 2975 first write PKT_ENA=0 and wait packet transmission to stop. */ 2976 uint64_t pkt_ena : 1; /**< Packet transmit enable. When PKT_ENA=0, the Tx-Link will stop 2977 transmitting packets, as per RX_LINK_FC_PKT */ 2978 uint64_t la_mode : 1; /**< 0 = Interlaken 2979 1 = Interlaken Look-Aside */ 2980 uint64_t tx_link_fc : 1; /**< Link flow control status transmitted by the Tx-Link 2981 XON when RX_FIFO_CNT <= RX_FIFO_HWM and lane alignment is done */ 2982 uint64_t rx_link_fc : 1; /**< Link flow control status received in burst/idle control words. 2983 When RX_LINK_FC_IGN=0, XOFF will cause Tx-Link to stop 2984 transmitting on all channels. */ 2985 uint64_t reserved_12_16 : 5; 2986 uint64_t tx_link_fc_jam : 1; /**< All flow control transmitted in burst/idle control words will 2987 be XOFF whenever TX_LINK_FC is XOFF. Enable this to allow 2988 link XOFF to automatically XOFF all channels. 
*/ 2989 uint64_t rx_link_fc_pkt : 1; /**< Link flow control received in burst/idle control words causes 2990 Tx-Link to stop transmitting at the end of a packet instead of 2991 the end of a burst */ 2992 uint64_t rx_link_fc_ign : 1; /**< Ignore the link flow control status received in burst/idle 2993 control words */ 2994 uint64_t rmatch : 1; /**< Enable rate matching circuitry */ 2995 uint64_t tx_mltuse : 8; /**< Multiple Use bits used when ILKx_TX_CFG[LA_MODE=0] and 2996 ILKx_TX_CFG[MLTUSE_FC_ENA] is zero */ 2997#else 2998 uint64_t tx_mltuse : 8; 2999 uint64_t rmatch : 1; 3000 uint64_t rx_link_fc_ign : 1; 3001 uint64_t rx_link_fc_pkt : 1; 3002 uint64_t tx_link_fc_jam : 1; 3003 uint64_t reserved_12_16 : 5; 3004 uint64_t rx_link_fc : 1; 3005 uint64_t tx_link_fc : 1; 3006 uint64_t la_mode : 1; 3007 uint64_t pkt_ena : 1; 3008 uint64_t pkt_flush : 1; 3009 uint64_t skip_cnt : 4; 3010 uint64_t ptp_delay : 5; 3011 uint64_t pipe_crd_dis : 1; 3012 uint64_t reserved_32_63 : 32; 3013#endif 3014 } cn68xxp1; 3015}; 3016typedef union cvmx_ilk_txx_cfg1 cvmx_ilk_txx_cfg1_t; 3017 3018/** 3019 * cvmx_ilk_tx#_dbg 3020 */ 3021union cvmx_ilk_txx_dbg { 3022 uint64_t u64; 3023 struct cvmx_ilk_txx_dbg_s { 3024#ifdef __BIG_ENDIAN_BITFIELD 3025 uint64_t reserved_3_63 : 61; 3026 uint64_t tx_bad_crc24 : 1; /**< Send a control word with bad CRC24. Hardware will clear this 3027 field once the injection is performed. 
*/ 3028 uint64_t tx_bad_ctlw2 : 1; /**< Send a control word without the control bit set */ 3029 uint64_t tx_bad_ctlw1 : 1; /**< Send a data word with the control bit set */ 3030#else 3031 uint64_t tx_bad_ctlw1 : 1; 3032 uint64_t tx_bad_ctlw2 : 1; 3033 uint64_t tx_bad_crc24 : 1; 3034 uint64_t reserved_3_63 : 61; 3035#endif 3036 } s; 3037 struct cvmx_ilk_txx_dbg_s cn68xx; 3038 struct cvmx_ilk_txx_dbg_s cn68xxp1; 3039}; 3040typedef union cvmx_ilk_txx_dbg cvmx_ilk_txx_dbg_t; 3041 3042/** 3043 * cvmx_ilk_tx#_flow_ctl0 3044 */ 3045union cvmx_ilk_txx_flow_ctl0 { 3046 uint64_t u64; 3047 struct cvmx_ilk_txx_flow_ctl0_s { 3048#ifdef __BIG_ENDIAN_BITFIELD 3049 uint64_t status : 64; /**< IPD flow control status for backpressue id 63-0, where a 0 3050 indicates the presence of backpressure (ie. XOFF) and 1 3051 indicates the absence of backpressure (ie. XON) */ 3052#else 3053 uint64_t status : 64; 3054#endif 3055 } s; 3056 struct cvmx_ilk_txx_flow_ctl0_s cn68xx; 3057 struct cvmx_ilk_txx_flow_ctl0_s cn68xxp1; 3058}; 3059typedef union cvmx_ilk_txx_flow_ctl0 cvmx_ilk_txx_flow_ctl0_t; 3060 3061/** 3062 * cvmx_ilk_tx#_flow_ctl1 3063 * 3064 * Notes: 3065 * Do not publish. 3066 * 3067 */ 3068union cvmx_ilk_txx_flow_ctl1 { 3069 uint64_t u64; 3070 struct cvmx_ilk_txx_flow_ctl1_s { 3071#ifdef __BIG_ENDIAN_BITFIELD 3072 uint64_t reserved_0_63 : 64; 3073#else 3074 uint64_t reserved_0_63 : 64; 3075#endif 3076 } s; 3077 struct cvmx_ilk_txx_flow_ctl1_s cn68xx; 3078 struct cvmx_ilk_txx_flow_ctl1_s cn68xxp1; 3079}; 3080typedef union cvmx_ilk_txx_flow_ctl1 cvmx_ilk_txx_flow_ctl1_t; 3081 3082/** 3083 * cvmx_ilk_tx#_idx_cal 3084 */ 3085union cvmx_ilk_txx_idx_cal { 3086 uint64_t u64; 3087 struct cvmx_ilk_txx_idx_cal_s { 3088#ifdef __BIG_ENDIAN_BITFIELD 3089 uint64_t reserved_14_63 : 50; 3090 uint64_t inc : 6; /**< Increment to add to current index for next index. NOTE: 3091 Increment only performed after *MEM_CAL1 access (ie. 
not 3092 *MEM_CAL0) */ 3093 uint64_t reserved_6_7 : 2; 3094 uint64_t index : 6; /**< Specify the group of 8 entries accessed by the next CSR 3095 read/write to calendar table memory. Software must ensure IDX 3096 is <36 whenever writing to *MEM_CAL1 */ 3097#else 3098 uint64_t index : 6; 3099 uint64_t reserved_6_7 : 2; 3100 uint64_t inc : 6; 3101 uint64_t reserved_14_63 : 50; 3102#endif 3103 } s; 3104 struct cvmx_ilk_txx_idx_cal_s cn68xx; 3105 struct cvmx_ilk_txx_idx_cal_s cn68xxp1; 3106}; 3107typedef union cvmx_ilk_txx_idx_cal cvmx_ilk_txx_idx_cal_t; 3108 3109/** 3110 * cvmx_ilk_tx#_idx_pmap 3111 */ 3112union cvmx_ilk_txx_idx_pmap { 3113 uint64_t u64; 3114 struct cvmx_ilk_txx_idx_pmap_s { 3115#ifdef __BIG_ENDIAN_BITFIELD 3116 uint64_t reserved_23_63 : 41; 3117 uint64_t inc : 7; /**< Increment to add to current index for next index. */ 3118 uint64_t reserved_7_15 : 9; 3119 uint64_t index : 7; /**< Specify the port-pipe accessed by the next CSR read/write to 3120 ILK_TXx_MEM_PMAP. Note that IDX=n is always port-pipe n, 3121 regardless of ILK_TXx_PIPE[BASE] */ 3122#else 3123 uint64_t index : 7; 3124 uint64_t reserved_7_15 : 9; 3125 uint64_t inc : 7; 3126 uint64_t reserved_23_63 : 41; 3127#endif 3128 } s; 3129 struct cvmx_ilk_txx_idx_pmap_s cn68xx; 3130 struct cvmx_ilk_txx_idx_pmap_s cn68xxp1; 3131}; 3132typedef union cvmx_ilk_txx_idx_pmap cvmx_ilk_txx_idx_pmap_t; 3133 3134/** 3135 * cvmx_ilk_tx#_idx_stat0 3136 */ 3137union cvmx_ilk_txx_idx_stat0 { 3138 uint64_t u64; 3139 struct cvmx_ilk_txx_idx_stat0_s { 3140#ifdef __BIG_ENDIAN_BITFIELD 3141 uint64_t reserved_32_63 : 32; 3142 uint64_t clr : 1; /**< CSR read to ILK_TXx_MEM_STAT0 clears the selected counter after 3143 returning its current value. 
*/ 3144 uint64_t reserved_24_30 : 7; 3145 uint64_t inc : 8; /**< Increment to add to current index for next index */ 3146 uint64_t reserved_8_15 : 8; 3147 uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the 3148 ILK_TXx_MEM_STAT0 */ 3149#else 3150 uint64_t index : 8; 3151 uint64_t reserved_8_15 : 8; 3152 uint64_t inc : 8; 3153 uint64_t reserved_24_30 : 7; 3154 uint64_t clr : 1; 3155 uint64_t reserved_32_63 : 32; 3156#endif 3157 } s; 3158 struct cvmx_ilk_txx_idx_stat0_s cn68xx; 3159 struct cvmx_ilk_txx_idx_stat0_s cn68xxp1; 3160}; 3161typedef union cvmx_ilk_txx_idx_stat0 cvmx_ilk_txx_idx_stat0_t; 3162 3163/** 3164 * cvmx_ilk_tx#_idx_stat1 3165 */ 3166union cvmx_ilk_txx_idx_stat1 { 3167 uint64_t u64; 3168 struct cvmx_ilk_txx_idx_stat1_s { 3169#ifdef __BIG_ENDIAN_BITFIELD 3170 uint64_t reserved_32_63 : 32; 3171 uint64_t clr : 1; /**< CSR read to ILK_TXx_MEM_STAT1 clears the selected counter after 3172 returning its current value. */ 3173 uint64_t reserved_24_30 : 7; 3174 uint64_t inc : 8; /**< Increment to add to current index for next index */ 3175 uint64_t reserved_8_15 : 8; 3176 uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the 3177 ILK_TXx_MEM_STAT1 */ 3178#else 3179 uint64_t index : 8; 3180 uint64_t reserved_8_15 : 8; 3181 uint64_t inc : 8; 3182 uint64_t reserved_24_30 : 7; 3183 uint64_t clr : 1; 3184 uint64_t reserved_32_63 : 32; 3185#endif 3186 } s; 3187 struct cvmx_ilk_txx_idx_stat1_s cn68xx; 3188 struct cvmx_ilk_txx_idx_stat1_s cn68xxp1; 3189}; 3190typedef union cvmx_ilk_txx_idx_stat1 cvmx_ilk_txx_idx_stat1_t; 3191 3192/** 3193 * cvmx_ilk_tx#_int 3194 */ 3195union cvmx_ilk_txx_int { 3196 uint64_t u64; 3197 struct cvmx_ilk_txx_int_s { 3198#ifdef __BIG_ENDIAN_BITFIELD 3199 uint64_t reserved_4_63 : 60; 3200 uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */ 3201 uint64_t bad_pipe : 1; /**< Received a PKO port-pipe out of the range specified by 3202 ILK_TXX_PIPE */ 3203 uint64_t 
bad_seq : 1; /**< Received sequence is not SOP followed by 0 or more data cycles 3204 followed by EOP. PKO config assigned multiple engines to the 3205 same ILK Tx Link. */ 3206 uint64_t txf_err : 1; /**< TX fifo parity error occurred. At EOP time, EOP_Format will 3207 reflect the error. */ 3208#else 3209 uint64_t txf_err : 1; 3210 uint64_t bad_seq : 1; 3211 uint64_t bad_pipe : 1; 3212 uint64_t stat_cnt_ovfl : 1; 3213 uint64_t reserved_4_63 : 60; 3214#endif 3215 } s; 3216 struct cvmx_ilk_txx_int_s cn68xx; 3217 struct cvmx_ilk_txx_int_s cn68xxp1; 3218}; 3219typedef union cvmx_ilk_txx_int cvmx_ilk_txx_int_t; 3220 3221/** 3222 * cvmx_ilk_tx#_int_en 3223 */ 3224union cvmx_ilk_txx_int_en { 3225 uint64_t u64; 3226 struct cvmx_ilk_txx_int_en_s { 3227#ifdef __BIG_ENDIAN_BITFIELD 3228 uint64_t reserved_4_63 : 60; 3229 uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */ 3230 uint64_t bad_pipe : 1; /**< Received a PKO port-pipe out of the range specified by 3231 ILK_TXX_PIPE. */ 3232 uint64_t bad_seq : 1; /**< Received sequence is not SOP followed by 0 or more data cycles 3233 followed by EOP. PKO config assigned multiple engines to the 3234 same ILK Tx Link. */ 3235 uint64_t txf_err : 1; /**< TX fifo parity error occurred. At EOP time, EOP_Format will 3236 reflect the error. */ 3237#else 3238 uint64_t txf_err : 1; 3239 uint64_t bad_seq : 1; 3240 uint64_t bad_pipe : 1; 3241 uint64_t stat_cnt_ovfl : 1; 3242 uint64_t reserved_4_63 : 60; 3243#endif 3244 } s; 3245 struct cvmx_ilk_txx_int_en_s cn68xx; 3246 struct cvmx_ilk_txx_int_en_s cn68xxp1; 3247}; 3248typedef union cvmx_ilk_txx_int_en cvmx_ilk_txx_int_en_t; 3249 3250/** 3251 * cvmx_ilk_tx#_mem_cal0 3252 * 3253 * Notes: 3254 * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. Software 3255 * must never read them in reverse order or read one without reading the 3256 * other. 3257 * 3258 * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. 
 * Software must never write them in reverse order or write one without
 * writing the other.
 */
union cvmx_ilk_txx_mem_cal0 {
	uint64_t u64;
	struct cvmx_ilk_txx_mem_cal0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_36_63               : 28;
	uint64_t entry_ctl3                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+3
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_33_33               : 1;
	uint64_t bpid3                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+3
                                                         (unused if ENTRY_CTL3 != 0) */
	uint64_t entry_ctl2                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+2
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_24_24               : 1;
	uint64_t bpid2                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+2
                                                         (unused if ENTRY_CTL2 != 0) */
	uint64_t entry_ctl1                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+1
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_15_15               : 1;
	uint64_t bpid1                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+1
                                                         (unused if ENTRY_CTL1 != 0) */
	uint64_t entry_ctl0                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+0
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_6_6                 : 1;
	uint64_t bpid0                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+0
                                                         (unused if ENTRY_CTL0 != 0) */
#else
	uint64_t bpid0                        : 6;
	uint64_t reserved_6_6                 : 1;
	uint64_t entry_ctl0                   : 2;
	uint64_t bpid1                        : 6;
	uint64_t reserved_15_15               : 1;
	uint64_t entry_ctl1                   : 2;
	uint64_t bpid2                        : 6;
	uint64_t reserved_24_24               : 1;
	uint64_t entry_ctl2                   : 2;
	uint64_t bpid3                        : 6;
	uint64_t reserved_33_33               : 1;
	uint64_t entry_ctl3                   : 2;
	uint64_t reserved_36_63               : 28;
#endif
	} s;
	struct cvmx_ilk_txx_mem_cal0_s        cn68xx;
	struct cvmx_ilk_txx_mem_cal0_s        cn68xxp1;
};
typedef union cvmx_ilk_txx_mem_cal0 cvmx_ilk_txx_mem_cal0_t;

/**
 * cvmx_ilk_tx#_mem_cal1
 *
 * Notes:
 * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. Software
 * must never read them in reverse order or read one without reading the
 * other.
 *
 * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
 * Software must never write them in reverse order or write one without
 * writing the other.
 */
union cvmx_ilk_txx_mem_cal1 {
	uint64_t u64;
	struct cvmx_ilk_txx_mem_cal1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_36_63               : 28;
	uint64_t entry_ctl7                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+7
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_33_33               : 1;
	uint64_t bpid7                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+7
                                                         (unused if ENTRY_CTL7 != 0) */
	uint64_t entry_ctl6                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+6
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_24_24               : 1;
	uint64_t bpid6                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+6
                                                         (unused if ENTRY_CTL6 != 0) */
	uint64_t entry_ctl5                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+5
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_15_15               : 1;
	uint64_t bpid5                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+5
                                                         (unused if ENTRY_CTL5 != 0) */
	uint64_t entry_ctl4                   : 2;  /**< Select source of XON/XOFF for entry (IDX*8)+4
                                                         - 0: IPD backpressure id
                                                         - 1: Link
                                                         - 2: XOFF
                                                         - 3: XON */
	uint64_t reserved_6_6                 : 1;
	uint64_t bpid4                        : 6;  /**< Select IPD backpressure id for calendar table entry (IDX*8)+4
                                                         (unused if ENTRY_CTL4 != 0) */
#else
	uint64_t bpid4                        : 6;
	uint64_t reserved_6_6                 : 1;
	uint64_t entry_ctl4                   : 2;
	uint64_t bpid5                        : 6;
	uint64_t reserved_15_15               : 1;
	uint64_t entry_ctl5                   : 2;
	uint64_t bpid6                        : 6;
	uint64_t reserved_24_24               : 1;
	uint64_t entry_ctl6                   : 2;
	uint64_t bpid7                        : 6;
	uint64_t reserved_33_33               : 1;
	uint64_t entry_ctl7                   : 2;
	uint64_t reserved_36_63               : 28;
#endif
	} s;
	struct cvmx_ilk_txx_mem_cal1_s        cn68xx;
	struct cvmx_ilk_txx_mem_cal1_s        cn68xxp1;
};
typedef union cvmx_ilk_txx_mem_cal1 cvmx_ilk_txx_mem_cal1_t;

/**
 * cvmx_ilk_tx#_mem_pmap
 */
union cvmx_ilk_txx_mem_pmap {
	uint64_t u64;
	struct cvmx_ilk_txx_mem_pmap_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63               : 47;
	uint64_t remap                        : 1;  /**< Dynamically select channel using bits[39:32] of an 8-byte
                                                         header prepended to any packet transmitted on the port-pipe
                                                         selected by ILK_TXx_IDX_PMAP[IDX].

                                                         ***NOTE: Added in pass 2.0 */
	uint64_t reserved_8_15                : 8;
	uint64_t channel                      : 8;  /**< Specify the channel for the port-pipe selected by
                                                         ILK_TXx_IDX_PMAP[IDX] */
#else
	uint64_t channel                      : 8;
	uint64_t reserved_8_15                : 8;
	uint64_t remap                        : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} s;
	struct cvmx_ilk_txx_mem_pmap_s        cn68xx;
	struct cvmx_ilk_txx_mem_pmap_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t channel                      : 8;  /**< Specify the channel for the port-pipe selected by
                                                         ILK_TXx_IDX_PMAP[IDX] */
#else
	uint64_t channel                      : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} cn68xxp1;
};
typedef union cvmx_ilk_txx_mem_pmap cvmx_ilk_txx_mem_pmap_t;

/**
 * cvmx_ilk_tx#_mem_stat0
 */
union cvmx_ilk_txx_mem_stat0 {
	uint64_t u64;
	struct cvmx_ilk_txx_mem_stat0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_28_63               : 36;
	uint64_t tx_pkt                       : 28; /**< Number of packets transmitted per channel (256M)
                                                         Channel selected by ILK_TXx_IDX_STAT0[IDX]. Interrupt on
                                                         saturation if ILK_TXX_INT_EN[STAT_CNT_OVFL]=1. */
#else
	uint64_t tx_pkt                       : 28;
	uint64_t reserved_28_63               : 36;
#endif
	} s;
	struct cvmx_ilk_txx_mem_stat0_s       cn68xx;
	struct cvmx_ilk_txx_mem_stat0_s       cn68xxp1;
};
typedef union cvmx_ilk_txx_mem_stat0 cvmx_ilk_txx_mem_stat0_t;

/**
 * cvmx_ilk_tx#_mem_stat1
 */
union cvmx_ilk_txx_mem_stat1 {
	uint64_t u64;
	struct cvmx_ilk_txx_mem_stat1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_36_63               : 28;
	uint64_t tx_bytes                     : 36; /**< Number of bytes transmitted per channel (64GB) Channel selected
                                                         by ILK_TXx_IDX_STAT1[IDX]. Saturates. Interrupt on
                                                         saturation if ILK_TXX_INT_EN[STAT_CNT_OVFL]=1. */
#else
	uint64_t tx_bytes                     : 36;
	uint64_t reserved_36_63               : 28;
#endif
	} s;
	struct cvmx_ilk_txx_mem_stat1_s       cn68xx;
	struct cvmx_ilk_txx_mem_stat1_s       cn68xxp1;
};
typedef union cvmx_ilk_txx_mem_stat1 cvmx_ilk_txx_mem_stat1_t;

/**
 * cvmx_ilk_tx#_pipe
 */
union cvmx_ilk_txx_pipe {
	uint64_t u64;
	struct cvmx_ilk_txx_pipe_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63               : 40;
	uint64_t nump                         : 8;  /**< Number of pipes assigned to this Tx Link */
	uint64_t reserved_7_15                : 9;
	uint64_t base                         : 7;  /**< When NUMP is non-zero, indicates the base pipe number this
                                                         Tx link will accept.  This Tx will accept PKO packets from
                                                         pipes in the range of:  BASE .. (BASE+(NUMP-1))

                                                         BASE and NUMP must be constrained such that
                                                         1) BASE+(NUMP-1) < 127
                                                         2) Each used PKO pipe must map to exactly
                                                            one port|channel
                                                         3) The pipe ranges must be consistent with
                                                            the PKO configuration. */
#else
	uint64_t base                         : 7;
	uint64_t reserved_7_15                : 9;
	uint64_t nump                         : 8;
	uint64_t reserved_24_63               : 40;
#endif
	} s;
	struct cvmx_ilk_txx_pipe_s            cn68xx;
	struct cvmx_ilk_txx_pipe_s            cn68xxp1;
};
typedef union cvmx_ilk_txx_pipe cvmx_ilk_txx_pipe_t;

/**
 * cvmx_ilk_tx#_rmatch
 */
union cvmx_ilk_txx_rmatch {
	uint64_t u64;
	struct cvmx_ilk_txx_rmatch_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_50_63               : 14;
	uint64_t grnlrty                      : 2;  /**< Granularity of a token, where 1 token equal (1<<GRNLRTY) bytes. */
	uint64_t brst_limit                   : 16; /**< Size of token bucket, also the maximum quantity of data that
                                                         may be burst across the interface before invoking rate limiting
                                                         logic. */
	uint64_t time_limit                   : 16; /**< Number of cycles per time interval. (Must be >= 4) */
	uint64_t rate_limit                   : 16; /**< Number of tokens added to the bucket when the interval timer
                                                         expires. */
#else
	uint64_t rate_limit                   : 16;
	uint64_t time_limit                   : 16;
	uint64_t brst_limit                   : 16;
	uint64_t grnlrty                      : 2;
	uint64_t reserved_50_63               : 14;
#endif
	} s;
	struct cvmx_ilk_txx_rmatch_s          cn68xx;
	struct cvmx_ilk_txx_rmatch_s          cn68xxp1;
};
typedef union cvmx_ilk_txx_rmatch cvmx_ilk_txx_rmatch_t;

#endif