/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/ 39 40 41/** 42 * cvmx-ipd-defs.h 43 * 44 * Configuration and status register (CSR) type definitions for 45 * Octeon ipd. 46 * 47 * This file is auto generated. Do not edit. 48 * 49 * <hr>$Revision$<hr> 50 * 51 */ 52#ifndef __CVMX_IPD_TYPEDEFS_H__ 53#define __CVMX_IPD_TYPEDEFS_H__ 54 55#define CVMX_IPD_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000000ull)) 56#define CVMX_IPD_1st_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000150ull)) 57#define CVMX_IPD_2nd_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000158ull)) 58#define CVMX_IPD_BIST_STATUS (CVMX_ADD_IO_SEG(0x00014F00000007F8ull)) 59#define CVMX_IPD_BP_PRT_RED_END (CVMX_ADD_IO_SEG(0x00014F0000000328ull)) 60#define CVMX_IPD_CLK_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000338ull)) 61#define CVMX_IPD_CTL_STATUS (CVMX_ADD_IO_SEG(0x00014F0000000018ull)) 62#define CVMX_IPD_INT_ENB (CVMX_ADD_IO_SEG(0x00014F0000000160ull)) 63#define CVMX_IPD_INT_SUM (CVMX_ADD_IO_SEG(0x00014F0000000168ull)) 64#define CVMX_IPD_NOT_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000008ull)) 65#define CVMX_IPD_PACKET_MBUFF_SIZE (CVMX_ADD_IO_SEG(0x00014F0000000010ull)) 66#define CVMX_IPD_PKT_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000358ull)) 67#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 68static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT(unsigned long offset) 69{ 70 if (!( 71 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || (offset == 32))) || 72 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) || 73 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) || 74 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) || 75 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))) || 76 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) || 77 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) || 78 
(OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))))) 79 cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT(%lu) is invalid on this chip\n", offset); 80 return CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8; 81} 82#else 83#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8) 84#endif 85#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 86static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT2(unsigned long offset) 87{ 88 if (!( 89 (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 36) && (offset <= 39)))) || 90 (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 36) && (offset <= 39)))) || 91 (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 36) && (offset <= 39)))))) 92 cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT2(%lu) is invalid on this chip\n", offset); 93 return CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36; 94} 95#else 96#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) (CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36) 97#endif 98#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 99static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT3(unsigned long offset) 100{ 101 if (!( 102 (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))))) 103 cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT3(%lu) is invalid on this chip\n", offset); 104 return CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40; 105} 106#else 107#define CVMX_IPD_PORTX_BP_PAGE_CNT3(offset) (CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40) 108#endif 109#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 110static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(unsigned long offset) 111{ 112 if (!( 113 (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 36) && (offset <= 39)))) || 114 (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 36) && (offset <= 39)))) || 115 (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 36) && (offset <= 39)))))) 116 cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(%lu) is invalid on 
this chip\n", offset); 117 return CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36; 118} 119#else 120#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36) 121#endif 122#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 123static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(unsigned long offset) 124{ 125 if (!( 126 (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))))) 127 cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(%lu) is invalid on this chip\n", offset); 128 return CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40; 129} 130#else 131#define CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40) 132#endif 133#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 134static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS_PAIRX(unsigned long offset) 135{ 136 if (!( 137 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || (offset == 32))) || 138 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) || 139 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) || 140 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) || 141 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))) || 142 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) || 143 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) || 144 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))))) 145 cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS_PAIRX(%lu) is invalid on this chip\n", offset); 146 return CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8; 147} 148#else 149#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8) 150#endif 151#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 152static inline 
uint64_t CVMX_IPD_PORT_QOS_INTX(unsigned long offset) 153{ 154 if (!( 155 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset == 0) || (offset == 4))) || 156 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0) || (offset == 2) || (offset == 4))) || 157 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0) || (offset == 4) || (offset == 5))))) 158 cvmx_warn("CVMX_IPD_PORT_QOS_INTX(%lu) is invalid on this chip\n", offset); 159 return CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8; 160} 161#else 162#define CVMX_IPD_PORT_QOS_INTX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8) 163#endif 164#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 165static inline uint64_t CVMX_IPD_PORT_QOS_INT_ENBX(unsigned long offset) 166{ 167 if (!( 168 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset == 0) || (offset == 4))) || 169 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0) || (offset == 2) || (offset == 4))) || 170 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0) || (offset == 4) || (offset == 5))))) 171 cvmx_warn("CVMX_IPD_PORT_QOS_INT_ENBX(%lu) is invalid on this chip\n", offset); 172 return CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8; 173} 174#else 175#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8) 176#endif 177#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 178static inline uint64_t CVMX_IPD_PORT_QOS_X_CNT(unsigned long offset) 179{ 180 if (!( 181 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31) || ((offset >= 256) && (offset <= 319)))) || 182 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31) || ((offset >= 128) && (offset <= 159)) || ((offset >= 256) && (offset <= 319)))) || 183 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31) || ((offset >= 256) && (offset <= 351)))))) 184 cvmx_warn("CVMX_IPD_PORT_QOS_X_CNT(%lu) is invalid on this chip\n", offset); 185 return CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8; 186} 187#else 188#define CVMX_IPD_PORT_QOS_X_CNT(offset) 
(CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8) 189#endif 190#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000348ull)) 191#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000350ull)) 192#define CVMX_IPD_PTR_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000320ull)) 193#define CVMX_IPD_PWP_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000340ull)) 194#define CVMX_IPD_QOS0_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(0) 195#define CVMX_IPD_QOS1_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(1) 196#define CVMX_IPD_QOS2_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(2) 197#define CVMX_IPD_QOS3_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(3) 198#define CVMX_IPD_QOS4_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(4) 199#define CVMX_IPD_QOS5_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(5) 200#define CVMX_IPD_QOS6_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(6) 201#define CVMX_IPD_QOS7_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(7) 202#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 203static inline uint64_t CVMX_IPD_QOSX_RED_MARKS(unsigned long offset) 204{ 205 if (!( 206 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) || 207 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) || 208 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) || 209 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) || 210 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) || 211 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) || 212 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) || 213 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))))) 214 cvmx_warn("CVMX_IPD_QOSX_RED_MARKS(%lu) is invalid on this chip\n", offset); 215 return CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8; 216} 217#else 218#define CVMX_IPD_QOSX_RED_MARKS(offset) (CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8) 219#endif 220#define CVMX_IPD_QUE0_FREE_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000330ull)) 221#define CVMX_IPD_RED_PORT_ENABLE (CVMX_ADD_IO_SEG(0x00014F00000002D8ull)) 222#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 223#define CVMX_IPD_RED_PORT_ENABLE2 
CVMX_IPD_RED_PORT_ENABLE2_FUNC() 224static inline uint64_t CVMX_IPD_RED_PORT_ENABLE2_FUNC(void) 225{ 226 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))) 227 cvmx_warn("CVMX_IPD_RED_PORT_ENABLE2 not supported on this chip\n"); 228 return CVMX_ADD_IO_SEG(0x00014F00000003A8ull); 229} 230#else 231#define CVMX_IPD_RED_PORT_ENABLE2 (CVMX_ADD_IO_SEG(0x00014F00000003A8ull)) 232#endif 233#define CVMX_IPD_RED_QUE0_PARAM CVMX_IPD_RED_QUEX_PARAM(0) 234#define CVMX_IPD_RED_QUE1_PARAM CVMX_IPD_RED_QUEX_PARAM(1) 235#define CVMX_IPD_RED_QUE2_PARAM CVMX_IPD_RED_QUEX_PARAM(2) 236#define CVMX_IPD_RED_QUE3_PARAM CVMX_IPD_RED_QUEX_PARAM(3) 237#define CVMX_IPD_RED_QUE4_PARAM CVMX_IPD_RED_QUEX_PARAM(4) 238#define CVMX_IPD_RED_QUE5_PARAM CVMX_IPD_RED_QUEX_PARAM(5) 239#define CVMX_IPD_RED_QUE6_PARAM CVMX_IPD_RED_QUEX_PARAM(6) 240#define CVMX_IPD_RED_QUE7_PARAM CVMX_IPD_RED_QUEX_PARAM(7) 241#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 242static inline uint64_t CVMX_IPD_RED_QUEX_PARAM(unsigned long offset) 243{ 244 if (!( 245 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) || 246 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) || 247 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) || 248 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) || 249 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) || 250 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) || 251 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) || 252 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))))) 253 cvmx_warn("CVMX_IPD_RED_QUEX_PARAM(%lu) is invalid on this chip\n", offset); 254 return CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8; 255} 256#else 257#define CVMX_IPD_RED_QUEX_PARAM(offset) (CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8) 258#endif 259#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000148ull)) 260#define CVMX_IPD_SUB_PORT_FCS (CVMX_ADD_IO_SEG(0x00014F0000000170ull)) 261#if 
CVMX_ENABLE_CSR_ADDRESS_CHECKING 262#define CVMX_IPD_SUB_PORT_QOS_CNT CVMX_IPD_SUB_PORT_QOS_CNT_FUNC() 263static inline uint64_t CVMX_IPD_SUB_PORT_QOS_CNT_FUNC(void) 264{ 265 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))) 266 cvmx_warn("CVMX_IPD_SUB_PORT_QOS_CNT not supported on this chip\n"); 267 return CVMX_ADD_IO_SEG(0x00014F0000000800ull); 268} 269#else 270#define CVMX_IPD_SUB_PORT_QOS_CNT (CVMX_ADD_IO_SEG(0x00014F0000000800ull)) 271#endif 272#define CVMX_IPD_WQE_FPA_QUEUE (CVMX_ADD_IO_SEG(0x00014F0000000020ull)) 273#define CVMX_IPD_WQE_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000360ull)) 274 275/** 276 * cvmx_ipd_1st_mbuff_skip 277 * 278 * IPD_1ST_MBUFF_SKIP = IPD First MBUFF Word Skip Size 279 * 280 * The number of words that the IPD will skip when writing the first MBUFF. 281 */ 282union cvmx_ipd_1st_mbuff_skip 283{ 284 uint64_t u64; 285 struct cvmx_ipd_1st_mbuff_skip_s 286 { 287#if __BYTE_ORDER == __BIG_ENDIAN 288 uint64_t reserved_6_63 : 58; 289 uint64_t skip_sz : 6; /**< The number of 8-byte words from the top of the 290 1st MBUFF that the IPD will store the next-pointer. 291 Legal values are 0 to 32, where the MAX value 292 is also limited to: 293 IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 18. 294 Must be at least 16 when IPD_CTL_STATUS[NO_WPTR] 295 is set. 
*/ 296#else 297 uint64_t skip_sz : 6; 298 uint64_t reserved_6_63 : 58; 299#endif 300 } s; 301 struct cvmx_ipd_1st_mbuff_skip_s cn30xx; 302 struct cvmx_ipd_1st_mbuff_skip_s cn31xx; 303 struct cvmx_ipd_1st_mbuff_skip_s cn38xx; 304 struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2; 305 struct cvmx_ipd_1st_mbuff_skip_s cn50xx; 306 struct cvmx_ipd_1st_mbuff_skip_s cn52xx; 307 struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1; 308 struct cvmx_ipd_1st_mbuff_skip_s cn56xx; 309 struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1; 310 struct cvmx_ipd_1st_mbuff_skip_s cn58xx; 311 struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1; 312 struct cvmx_ipd_1st_mbuff_skip_s cn63xx; 313 struct cvmx_ipd_1st_mbuff_skip_s cn63xxp1; 314}; 315typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_1st_mbuff_skip_t; 316 317/** 318 * cvmx_ipd_1st_next_ptr_back 319 * 320 * IPD_1st_NEXT_PTR_BACK = IPD First Next Pointer Back Values 321 * 322 * Contains the Back Field for use in creating the Next Pointer Header for the First MBUF 323 */ 324union cvmx_ipd_1st_next_ptr_back 325{ 326 uint64_t u64; 327 struct cvmx_ipd_1st_next_ptr_back_s 328 { 329#if __BYTE_ORDER == __BIG_ENDIAN 330 uint64_t reserved_4_63 : 60; 331 uint64_t back : 4; /**< Used to find head of buffer from the nxt-hdr-ptr. 
*/ 332#else 333 uint64_t back : 4; 334 uint64_t reserved_4_63 : 60; 335#endif 336 } s; 337 struct cvmx_ipd_1st_next_ptr_back_s cn30xx; 338 struct cvmx_ipd_1st_next_ptr_back_s cn31xx; 339 struct cvmx_ipd_1st_next_ptr_back_s cn38xx; 340 struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2; 341 struct cvmx_ipd_1st_next_ptr_back_s cn50xx; 342 struct cvmx_ipd_1st_next_ptr_back_s cn52xx; 343 struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1; 344 struct cvmx_ipd_1st_next_ptr_back_s cn56xx; 345 struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1; 346 struct cvmx_ipd_1st_next_ptr_back_s cn58xx; 347 struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1; 348 struct cvmx_ipd_1st_next_ptr_back_s cn63xx; 349 struct cvmx_ipd_1st_next_ptr_back_s cn63xxp1; 350}; 351typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_1st_next_ptr_back_t; 352 353/** 354 * cvmx_ipd_2nd_next_ptr_back 355 * 356 * IPD_2nd_NEXT_PTR_BACK = IPD Second Next Pointer Back Value 357 * 358 * Contains the Back Field for use in creating the Next Pointer Header for the First MBUF 359 */ 360union cvmx_ipd_2nd_next_ptr_back 361{ 362 uint64_t u64; 363 struct cvmx_ipd_2nd_next_ptr_back_s 364 { 365#if __BYTE_ORDER == __BIG_ENDIAN 366 uint64_t reserved_4_63 : 60; 367 uint64_t back : 4; /**< Used to find head of buffer from the nxt-hdr-ptr. 
*/ 368#else 369 uint64_t back : 4; 370 uint64_t reserved_4_63 : 60; 371#endif 372 } s; 373 struct cvmx_ipd_2nd_next_ptr_back_s cn30xx; 374 struct cvmx_ipd_2nd_next_ptr_back_s cn31xx; 375 struct cvmx_ipd_2nd_next_ptr_back_s cn38xx; 376 struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2; 377 struct cvmx_ipd_2nd_next_ptr_back_s cn50xx; 378 struct cvmx_ipd_2nd_next_ptr_back_s cn52xx; 379 struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1; 380 struct cvmx_ipd_2nd_next_ptr_back_s cn56xx; 381 struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1; 382 struct cvmx_ipd_2nd_next_ptr_back_s cn58xx; 383 struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1; 384 struct cvmx_ipd_2nd_next_ptr_back_s cn63xx; 385 struct cvmx_ipd_2nd_next_ptr_back_s cn63xxp1; 386}; 387typedef union cvmx_ipd_2nd_next_ptr_back cvmx_ipd_2nd_next_ptr_back_t; 388 389/** 390 * cvmx_ipd_bist_status 391 * 392 * IPD_BIST_STATUS = IPD BIST STATUS 393 * 394 * BIST Status for IPD's Memories. 395 */ 396union cvmx_ipd_bist_status 397{ 398 uint64_t u64; 399 struct cvmx_ipd_bist_status_s 400 { 401#if __BYTE_ORDER == __BIG_ENDIAN 402 uint64_t reserved_18_63 : 46; 403 uint64_t csr_mem : 1; /**< CSR Register Memory Bist Status. */ 404 uint64_t csr_ncmd : 1; /**< CSR NCB Commands Memory Bist Status. */ 405 uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */ 406 uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */ 407 uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */ 408 uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status. */ 409 uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */ 410 uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */ 411 uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */ 412 uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */ 413 uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */ 414 uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */ 415 uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */ 416 uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. 
*/ 417 uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */ 418 uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */ 419 uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */ 420 uint64_t pwp : 1; /**< PWP Memory Bist Status. */ 421#else 422 uint64_t pwp : 1; 423 uint64_t ipd_new : 1; 424 uint64_t ipd_old : 1; 425 uint64_t prc_off : 1; 426 uint64_t pwq0 : 1; 427 uint64_t pwq1 : 1; 428 uint64_t pbm_word : 1; 429 uint64_t pbm0 : 1; 430 uint64_t pbm1 : 1; 431 uint64_t pbm2 : 1; 432 uint64_t pbm3 : 1; 433 uint64_t ipq_pbe0 : 1; 434 uint64_t ipq_pbe1 : 1; 435 uint64_t pwq_pow : 1; 436 uint64_t pwq_wp1 : 1; 437 uint64_t pwq_wqed : 1; 438 uint64_t csr_ncmd : 1; 439 uint64_t csr_mem : 1; 440 uint64_t reserved_18_63 : 46; 441#endif 442 } s; 443 struct cvmx_ipd_bist_status_cn30xx 444 { 445#if __BYTE_ORDER == __BIG_ENDIAN 446 uint64_t reserved_16_63 : 48; 447 uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */ 448 uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */ 449 uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */ 450 uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status. */ 451 uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */ 452 uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */ 453 uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */ 454 uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */ 455 uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */ 456 uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */ 457 uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */ 458 uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. */ 459 uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */ 460 uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */ 461 uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */ 462 uint64_t pwp : 1; /**< PWP Memory Bist Status. 
*/ 463#else 464 uint64_t pwp : 1; 465 uint64_t ipd_new : 1; 466 uint64_t ipd_old : 1; 467 uint64_t prc_off : 1; 468 uint64_t pwq0 : 1; 469 uint64_t pwq1 : 1; 470 uint64_t pbm_word : 1; 471 uint64_t pbm0 : 1; 472 uint64_t pbm1 : 1; 473 uint64_t pbm2 : 1; 474 uint64_t pbm3 : 1; 475 uint64_t ipq_pbe0 : 1; 476 uint64_t ipq_pbe1 : 1; 477 uint64_t pwq_pow : 1; 478 uint64_t pwq_wp1 : 1; 479 uint64_t pwq_wqed : 1; 480 uint64_t reserved_16_63 : 48; 481#endif 482 } cn30xx; 483 struct cvmx_ipd_bist_status_cn30xx cn31xx; 484 struct cvmx_ipd_bist_status_cn30xx cn38xx; 485 struct cvmx_ipd_bist_status_cn30xx cn38xxp2; 486 struct cvmx_ipd_bist_status_cn30xx cn50xx; 487 struct cvmx_ipd_bist_status_s cn52xx; 488 struct cvmx_ipd_bist_status_s cn52xxp1; 489 struct cvmx_ipd_bist_status_s cn56xx; 490 struct cvmx_ipd_bist_status_s cn56xxp1; 491 struct cvmx_ipd_bist_status_cn30xx cn58xx; 492 struct cvmx_ipd_bist_status_cn30xx cn58xxp1; 493 struct cvmx_ipd_bist_status_s cn63xx; 494 struct cvmx_ipd_bist_status_s cn63xxp1; 495}; 496typedef union cvmx_ipd_bist_status cvmx_ipd_bist_status_t; 497 498/** 499 * cvmx_ipd_bp_prt_red_end 500 * 501 * IPD_BP_PRT_RED_END = IPD Backpressure Port RED Enable 502 * 503 * When IPD applies backpressure to a PORT and the corresponding bit in this register is set, 504 * the RED Unit will drop packets for that port. 505 */ 506union cvmx_ipd_bp_prt_red_end 507{ 508 uint64_t u64; 509 struct cvmx_ipd_bp_prt_red_end_s 510 { 511#if __BYTE_ORDER == __BIG_ENDIAN 512 uint64_t reserved_44_63 : 20; 513 uint64_t prt_enb : 44; /**< The port corresponding to the bit position in this 514 field will drop all NON-RAW packets to that port 515 when port level backpressure is applied to that 516 port. The applying of port-level backpressure for 517 this dropping does not take into consideration the 518 value of IPD_PORTX_BP_PAGE_CNT[BP_ENB], nor 519 IPD_RED_PORT_ENABLE[PRT_ENB]. 
*/ 520#else 521 uint64_t prt_enb : 44; 522 uint64_t reserved_44_63 : 20; 523#endif 524 } s; 525 struct cvmx_ipd_bp_prt_red_end_cn30xx 526 { 527#if __BYTE_ORDER == __BIG_ENDIAN 528 uint64_t reserved_36_63 : 28; 529 uint64_t prt_enb : 36; /**< The port corresponding to the bit position in this 530 field, will allow RED to drop back when port level 531 backpressure is applied to the port. The applying 532 of port-level backpressure for this RED dropping 533 does not take into consideration the value of 534 IPD_PORTX_BP_PAGE_CNT[BP_ENB]. */ 535#else 536 uint64_t prt_enb : 36; 537 uint64_t reserved_36_63 : 28; 538#endif 539 } cn30xx; 540 struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx; 541 struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx; 542 struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2; 543 struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx; 544 struct cvmx_ipd_bp_prt_red_end_cn52xx 545 { 546#if __BYTE_ORDER == __BIG_ENDIAN 547 uint64_t reserved_40_63 : 24; 548 uint64_t prt_enb : 40; /**< The port corresponding to the bit position in this 549 field, will allow RED to drop back when port level 550 backpressure is applied to the port. The applying 551 of port-level backpressure for this RED dropping 552 does not take into consideration the value of 553 IPD_PORTX_BP_PAGE_CNT[BP_ENB]. */ 554#else 555 uint64_t prt_enb : 40; 556 uint64_t reserved_40_63 : 24; 557#endif 558 } cn52xx; 559 struct cvmx_ipd_bp_prt_red_end_cn52xx cn52xxp1; 560 struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xx; 561 struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xxp1; 562 struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx; 563 struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1; 564 struct cvmx_ipd_bp_prt_red_end_s cn63xx; 565 struct cvmx_ipd_bp_prt_red_end_s cn63xxp1; 566}; 567typedef union cvmx_ipd_bp_prt_red_end cvmx_ipd_bp_prt_red_end_t; 568 569/** 570 * cvmx_ipd_clk_count 571 * 572 * IPD_CLK_COUNT = IPD Clock Count 573 * 574 * Counts the number of core clocks periods since the de-asserition of reset. 
575 */ 576union cvmx_ipd_clk_count 577{ 578 uint64_t u64; 579 struct cvmx_ipd_clk_count_s 580 { 581#if __BYTE_ORDER == __BIG_ENDIAN 582 uint64_t clk_cnt : 64; /**< This counter will be zeroed when reset is applied 583 and will increment every rising edge of the 584 core-clock. */ 585#else 586 uint64_t clk_cnt : 64; 587#endif 588 } s; 589 struct cvmx_ipd_clk_count_s cn30xx; 590 struct cvmx_ipd_clk_count_s cn31xx; 591 struct cvmx_ipd_clk_count_s cn38xx; 592 struct cvmx_ipd_clk_count_s cn38xxp2; 593 struct cvmx_ipd_clk_count_s cn50xx; 594 struct cvmx_ipd_clk_count_s cn52xx; 595 struct cvmx_ipd_clk_count_s cn52xxp1; 596 struct cvmx_ipd_clk_count_s cn56xx; 597 struct cvmx_ipd_clk_count_s cn56xxp1; 598 struct cvmx_ipd_clk_count_s cn58xx; 599 struct cvmx_ipd_clk_count_s cn58xxp1; 600 struct cvmx_ipd_clk_count_s cn63xx; 601 struct cvmx_ipd_clk_count_s cn63xxp1; 602}; 603typedef union cvmx_ipd_clk_count cvmx_ipd_clk_count_t; 604 605/** 606 * cvmx_ipd_ctl_status 607 * 608 * IPD_CTL_STATUS = IPD's Control Status Register 609 * 610 * The number of words in a MBUFF used for packet data store. 611 */ 612union cvmx_ipd_ctl_status 613{ 614 uint64_t u64; 615 struct cvmx_ipd_ctl_status_s 616 { 617#if __BYTE_ORDER == __BIG_ENDIAN 618 uint64_t reserved_18_63 : 46; 619 uint64_t use_sop : 1; /**< When '1' the SOP sent by the MAC will be used in 620 place of the SOP generated by the IPD. */ 621 uint64_t rst_done : 1; /**< When '0' IPD has finished reset. No access 622 except the reading of this bit should occur to the 623 IPD until this is asserted. Or a 1000 core clock 624 cycles has passed after the de-assertion of reset. */ 625 uint64_t clken : 1; /**< Controls the conditional clocking within IPD 626 0=Allow HW to control the clocks 627 1=Force the clocks to be always on */ 628 uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and 629 the WQE will be located at the front of the packet. 
630 When set: 631 - IPD_WQE_FPA_QUEUE[WQE_QUE] is not used 632 - IPD_1ST_MBUFF_SKIP[SKIP_SZ] must be at least 16 633 - If 16 <= IPD_1ST_MBUFF_SKIP[SKIP_SZ] <= 31 then 634 the WQE will be written into the first 128B 635 cache block in the first buffer that contains 636 the packet. 637 - If IPD_1ST_MBUFF_SKIP[SKIP_SZ] == 32 then 638 the WQE will be written into the second 128B 639 cache block in the first buffer that contains 640 the packet. */ 641 uint64_t pq_apkt : 1; /**< When set IPD_PORT_QOS_X_CNT WILL be incremented 642 by one for every work queue entry that is sent to 643 POW. */ 644 uint64_t pq_nabuf : 1; /**< When set IPD_PORT_QOS_X_CNT WILL NOT be 645 incremented when IPD allocates a buffer for a 646 packet. */ 647 uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normaly. 648 When set '1' the IPD drive the IPD_BUFF_FULL line to 649 the IOB-arbiter, telling it to not give grants to 650 NCB devices sending packet data. */ 651 uint64_t pkt_off : 1; /**< When clear '0' the IPD working normaly, 652 buffering the received packet data. When set '1' 653 the IPD will not buffer the received packet data. */ 654 uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the 655 data-length field in the header written to the 656 POW and the top of a MBUFF. 657 OCTEAN generates a length that includes the 658 length of the data + 8 for the header-field. By 659 setting this bit the 8 for the instr-field will 660 not be included in the length field of the header. 661 NOTE: IPD is compliant with the spec when this 662 field is '1'. */ 663 uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except 664 RSL. */ 665 uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, 666 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL], 667 IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and 668 IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL] 669 WILL be incremented by one for every work 670 queue entry that is sent to POW. 
*/ 671 uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, 672 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL], 673 IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and 674 IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL] 675 WILL NOT be incremented when IPD allocates a 676 buffer for a packet on the port. */ 677 uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ 678 uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ 679 uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables 680 the sending of port level backpressure to the 681 Octane input-ports. The application should NOT 682 de-assert this bit after asserting it. The 683 receivers of this bit may have been put into 684 backpressure mode and can only be released by 685 IPD informing them that the backpressure has 686 been released. 687 GMXX_INF_MODE[EN] must be set to '1' for each 688 packet interface which requires port back pressure 689 prior to setting PBP_EN to '1'. */ 690 cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) 691 is written through to memory. 692 1 ==> All packet data (and next buffer pointers) is 693 written into the cache. 694 2 ==> The first aligned cache block holding the 695 packet data (and initial next buffer pointer) is 696 written to the L2 cache, all remaining cache blocks 697 are not written to the L2 cache. 698 3 ==> The first two aligned cache blocks holding 699 the packet data (and initial next buffer pointer) 700 are written to the L2 cache, all remaining cache 701 blocks are not written to the L2 cache. */ 702 uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. 703 When clear '0', the IPD will appear to the 704 IOB-arbiter to be applying backpressure, this 705 causes the IOB-Arbiter to not send grants to NCB 706 devices requesting to send packet data to the IPD. 
*/ 707#else 708 uint64_t ipd_en : 1; 709 cvmx_ipd_mode_t opc_mode : 2; 710 uint64_t pbp_en : 1; 711 uint64_t wqe_lend : 1; 712 uint64_t pkt_lend : 1; 713 uint64_t naddbuf : 1; 714 uint64_t addpkt : 1; 715 uint64_t reset : 1; 716 uint64_t len_m8 : 1; 717 uint64_t pkt_off : 1; 718 uint64_t ipd_full : 1; 719 uint64_t pq_nabuf : 1; 720 uint64_t pq_apkt : 1; 721 uint64_t no_wptr : 1; 722 uint64_t clken : 1; 723 uint64_t rst_done : 1; 724 uint64_t use_sop : 1; 725 uint64_t reserved_18_63 : 46; 726#endif 727 } s; 728 struct cvmx_ipd_ctl_status_cn30xx 729 { 730#if __BYTE_ORDER == __BIG_ENDIAN 731 uint64_t reserved_10_63 : 54; 732 uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the 733 data-length field in the header written wo the 734 POW and the top of a MBUFF. 735 OCTEAN generates a length that includes the 736 length of the data + 8 for the header-field. By 737 setting this bit the 8 for the instr-field will 738 not be included in the length field of the header. 739 NOTE: IPD is compliant with the spec when this 740 field is '1'. */ 741 uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except 742 RSL. */ 743 uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, 744 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 745 WILL be incremented by one for every work 746 queue entry that is sent to POW. */ 747 uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, 748 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 749 WILL NOT be incremented when IPD allocates a 750 buffer for a packet on the port. */ 751 uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ 752 uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ 753 uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables 754 the sending of port level backpressure to the 755 Octane input-ports. Once enabled the sending of 756 port-level-backpressure can not be disabled by 757 changing the value of this bit. 
758 GMXX_INF_MODE[EN] must be set to '1' for each 759 packet interface which requires port back pressure 760 prior to setting PBP_EN to '1'. */ 761 cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) 762 is written through to memory. 763 1 ==> All packet data (and next buffer pointers) is 764 written into the cache. 765 2 ==> The first aligned cache block holding the 766 packet data (and initial next buffer pointer) is 767 written to the L2 cache, all remaining cache blocks 768 are not written to the L2 cache. 769 3 ==> The first two aligned cache blocks holding 770 the packet data (and initial next buffer pointer) 771 are written to the L2 cache, all remaining cache 772 blocks are not written to the L2 cache. */ 773 uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. */ 774#else 775 uint64_t ipd_en : 1; 776 cvmx_ipd_mode_t opc_mode : 2; 777 uint64_t pbp_en : 1; 778 uint64_t wqe_lend : 1; 779 uint64_t pkt_lend : 1; 780 uint64_t naddbuf : 1; 781 uint64_t addpkt : 1; 782 uint64_t reset : 1; 783 uint64_t len_m8 : 1; 784 uint64_t reserved_10_63 : 54; 785#endif 786 } cn30xx; 787 struct cvmx_ipd_ctl_status_cn30xx cn31xx; 788 struct cvmx_ipd_ctl_status_cn30xx cn38xx; 789 struct cvmx_ipd_ctl_status_cn38xxp2 790 { 791#if __BYTE_ORDER == __BIG_ENDIAN 792 uint64_t reserved_9_63 : 55; 793 uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except 794 RSL. */ 795 uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, 796 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 797 WILL be incremented by one for every work 798 queue entry that is sent to POW. 799 PASS-2 Field. */ 800 uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, 801 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 802 WILL NOT be incremented when IPD allocates a 803 buffer for a packet on the port. 804 PASS-2 Field. 
*/ 805 uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ 806 uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ 807 uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables 808 the sending of port level backpressure to the 809 Octane input-ports. Once enabled the sending of 810 port-level-backpressure can not be disabled by 811 changing the value of this bit. */ 812 cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) 813 is written through to memory. 814 1 ==> All packet data (and next buffer pointers) is 815 written into the cache. 816 2 ==> The first aligned cache block holding the 817 packet data (and initial next buffer pointer) is 818 written to the L2 cache, all remaining cache blocks 819 are not written to the L2 cache. 820 3 ==> The first two aligned cache blocks holding 821 the packet data (and initial next buffer pointer) 822 are written to the L2 cache, all remaining cache 823 blocks are not written to the L2 cache. */ 824 uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. */ 825#else 826 uint64_t ipd_en : 1; 827 cvmx_ipd_mode_t opc_mode : 2; 828 uint64_t pbp_en : 1; 829 uint64_t wqe_lend : 1; 830 uint64_t pkt_lend : 1; 831 uint64_t naddbuf : 1; 832 uint64_t addpkt : 1; 833 uint64_t reset : 1; 834 uint64_t reserved_9_63 : 55; 835#endif 836 } cn38xxp2; 837 struct cvmx_ipd_ctl_status_cn50xx 838 { 839#if __BYTE_ORDER == __BIG_ENDIAN 840 uint64_t reserved_15_63 : 49; 841 uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and 842 the WQE will be located at the front of the packet. */ 843 uint64_t pq_apkt : 1; /**< Reserved. */ 844 uint64_t pq_nabuf : 1; /**< Reserved. */ 845 uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normaly. 846 When set '1' the IPD drive the IPD_BUFF_FULL line to 847 the IOB-arbiter, telling it to not give grants to 848 NCB devices sending packet data. 
*/ 849 uint64_t pkt_off : 1; /**< When clear '0' the IPD working normaly, 850 buffering the received packet data. When set '1' 851 the IPD will not buffer the received packet data. */ 852 uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the 853 data-length field in the header written wo the 854 POW and the top of a MBUFF. 855 OCTEAN generates a length that includes the 856 length of the data + 8 for the header-field. By 857 setting this bit the 8 for the instr-field will 858 not be included in the length field of the header. 859 NOTE: IPD is compliant with the spec when this 860 field is '1'. */ 861 uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except 862 RSL. */ 863 uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, 864 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 865 WILL be incremented by one for every work 866 queue entry that is sent to POW. */ 867 uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, 868 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 869 WILL NOT be incremented when IPD allocates a 870 buffer for a packet on the port. */ 871 uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ 872 uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ 873 uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables 874 the sending of port level backpressure to the 875 Octane input-ports. Once enabled the sending of 876 port-level-backpressure can not be disabled by 877 changing the value of this bit. 878 GMXX_INF_MODE[EN] must be set to '1' for each 879 packet interface which requires port back pressure 880 prior to setting PBP_EN to '1'. */ 881 cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) 882 is written through to memory. 883 1 ==> All packet data (and next buffer pointers) is 884 written into the cache. 
885 2 ==> The first aligned cache block holding the 886 packet data (and initial next buffer pointer) is 887 written to the L2 cache, all remaining cache blocks 888 are not written to the L2 cache. 889 3 ==> The first two aligned cache blocks holding 890 the packet data (and initial next buffer pointer) 891 are written to the L2 cache, all remaining cache 892 blocks are not written to the L2 cache. */ 893 uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. 894 When clear '0', the IPD will appear to the 895 IOB-arbiter to be applying backpressure, this 896 causes the IOB-Arbiter to not send grants to NCB 897 devices requesting to send packet data to the IPD. */ 898#else 899 uint64_t ipd_en : 1; 900 cvmx_ipd_mode_t opc_mode : 2; 901 uint64_t pbp_en : 1; 902 uint64_t wqe_lend : 1; 903 uint64_t pkt_lend : 1; 904 uint64_t naddbuf : 1; 905 uint64_t addpkt : 1; 906 uint64_t reset : 1; 907 uint64_t len_m8 : 1; 908 uint64_t pkt_off : 1; 909 uint64_t ipd_full : 1; 910 uint64_t pq_nabuf : 1; 911 uint64_t pq_apkt : 1; 912 uint64_t no_wptr : 1; 913 uint64_t reserved_15_63 : 49; 914#endif 915 } cn50xx; 916 struct cvmx_ipd_ctl_status_cn50xx cn52xx; 917 struct cvmx_ipd_ctl_status_cn50xx cn52xxp1; 918 struct cvmx_ipd_ctl_status_cn50xx cn56xx; 919 struct cvmx_ipd_ctl_status_cn50xx cn56xxp1; 920 struct cvmx_ipd_ctl_status_cn58xx 921 { 922#if __BYTE_ORDER == __BIG_ENDIAN 923 uint64_t reserved_12_63 : 52; 924 uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normaly. 925 When set '1' the IPD drive the IPD_BUFF_FULL line to 926 the IOB-arbiter, telling it to not give grants to 927 NCB devices sending packet data. */ 928 uint64_t pkt_off : 1; /**< When clear '0' the IPD working normaly, 929 buffering the received packet data. When set '1' 930 the IPD will not buffer the received packet data. */ 931 uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the 932 data-length field in the header written wo the 933 POW and the top of a MBUFF. 
934 OCTEAN PASS2 generates a length that includes the 935 length of the data + 8 for the header-field. By 936 setting this bit the 8 for the instr-field will 937 not be included in the length field of the header. 938 NOTE: IPD is compliant with the spec when this 939 field is '1'. */ 940 uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except 941 RSL. */ 942 uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, 943 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 944 WILL be incremented by one for every work 945 queue entry that is sent to POW. 946 PASS-2 Field. */ 947 uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, 948 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] 949 WILL NOT be incremented when IPD allocates a 950 buffer for a packet on the port. 951 PASS-2 Field. */ 952 uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ 953 uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ 954 uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables 955 the sending of port level backpressure to the 956 Octane input-ports. Once enabled the sending of 957 port-level-backpressure can not be disabled by 958 changing the value of this bit. */ 959 cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) 960 is written through to memory. 961 1 ==> All packet data (and next buffer pointers) is 962 written into the cache. 963 2 ==> The first aligned cache block holding the 964 packet data (and initial next buffer pointer) is 965 written to the L2 cache, all remaining cache blocks 966 are not written to the L2 cache. 967 3 ==> The first two aligned cache blocks holding 968 the packet data (and initial next buffer pointer) 969 are written to the L2 cache, all remaining cache 970 blocks are not written to the L2 cache. */ 971 uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. 
972 When clear '0', the IPD will appear to the 973 IOB-arbiter to be applying backpressure, this 974 causes the IOB-Arbiter to not send grants to NCB 975 devices requesting to send packet data to the IPD. */ 976#else 977 uint64_t ipd_en : 1; 978 cvmx_ipd_mode_t opc_mode : 2; 979 uint64_t pbp_en : 1; 980 uint64_t wqe_lend : 1; 981 uint64_t pkt_lend : 1; 982 uint64_t naddbuf : 1; 983 uint64_t addpkt : 1; 984 uint64_t reset : 1; 985 uint64_t len_m8 : 1; 986 uint64_t pkt_off : 1; 987 uint64_t ipd_full : 1; 988 uint64_t reserved_12_63 : 52; 989#endif 990 } cn58xx; 991 struct cvmx_ipd_ctl_status_cn58xx cn58xxp1; 992 struct cvmx_ipd_ctl_status_s cn63xx; 993 struct cvmx_ipd_ctl_status_cn63xxp1 994 { 995#if __BYTE_ORDER == __BIG_ENDIAN 996 uint64_t reserved_16_63 : 48; 997 uint64_t clken : 1; /**< Controls the conditional clocking within IPD 998 0=Allow HW to control the clocks 999 1=Force the clocks to be always on */ 1000 uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and 1001 the WQE will be located at the front of the packet. 1002 When set: 1003 - IPD_WQE_FPA_QUEUE[WQE_QUE] is not used 1004 - IPD_1ST_MBUFF_SKIP[SKIP_SZ] must be at least 16 1005 - If 16 <= IPD_1ST_MBUFF_SKIP[SKIP_SZ] <= 31 then 1006 the WQE will be written into the first 128B 1007 cache block in the first buffer that contains 1008 the packet. 1009 - If IPD_1ST_MBUFF_SKIP[SKIP_SZ] == 32 then 1010 the WQE will be written into the second 128B 1011 cache block in the first buffer that contains 1012 the packet. */ 1013 uint64_t pq_apkt : 1; /**< When set IPD_PORT_QOS_X_CNT WILL be incremented 1014 by one for every work queue entry that is sent to 1015 POW. */ 1016 uint64_t pq_nabuf : 1; /**< When set IPD_PORT_QOS_X_CNT WILL NOT be 1017 incremented when IPD allocates a buffer for a 1018 packet. */ 1019 uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normaly. 
1020 When set '1' the IPD drive the IPD_BUFF_FULL line to 1021 the IOB-arbiter, telling it to not give grants to 1022 NCB devices sending packet data. */ 1023 uint64_t pkt_off : 1; /**< When clear '0' the IPD working normaly, 1024 buffering the received packet data. When set '1' 1025 the IPD will not buffer the received packet data. */ 1026 uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the 1027 data-length field in the header written to the 1028 POW and the top of a MBUFF. 1029 OCTEAN generates a length that includes the 1030 length of the data + 8 for the header-field. By 1031 setting this bit the 8 for the instr-field will 1032 not be included in the length field of the header. 1033 NOTE: IPD is compliant with the spec when this 1034 field is '1'. */ 1035 uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except 1036 RSL. */ 1037 uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, 1038 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL], 1039 IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and 1040 IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL] 1041 WILL be incremented by one for every work 1042 queue entry that is sent to POW. */ 1043 uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, 1044 IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL], 1045 IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and 1046 IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL] 1047 WILL NOT be incremented when IPD allocates a 1048 buffer for a packet on the port. */ 1049 uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ 1050 uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ 1051 uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables 1052 the sending of port level backpressure to the 1053 Octane input-ports. The application should NOT 1054 de-assert this bit after asserting it. 
The 1055 receivers of this bit may have been put into 1056 backpressure mode and can only be released by 1057 IPD informing them that the backpressure has 1058 been released. 1059 GMXX_INF_MODE[EN] must be set to '1' for each 1060 packet interface which requires port back pressure 1061 prior to setting PBP_EN to '1'. */ 1062 cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) 1063 is written through to memory. 1064 1 ==> All packet data (and next buffer pointers) is 1065 written into the cache. 1066 2 ==> The first aligned cache block holding the 1067 packet data (and initial next buffer pointer) is 1068 written to the L2 cache, all remaining cache blocks 1069 are not written to the L2 cache. 1070 3 ==> The first two aligned cache blocks holding 1071 the packet data (and initial next buffer pointer) 1072 are written to the L2 cache, all remaining cache 1073 blocks are not written to the L2 cache. */ 1074 uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. 1075 When clear '0', the IPD will appear to the 1076 IOB-arbiter to be applying backpressure, this 1077 causes the IOB-Arbiter to not send grants to NCB 1078 devices requesting to send packet data to the IPD. 
*/ 1079#else 1080 uint64_t ipd_en : 1; 1081 cvmx_ipd_mode_t opc_mode : 2; 1082 uint64_t pbp_en : 1; 1083 uint64_t wqe_lend : 1; 1084 uint64_t pkt_lend : 1; 1085 uint64_t naddbuf : 1; 1086 uint64_t addpkt : 1; 1087 uint64_t reset : 1; 1088 uint64_t len_m8 : 1; 1089 uint64_t pkt_off : 1; 1090 uint64_t ipd_full : 1; 1091 uint64_t pq_nabuf : 1; 1092 uint64_t pq_apkt : 1; 1093 uint64_t no_wptr : 1; 1094 uint64_t clken : 1; 1095 uint64_t reserved_16_63 : 48; 1096#endif 1097 } cn63xxp1; 1098}; 1099typedef union cvmx_ipd_ctl_status cvmx_ipd_ctl_status_t; 1100 1101/** 1102 * cvmx_ipd_int_enb 1103 * 1104 * IPD_INTERRUPT_ENB = IPD Interrupt Enable Register 1105 * 1106 * Used to enable the various interrupting conditions of IPD 1107 */ 1108union cvmx_ipd_int_enb 1109{ 1110 uint64_t u64; 1111 struct cvmx_ipd_int_enb_s 1112 { 1113#if __BYTE_ORDER == __BIG_ENDIAN 1114 uint64_t reserved_12_63 : 52; 1115 uint64_t pq_sub : 1; /**< Allows an interrupt to be sent when the 1116 corresponding bit in the IPD_INT_SUM is set. */ 1117 uint64_t pq_add : 1; /**< Allows an interrupt to be sent when the 1118 corresponding bit in the IPD_INT_SUM is set. */ 1119 uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the 1120 corresponding bit in the IPD_INT_SUM is set. */ 1121 uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the 1122 corresponding bit in the IPD_INT_SUM is set. */ 1123 uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the 1124 corresponding bit in the IPD_INT_SUM is set. */ 1125 uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the 1126 corresponding bit in the IPD_INT_SUM is set. */ 1127 uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the 1128 corresponding bit in the IPD_INT_SUM is set. */ 1129 uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract 1130 has an illegal value. */ 1131 uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits 1132 [127:96] of the PBM memory. 
*/ 1133 uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits 1134 [95:64] of the PBM memory. */ 1135 uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits 1136 [63:32] of the PBM memory. */ 1137 uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits 1138 [31:0] of the PBM memory. */ 1139#else 1140 uint64_t prc_par0 : 1; 1141 uint64_t prc_par1 : 1; 1142 uint64_t prc_par2 : 1; 1143 uint64_t prc_par3 : 1; 1144 uint64_t bp_sub : 1; 1145 uint64_t dc_ovr : 1; 1146 uint64_t cc_ovr : 1; 1147 uint64_t c_coll : 1; 1148 uint64_t d_coll : 1; 1149 uint64_t bc_ovr : 1; 1150 uint64_t pq_add : 1; 1151 uint64_t pq_sub : 1; 1152 uint64_t reserved_12_63 : 52; 1153#endif 1154 } s; 1155 struct cvmx_ipd_int_enb_cn30xx 1156 { 1157#if __BYTE_ORDER == __BIG_ENDIAN 1158 uint64_t reserved_5_63 : 59; 1159 uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract 1160 has an illegal value. */ 1161 uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits 1162 [127:96] of the PBM memory. */ 1163 uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits 1164 [95:64] of the PBM memory. */ 1165 uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits 1166 [63:32] of the PBM memory. */ 1167 uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits 1168 [31:0] of the PBM memory. */ 1169#else 1170 uint64_t prc_par0 : 1; 1171 uint64_t prc_par1 : 1; 1172 uint64_t prc_par2 : 1; 1173 uint64_t prc_par3 : 1; 1174 uint64_t bp_sub : 1; 1175 uint64_t reserved_5_63 : 59; 1176#endif 1177 } cn30xx; 1178 struct cvmx_ipd_int_enb_cn30xx cn31xx; 1179 struct cvmx_ipd_int_enb_cn38xx 1180 { 1181#if __BYTE_ORDER == __BIG_ENDIAN 1182 uint64_t reserved_10_63 : 54; 1183 uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the 1184 corresponding bit in the IPD_INT_SUM is set. 1185 This is a PASS-3 Field. 
*/ 1186 uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the 1187 corresponding bit in the IPD_INT_SUM is set. 1188 This is a PASS-3 Field. */ 1189 uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the 1190 corresponding bit in the IPD_INT_SUM is set. 1191 This is a PASS-3 Field. */ 1192 uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the 1193 corresponding bit in the IPD_INT_SUM is set. 1194 This is a PASS-3 Field. */ 1195 uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the 1196 corresponding bit in the IPD_INT_SUM is set. 1197 This is a PASS-3 Field. */ 1198 uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract 1199 has an illegal value. */ 1200 uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits 1201 [127:96] of the PBM memory. */ 1202 uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits 1203 [95:64] of the PBM memory. */ 1204 uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits 1205 [63:32] of the PBM memory. */ 1206 uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits 1207 [31:0] of the PBM memory. 
*/ 1208#else 1209 uint64_t prc_par0 : 1; 1210 uint64_t prc_par1 : 1; 1211 uint64_t prc_par2 : 1; 1212 uint64_t prc_par3 : 1; 1213 uint64_t bp_sub : 1; 1214 uint64_t dc_ovr : 1; 1215 uint64_t cc_ovr : 1; 1216 uint64_t c_coll : 1; 1217 uint64_t d_coll : 1; 1218 uint64_t bc_ovr : 1; 1219 uint64_t reserved_10_63 : 54; 1220#endif 1221 } cn38xx; 1222 struct cvmx_ipd_int_enb_cn30xx cn38xxp2; 1223 struct cvmx_ipd_int_enb_cn38xx cn50xx; 1224 struct cvmx_ipd_int_enb_s cn52xx; 1225 struct cvmx_ipd_int_enb_s cn52xxp1; 1226 struct cvmx_ipd_int_enb_s cn56xx; 1227 struct cvmx_ipd_int_enb_s cn56xxp1; 1228 struct cvmx_ipd_int_enb_cn38xx cn58xx; 1229 struct cvmx_ipd_int_enb_cn38xx cn58xxp1; 1230 struct cvmx_ipd_int_enb_s cn63xx; 1231 struct cvmx_ipd_int_enb_s cn63xxp1; 1232}; 1233typedef union cvmx_ipd_int_enb cvmx_ipd_int_enb_t; 1234 1235/** 1236 * cvmx_ipd_int_sum 1237 * 1238 * IPD_INTERRUPT_SUM = IPD Interrupt Summary Register 1239 * 1240 * Set when an interrupt condition occurs, write '1' to clear. 1241 */ 1242union cvmx_ipd_int_sum 1243{ 1244 uint64_t u64; 1245 struct cvmx_ipd_int_sum_s 1246 { 1247#if __BYTE_ORDER == __BIG_ENDIAN 1248 uint64_t reserved_12_63 : 52; 1249 uint64_t pq_sub : 1; /**< Set when a port-qos does an sub to the count 1250 that causes the counter to wrap. */ 1251 uint64_t pq_add : 1; /**< Set when a port-qos does an add to the count 1252 that causes the counter to wrap. */ 1253 uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows. */ 1254 uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB 1255 collides. */ 1256 uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB 1257 collides. */ 1258 uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow. */ 1259 uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow. */ 1260 uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a 1261 supplied illegal value. 
*/ 1262 uint64_t prc_par3 : 1; /**< Set when a parity error is dected for bits 1263 [127:96] of the PBM memory. */ 1264 uint64_t prc_par2 : 1; /**< Set when a parity error is dected for bits 1265 [95:64] of the PBM memory. */ 1266 uint64_t prc_par1 : 1; /**< Set when a parity error is dected for bits 1267 [63:32] of the PBM memory. */ 1268 uint64_t prc_par0 : 1; /**< Set when a parity error is dected for bits 1269 [31:0] of the PBM memory. */ 1270#else 1271 uint64_t prc_par0 : 1; 1272 uint64_t prc_par1 : 1; 1273 uint64_t prc_par2 : 1; 1274 uint64_t prc_par3 : 1; 1275 uint64_t bp_sub : 1; 1276 uint64_t dc_ovr : 1; 1277 uint64_t cc_ovr : 1; 1278 uint64_t c_coll : 1; 1279 uint64_t d_coll : 1; 1280 uint64_t bc_ovr : 1; 1281 uint64_t pq_add : 1; 1282 uint64_t pq_sub : 1; 1283 uint64_t reserved_12_63 : 52; 1284#endif 1285 } s; 1286 struct cvmx_ipd_int_sum_cn30xx 1287 { 1288#if __BYTE_ORDER == __BIG_ENDIAN 1289 uint64_t reserved_5_63 : 59; 1290 uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a 1291 supplied illegal value. */ 1292 uint64_t prc_par3 : 1; /**< Set when a parity error is dected for bits 1293 [127:96] of the PBM memory. */ 1294 uint64_t prc_par2 : 1; /**< Set when a parity error is dected for bits 1295 [95:64] of the PBM memory. */ 1296 uint64_t prc_par1 : 1; /**< Set when a parity error is dected for bits 1297 [63:32] of the PBM memory. */ 1298 uint64_t prc_par0 : 1; /**< Set when a parity error is dected for bits 1299 [31:0] of the PBM memory. */ 1300#else 1301 uint64_t prc_par0 : 1; 1302 uint64_t prc_par1 : 1; 1303 uint64_t prc_par2 : 1; 1304 uint64_t prc_par3 : 1; 1305 uint64_t bp_sub : 1; 1306 uint64_t reserved_5_63 : 59; 1307#endif 1308 } cn30xx; 1309 struct cvmx_ipd_int_sum_cn30xx cn31xx; 1310 struct cvmx_ipd_int_sum_cn38xx 1311 { 1312#if __BYTE_ORDER == __BIG_ENDIAN 1313 uint64_t reserved_10_63 : 54; 1314 uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows. 1315 This is a PASS-3 Field. 
*/ 1316 uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB 1317 collides. 1318 This is a PASS-3 Field. */ 1319 uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB 1320 collides. 1321 This is a PASS-3 Field. */ 1322 uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow. 1323 This is a PASS-3 Field. */ 1324 uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow. 1325 This is a PASS-3 Field. */ 1326 uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a 1327 supplied illegal value. */ 1328 uint64_t prc_par3 : 1; /**< Set when a parity error is dected for bits 1329 [127:96] of the PBM memory. */ 1330 uint64_t prc_par2 : 1; /**< Set when a parity error is dected for bits 1331 [95:64] of the PBM memory. */ 1332 uint64_t prc_par1 : 1; /**< Set when a parity error is dected for bits 1333 [63:32] of the PBM memory. */ 1334 uint64_t prc_par0 : 1; /**< Set when a parity error is dected for bits 1335 [31:0] of the PBM memory. 
*/ 1336#else 1337 uint64_t prc_par0 : 1; 1338 uint64_t prc_par1 : 1; 1339 uint64_t prc_par2 : 1; 1340 uint64_t prc_par3 : 1; 1341 uint64_t bp_sub : 1; 1342 uint64_t dc_ovr : 1; 1343 uint64_t cc_ovr : 1; 1344 uint64_t c_coll : 1; 1345 uint64_t d_coll : 1; 1346 uint64_t bc_ovr : 1; 1347 uint64_t reserved_10_63 : 54; 1348#endif 1349 } cn38xx; 1350 struct cvmx_ipd_int_sum_cn30xx cn38xxp2; 1351 struct cvmx_ipd_int_sum_cn38xx cn50xx; 1352 struct cvmx_ipd_int_sum_s cn52xx; 1353 struct cvmx_ipd_int_sum_s cn52xxp1; 1354 struct cvmx_ipd_int_sum_s cn56xx; 1355 struct cvmx_ipd_int_sum_s cn56xxp1; 1356 struct cvmx_ipd_int_sum_cn38xx cn58xx; 1357 struct cvmx_ipd_int_sum_cn38xx cn58xxp1; 1358 struct cvmx_ipd_int_sum_s cn63xx; 1359 struct cvmx_ipd_int_sum_s cn63xxp1; 1360}; 1361typedef union cvmx_ipd_int_sum cvmx_ipd_int_sum_t; 1362 1363/** 1364 * cvmx_ipd_not_1st_mbuff_skip 1365 * 1366 * IPD_NOT_1ST_MBUFF_SKIP = IPD Not First MBUFF Word Skip Size 1367 * 1368 * The number of words that the IPD will skip when writing any MBUFF that is not the first. 1369 */ 1370union cvmx_ipd_not_1st_mbuff_skip 1371{ 1372 uint64_t u64; 1373 struct cvmx_ipd_not_1st_mbuff_skip_s 1374 { 1375#if __BYTE_ORDER == __BIG_ENDIAN 1376 uint64_t reserved_6_63 : 58; 1377 uint64_t skip_sz : 6; /**< The number of 8-byte words from the top of any 1378 MBUFF, that is not the 1st MBUFF, that the IPD 1379 will write the next-pointer. 1380 Legal values are 0 to 32, where the MAX value 1381 is also limited to: 1382 IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 16. 
*/ 1383#else 1384 uint64_t skip_sz : 6; 1385 uint64_t reserved_6_63 : 58; 1386#endif 1387 } s; 1388 struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx; 1389 struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx; 1390 struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx; 1391 struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2; 1392 struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx; 1393 struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx; 1394 struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1; 1395 struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx; 1396 struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1; 1397 struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx; 1398 struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1; 1399 struct cvmx_ipd_not_1st_mbuff_skip_s cn63xx; 1400 struct cvmx_ipd_not_1st_mbuff_skip_s cn63xxp1; 1401}; 1402typedef union cvmx_ipd_not_1st_mbuff_skip cvmx_ipd_not_1st_mbuff_skip_t; 1403 1404/** 1405 * cvmx_ipd_packet_mbuff_size 1406 * 1407 * IPD_PACKET_MBUFF_SIZE = IPD's PACKET MUBUF Size In Words 1408 * 1409 * The number of words in a MBUFF used for packet data store. 1410 */ 1411union cvmx_ipd_packet_mbuff_size 1412{ 1413 uint64_t u64; 1414 struct cvmx_ipd_packet_mbuff_size_s 1415 { 1416#if __BYTE_ORDER == __BIG_ENDIAN 1417 uint64_t reserved_12_63 : 52; 1418 uint64_t mb_size : 12; /**< The number of 8-byte words in a MBUF. 1419 This must be a number in the range of 32 to 1420 2048. 1421 This is also the size of the FPA's 1422 Queue-0 Free-Page. 
*/ 1423#else 1424 uint64_t mb_size : 12; 1425 uint64_t reserved_12_63 : 52; 1426#endif 1427 } s; 1428 struct cvmx_ipd_packet_mbuff_size_s cn30xx; 1429 struct cvmx_ipd_packet_mbuff_size_s cn31xx; 1430 struct cvmx_ipd_packet_mbuff_size_s cn38xx; 1431 struct cvmx_ipd_packet_mbuff_size_s cn38xxp2; 1432 struct cvmx_ipd_packet_mbuff_size_s cn50xx; 1433 struct cvmx_ipd_packet_mbuff_size_s cn52xx; 1434 struct cvmx_ipd_packet_mbuff_size_s cn52xxp1; 1435 struct cvmx_ipd_packet_mbuff_size_s cn56xx; 1436 struct cvmx_ipd_packet_mbuff_size_s cn56xxp1; 1437 struct cvmx_ipd_packet_mbuff_size_s cn58xx; 1438 struct cvmx_ipd_packet_mbuff_size_s cn58xxp1; 1439 struct cvmx_ipd_packet_mbuff_size_s cn63xx; 1440 struct cvmx_ipd_packet_mbuff_size_s cn63xxp1; 1441}; 1442typedef union cvmx_ipd_packet_mbuff_size cvmx_ipd_packet_mbuff_size_t; 1443 1444/** 1445 * cvmx_ipd_pkt_ptr_valid 1446 * 1447 * IPD_PKT_PTR_VALID = IPD's Packet Pointer Valid 1448 * 1449 * The value of the packet-pointer fetched and in the valid register. 1450 */ 1451union cvmx_ipd_pkt_ptr_valid 1452{ 1453 uint64_t u64; 1454 struct cvmx_ipd_pkt_ptr_valid_s 1455 { 1456#if __BYTE_ORDER == __BIG_ENDIAN 1457 uint64_t reserved_29_63 : 35; 1458 uint64_t ptr : 29; /**< Pointer value. 
*/
#else
    uint64_t ptr : 29;
    uint64_t reserved_29_63 : 35;
#endif
    } s;
    struct cvmx_ipd_pkt_ptr_valid_s cn30xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn31xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn38xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn50xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn52xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1;
    struct cvmx_ipd_pkt_ptr_valid_s cn56xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
    struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
    struct cvmx_ipd_pkt_ptr_valid_s cn63xx;
    struct cvmx_ipd_pkt_ptr_valid_s cn63xxp1;
};
typedef union cvmx_ipd_pkt_ptr_valid cvmx_ipd_pkt_ptr_valid_t;

/**
 * cvmx_ipd_port#_bp_page_cnt
 *
 * IPD_PORTX_BP_PAGE_CNT = IPD Port Backpressure Page Count
 *
 * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port.
 * See also IPD_PORTX_BP_PAGE_CNT2
 * See also IPD_PORTX_BP_PAGE_CNT3
 */
union cvmx_ipd_portx_bp_page_cnt
{
    uint64_t u64;
    struct cvmx_ipd_portx_bp_page_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_18_63 : 46;
    uint64_t bp_enb : 1;    /**< When set '1' BP will be applied; if '0' BP will
                                 not be applied to the port. */
    uint64_t page_cnt : 17; /**< The number of page pointers assigned to
                                 the port that, when exceeded, will cause
                                 back-pressure to be applied to the port.
                                 This value is in 256 page-pointer increments
                                 (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
#else
    uint64_t page_cnt : 17;
    uint64_t bp_enb : 1;
    uint64_t reserved_18_63 : 46;
#endif
    } s;
    struct cvmx_ipd_portx_bp_page_cnt_s cn30xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn31xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn38xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2;
    struct cvmx_ipd_portx_bp_page_cnt_s cn50xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn52xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1;
    struct cvmx_ipd_portx_bp_page_cnt_s cn56xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
    struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
    struct cvmx_ipd_portx_bp_page_cnt_s cn63xx;
    struct cvmx_ipd_portx_bp_page_cnt_s cn63xxp1;
};
typedef union cvmx_ipd_portx_bp_page_cnt cvmx_ipd_portx_bp_page_cnt_t;

/**
 * cvmx_ipd_port#_bp_page_cnt2
 *
 * IPD_PORTX_BP_PAGE_CNT2 = IPD Port Backpressure Page Count
 *
 * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port.
 * See also IPD_PORTX_BP_PAGE_CNT
 * See also IPD_PORTX_BP_PAGE_CNT3
 */
union cvmx_ipd_portx_bp_page_cnt2
{
    uint64_t u64;
    struct cvmx_ipd_portx_bp_page_cnt2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_18_63 : 46;
    uint64_t bp_enb : 1;    /**< When set '1' BP will be applied; if '0' BP will
                                 not be applied to the port. */
    uint64_t page_cnt : 17; /**< The number of page pointers assigned to
                                 the port that, when exceeded, will cause
                                 back-pressure to be applied to the port.
                                 This value is in 256 page-pointer increments
                                 (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
#else
    uint64_t page_cnt : 17;
    uint64_t bp_enb : 1;
    uint64_t reserved_18_63 : 46;
#endif
    } s;
    struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx;
    struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
    struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
    struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
    struct cvmx_ipd_portx_bp_page_cnt2_s cn63xx;
    struct cvmx_ipd_portx_bp_page_cnt2_s cn63xxp1;
};
typedef union cvmx_ipd_portx_bp_page_cnt2 cvmx_ipd_portx_bp_page_cnt2_t;

/**
 * cvmx_ipd_port#_bp_page_cnt3
 *
 * IPD_PORTX_BP_PAGE_CNT3 = IPD Port Backpressure Page Count
 *
 * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port.
 * See also IPD_PORTX_BP_PAGE_CNT
 * See also IPD_PORTX_BP_PAGE_CNT2
 */
union cvmx_ipd_portx_bp_page_cnt3
{
    uint64_t u64;
    struct cvmx_ipd_portx_bp_page_cnt3_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_18_63 : 46;
    uint64_t bp_enb : 1;    /**< When set '1' BP will be applied; if '0' BP will
                                 not be applied to the port. */
    uint64_t page_cnt : 17; /**< The number of page pointers assigned to
                                 the port that, when exceeded, will cause
                                 back-pressure to be applied to the port.
                                 This value is in 256 page-pointer increments
                                 (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
#else
    uint64_t page_cnt : 17;
    uint64_t bp_enb : 1;
    uint64_t reserved_18_63 : 46;
#endif
    } s;
    struct cvmx_ipd_portx_bp_page_cnt3_s cn63xx;
    struct cvmx_ipd_portx_bp_page_cnt3_s cn63xxp1;
};
typedef union cvmx_ipd_portx_bp_page_cnt3 cvmx_ipd_portx_bp_page_cnt3_t;

/**
 * cvmx_ipd_port_bp_counters2_pair#
 *
 * IPD_PORT_BP_COUNTERS2_PAIRX = MBUF Counters port Ports used to generate Back Pressure Per Port.
 * See also IPD_PORT_BP_COUNTERS_PAIRX
 * See also IPD_PORT_BP_COUNTERS3_PAIRX
 */
union cvmx_ipd_port_bp_counters2_pairx
{
    uint64_t u64;
    struct cvmx_ipd_port_bp_counters2_pairx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_25_63 : 39;
    uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
#else
    uint64_t cnt_val : 25;
    uint64_t reserved_25_63 : 39;
#endif
    } s;
    struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx;
    struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
    struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
    struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
    struct cvmx_ipd_port_bp_counters2_pairx_s cn63xx;
    struct cvmx_ipd_port_bp_counters2_pairx_s cn63xxp1;
};
typedef union cvmx_ipd_port_bp_counters2_pairx cvmx_ipd_port_bp_counters2_pairx_t;

/**
 * cvmx_ipd_port_bp_counters3_pair#
 *
 * IPD_PORT_BP_COUNTERS3_PAIRX = MBUF Counters port Ports used to generate Back Pressure Per Port.
 * See also IPD_PORT_BP_COUNTERS_PAIRX
 * See also IPD_PORT_BP_COUNTERS2_PAIRX
 */
union cvmx_ipd_port_bp_counters3_pairx
{
    uint64_t u64;
    struct cvmx_ipd_port_bp_counters3_pairx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_25_63 : 39;
    uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
#else
    uint64_t cnt_val : 25;
    uint64_t reserved_25_63 : 39;
#endif
    } s;
    struct cvmx_ipd_port_bp_counters3_pairx_s cn63xx;
    struct cvmx_ipd_port_bp_counters3_pairx_s cn63xxp1;
};
typedef union cvmx_ipd_port_bp_counters3_pairx cvmx_ipd_port_bp_counters3_pairx_t;

/**
 * cvmx_ipd_port_bp_counters_pair#
 *
 * IPD_PORT_BP_COUNTERS_PAIRX = MBUF Counters port Ports used to generate Back Pressure Per Port.
 * See also IPD_PORT_BP_COUNTERS2_PAIRX
 * See also IPD_PORT_BP_COUNTERS3_PAIRX
 */
union cvmx_ipd_port_bp_counters_pairx
{
    uint64_t u64;
    struct cvmx_ipd_port_bp_counters_pairx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_25_63 : 39;
    uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
#else
    uint64_t cnt_val : 25;
    uint64_t reserved_25_63 : 39;
#endif
    } s;
    struct cvmx_ipd_port_bp_counters_pairx_s cn30xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn31xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn38xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2;
    struct cvmx_ipd_port_bp_counters_pairx_s cn50xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn52xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1;
    struct cvmx_ipd_port_bp_counters_pairx_s cn56xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
    struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
    struct cvmx_ipd_port_bp_counters_pairx_s cn63xx;
    struct cvmx_ipd_port_bp_counters_pairx_s cn63xxp1;
};
typedef union cvmx_ipd_port_bp_counters_pairx cvmx_ipd_port_bp_counters_pairx_t;

/**
 * cvmx_ipd_port_qos_#_cnt
 *
 * IPD_PORT_QOS_X_CNT = IPD PortX QOS-0 Count
 *
 * A counter per port/qos. Counters are organized in sequence where the first 8 counters (0-7) belong to Port-0
 * QOS 0-7 respectively followed by port 1 at (8-15), etc
 * Ports 0-3, 32-43
 */
union cvmx_ipd_port_qos_x_cnt
{
    uint64_t u64;
    struct cvmx_ipd_port_qos_x_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t wmark : 32; /**< When the field CNT after being modified is equal to
                              or crosses this value (i.e. value was greater than
                              then becomes less than, or value was less than and
                              becomes greater than) the corresponding bit in
                              IPD_PORT_QOS_INTX is set. */
    uint64_t cnt : 32;   /**< The packet related count that is incremented as
                              specified by IPD_SUB_PORT_QOS_CNT. */
#else
    uint64_t cnt : 32;
    uint64_t wmark : 32;
#endif
    } s;
    struct cvmx_ipd_port_qos_x_cnt_s cn52xx;
    struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
    struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
    struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
    struct cvmx_ipd_port_qos_x_cnt_s cn63xx;
    struct cvmx_ipd_port_qos_x_cnt_s cn63xxp1;
};
typedef union cvmx_ipd_port_qos_x_cnt cvmx_ipd_port_qos_x_cnt_t;

/**
 * cvmx_ipd_port_qos_int#
 *
 * IPD_PORT_QOS_INTX = IPD PORT-QOS Interrupt
 *
 * See the description for IPD_PORT_QOS_X_CNT
 *
 * 0=P0-7; 1=P8-15; 2=P16-23; 3=P24-31; 4=P32-39; 5=P40-47; 6=P48-55; 7=P56-63
 *
 * Only ports used are: P0-3, P32-39, and P40-43. Therefore only IPD_PORT_QOS_INT0, IPD_PORT_QOS_INT4,
 * and IPD_PORT_QOS_INT5 exist and, furthermore: <63:32> of IPD_PORT_QOS_INT0 and IPD_PORT_QOS_INT5,
 * are reserved.
 */
union cvmx_ipd_port_qos_intx
{
    uint64_t u64;
    struct cvmx_ipd_port_qos_intx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t intr : 64; /**< Interrupt bits.
*/
#else
    uint64_t intr : 64;
#endif
    } s;
    struct cvmx_ipd_port_qos_intx_s cn52xx;
    struct cvmx_ipd_port_qos_intx_s cn52xxp1;
    struct cvmx_ipd_port_qos_intx_s cn56xx;
    struct cvmx_ipd_port_qos_intx_s cn56xxp1;
    struct cvmx_ipd_port_qos_intx_s cn63xx;
    struct cvmx_ipd_port_qos_intx_s cn63xxp1;
};
typedef union cvmx_ipd_port_qos_intx cvmx_ipd_port_qos_intx_t;

/**
 * cvmx_ipd_port_qos_int_enb#
 *
 * IPD_PORT_QOS_INT_ENBX = IPD PORT-QOS Interrupt Enable
 *
 * When the IPD_PORT_QOS_INTX[\#] is '1' and IPD_PORT_QOS_INT_ENBX[\#] is '1' an interrupt will be generated.
 */
union cvmx_ipd_port_qos_int_enbx
{
    uint64_t u64;
    struct cvmx_ipd_port_qos_int_enbx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t enb : 64; /**< Enable bits. */
#else
    uint64_t enb : 64;
#endif
    } s;
    struct cvmx_ipd_port_qos_int_enbx_s cn52xx;
    struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
    struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
    struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
    struct cvmx_ipd_port_qos_int_enbx_s cn63xx;
    struct cvmx_ipd_port_qos_int_enbx_s cn63xxp1;
};
typedef union cvmx_ipd_port_qos_int_enbx cvmx_ipd_port_qos_int_enbx_t;

/**
 * cvmx_ipd_prc_hold_ptr_fifo_ctl
 *
 * IPD_PRC_HOLD_PTR_FIFO_CTL = IPD's PRC Holding Pointer FIFO Control
 *
 * Allows reading of the Page-Pointers stored in the IPD's PRC Holding Fifo.
 */
union cvmx_ipd_prc_hold_ptr_fifo_ctl
{
    uint64_t u64;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_39_63 : 25;
    uint64_t max_pkt : 3; /**< Maximum number of Packet-Pointers that COULD be
                               in the FIFO. */
    uint64_t praddr : 3;  /**< Present Packet-Pointer read address. */
    uint64_t ptr : 29;    /**< The output of the prc-holding-fifo. */
    uint64_t cena : 1;    /**< Active low Chip Enable that controls the
                               MUX-select that steers [RADDR] to the fifo.
                               *WARNING - Setting this field to '0' will allow
                               reading of the memories through the PTR field,
                               but will cause unpredictable operation of the IPD
                               under normal operation. */
    uint64_t raddr : 3;   /**< Sets the address to read from in the holding
                               fifo in the PRC. This FIFO holds Packet-Pointers
                               to be used for packet data storage. */
#else
    uint64_t raddr : 3;
    uint64_t cena : 1;
    uint64_t ptr : 29;
    uint64_t praddr : 3;
    uint64_t max_pkt : 3;
    uint64_t reserved_39_63 : 25;
#endif
    } s;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xx;
    struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xxp1;
};
typedef union cvmx_ipd_prc_hold_ptr_fifo_ctl cvmx_ipd_prc_hold_ptr_fifo_ctl_t;

/**
 * cvmx_ipd_prc_port_ptr_fifo_ctl
 *
 * IPD_PRC_PORT_PTR_FIFO_CTL = IPD's PRC PORT Pointer FIFO Control
 *
 * Allows reading of the Page-Pointers stored in the IPD's PRC PORT Fifo.
 */
union cvmx_ipd_prc_port_ptr_fifo_ctl
{
    uint64_t u64;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_44_63 : 20;
    uint64_t max_pkt : 7; /**< Maximum number of Packet-Pointers that are
                               in the FIFO. */
    uint64_t ptr : 29;    /**< The output of the prc-port-ptr-fifo. */
    uint64_t cena : 1;    /**< Active low Chip Enable to the read port of the
                               pwp_fifo. This bit also controls the MUX-select
                               that steers [RADDR] to the pwp_fifo.
                               *WARNING - Setting this field to '0' will allow
                               reading of the memories through the PTR field,
                               but will cause unpredictable operation of the IPD
                               under normal operation. */
    uint64_t raddr : 7;   /**< Sets the address to read from in the port
                               fifo in the PRC. This FIFO holds Packet-Pointers
                               to be used for packet data storage. */
#else
    uint64_t raddr : 7;
    uint64_t cena : 1;
    uint64_t ptr : 29;
    uint64_t max_pkt : 7;
    uint64_t reserved_44_63 : 20;
#endif
    } s;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xx;
    struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xxp1;
};
typedef union cvmx_ipd_prc_port_ptr_fifo_ctl cvmx_ipd_prc_port_ptr_fifo_ctl_t;

/**
 * cvmx_ipd_ptr_count
 *
 * IPD_PTR_COUNT = IPD Page Pointer Count
 *
 * Shows the number of WQE and Packet Page Pointers stored in the IPD.
 */
union cvmx_ipd_ptr_count
{
    uint64_t u64;
    struct cvmx_ipd_ptr_count_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63 : 45;
    uint64_t pktv_cnt : 1; /**< PKT Ptr Valid. */
    uint64_t wqev_cnt : 1; /**< WQE Ptr Valid. This value is '1' when a WQE
                                is being for use by the IPD. The value of this
                                field should be added to the value of the
                                WQE_PCNT field, of this register, for a total
                                count of the WQE Page Pointers being held by IPD.
                                When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
                                represents a Packet-Pointer NOT a WQE pointer. */
    uint64_t pfif_cnt : 3; /**< See PKT_PCNT. */
    uint64_t pkt_pcnt : 7; /**< This value plus PFIF_CNT plus
                                IPD_PRC_PORT_PTR_FIFO_CTL[MAX_PKT] is the number
                                of PKT Page Pointers in IPD. */
    uint64_t wqe_pcnt : 7; /**< Number of page pointers for WQE storage that are
                                buffered in the IPD. The total count is the value
                                of this buffer plus the field [WQEV_CNT]. For
                                PASS-1 (which does not have the WQEV_CNT field)
                                when the value of this register is '0' there still
                                may be 1 pointer being held by IPD. */
#else
    uint64_t wqe_pcnt : 7;
    uint64_t pkt_pcnt : 7;
    uint64_t pfif_cnt : 3;
    uint64_t wqev_cnt : 1;
    uint64_t pktv_cnt : 1;
    uint64_t reserved_19_63 : 45;
#endif
    } s;
    struct cvmx_ipd_ptr_count_s cn30xx;
    struct cvmx_ipd_ptr_count_s cn31xx;
    struct cvmx_ipd_ptr_count_s cn38xx;
    struct cvmx_ipd_ptr_count_s cn38xxp2;
    struct cvmx_ipd_ptr_count_s cn50xx;
    struct cvmx_ipd_ptr_count_s cn52xx;
    struct cvmx_ipd_ptr_count_s cn52xxp1;
    struct cvmx_ipd_ptr_count_s cn56xx;
    struct cvmx_ipd_ptr_count_s cn56xxp1;
    struct cvmx_ipd_ptr_count_s cn58xx;
    struct cvmx_ipd_ptr_count_s cn58xxp1;
    struct cvmx_ipd_ptr_count_s cn63xx;
    struct cvmx_ipd_ptr_count_s cn63xxp1;
};
typedef union cvmx_ipd_ptr_count cvmx_ipd_ptr_count_t;

/**
 * cvmx_ipd_pwp_ptr_fifo_ctl
 *
 * IPD_PWP_PTR_FIFO_CTL = IPD's PWP Pointer FIFO Control
 *
 * Allows reading of the Page-Pointers stored in the IPD's PWP Fifo.
 */
union cvmx_ipd_pwp_ptr_fifo_ctl
{
    uint64_t u64;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_61_63 : 3;
    uint64_t max_cnts : 7; /**< Maximum number of Packet-Pointers or WQE-Pointers
                                that COULD be in the FIFO.
                                When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
                                only represents the Max number of Packet-Pointers;
                                WQE-Pointers are not used in this mode. */
    uint64_t wraddr : 8;   /**< Present FIFO WQE Read address. */
    uint64_t praddr : 8;   /**< Present FIFO Packet Read address. */
    uint64_t ptr : 29;     /**< The output of the pwp_fifo. */
    uint64_t cena : 1;     /**< Active low Chip Enable to the read port of the
                                pwp_fifo. This bit also controls the MUX-select
                                that steers [RADDR] to the pwp_fifo.
                                *WARNING - Setting this field to '0' will allow
                                reading of the memories through the PTR field,
                                but will cause unpredictable operation of the IPD
                                under normal operation. */
    uint64_t raddr : 8;    /**< Sets the address to read from in the pwp_fifo.
                                Addresses 0 through 63 contain Packet-Pointers and
                                addresses 64 through 127 contain WQE-Pointers.
                                When IPD_CTL_STATUS[NO_WPTR] is set '1' addresses
                                64 through 127 are not valid. */
#else
    uint64_t raddr : 8;
    uint64_t cena : 1;
    uint64_t ptr : 29;
    uint64_t praddr : 8;
    uint64_t wraddr : 8;
    uint64_t max_cnts : 7;
    uint64_t reserved_61_63 : 3;
#endif
    } s;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xx;
    struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xxp1;
};
typedef union cvmx_ipd_pwp_ptr_fifo_ctl cvmx_ipd_pwp_ptr_fifo_ctl_t;

/**
 * cvmx_ipd_qos#_red_marks
 *
 * IPD_QOS0_RED_MARKS = IPD QOS 0 Marks Red High Low
 *
 * Set the pass-drop marks for qos level.
 */
union cvmx_ipd_qosx_red_marks
{
    uint64_t u64;
    struct cvmx_ipd_qosx_red_marks_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t drop : 32; /**< Packets will be dropped when the average value of
                             IPD_QUE0_FREE_PAGE_CNT is equal to or less than
                             this value. */
    uint64_t pass : 32; /**< Packets will be passed when the average value of
                             IPD_QUE0_FREE_PAGE_CNT is larger than this value. */
#else
    uint64_t pass : 32;
    uint64_t drop : 32;
#endif
    } s;
    struct cvmx_ipd_qosx_red_marks_s cn30xx;
    struct cvmx_ipd_qosx_red_marks_s cn31xx;
    struct cvmx_ipd_qosx_red_marks_s cn38xx;
    struct cvmx_ipd_qosx_red_marks_s cn38xxp2;
    struct cvmx_ipd_qosx_red_marks_s cn50xx;
    struct cvmx_ipd_qosx_red_marks_s cn52xx;
    struct cvmx_ipd_qosx_red_marks_s cn52xxp1;
    struct cvmx_ipd_qosx_red_marks_s cn56xx;
    struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
    struct cvmx_ipd_qosx_red_marks_s cn58xx;
    struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
    struct cvmx_ipd_qosx_red_marks_s cn63xx;
    struct cvmx_ipd_qosx_red_marks_s cn63xxp1;
};
typedef union cvmx_ipd_qosx_red_marks cvmx_ipd_qosx_red_marks_t;

/**
 * cvmx_ipd_que0_free_page_cnt
 *
 * IPD_QUE0_FREE_PAGE_CNT = IPD Queue0 Free Page Count
 *
 * Number of Free-Page Pointers that are available for use in the FPA for Queue-0.
 */
union cvmx_ipd_que0_free_page_cnt
{
    uint64_t u64;
    struct cvmx_ipd_que0_free_page_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63 : 32;
    uint64_t q0_pcnt : 32; /**< Number of Queue-0 Page Pointers Available.
*/
#else
    uint64_t q0_pcnt : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_ipd_que0_free_page_cnt_s cn30xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn31xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn38xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2;
    struct cvmx_ipd_que0_free_page_cnt_s cn50xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn52xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1;
    struct cvmx_ipd_que0_free_page_cnt_s cn56xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
    struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
    struct cvmx_ipd_que0_free_page_cnt_s cn63xx;
    struct cvmx_ipd_que0_free_page_cnt_s cn63xxp1;
};
typedef union cvmx_ipd_que0_free_page_cnt cvmx_ipd_que0_free_page_cnt_t;

/**
 * cvmx_ipd_red_port_enable
 *
 * IPD_RED_PORT_ENABLE = IPD RED Port Enable
 *
 * Set the pass-drop marks for qos level.
 */
union cvmx_ipd_red_port_enable
{
    uint64_t u64;
    struct cvmx_ipd_red_port_enable_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t prb_dly : 14; /**< Number (core clocks periods + 68) * 8 to wait
                                before calculating the new packet drop
                                probability for each QOS level. */
    uint64_t avg_dly : 14; /**< Number (core clocks periods + 10) * 8 to wait
                                before calculating the moving average for each
                                QOS level.
                                Larger AVG_DLY values cause the moving averages
                                of ALL QOS levels to track changes in the actual
                                free space more slowly. Smaller NEW_CON (and
                                larger AVG_CON) values can have a similar effect,
                                but only affect an individual QOS level, rather
                                than all. */
    uint64_t prt_enb : 36; /**< The bit position will enable the corresponding
                                Port's ability to have packets dropped by RED
                                probability. */
#else
    uint64_t prt_enb : 36;
    uint64_t avg_dly : 14;
    uint64_t prb_dly : 14;
#endif
    } s;
    struct cvmx_ipd_red_port_enable_s cn30xx;
    struct cvmx_ipd_red_port_enable_s cn31xx;
    struct cvmx_ipd_red_port_enable_s cn38xx;
    struct cvmx_ipd_red_port_enable_s cn38xxp2;
    struct cvmx_ipd_red_port_enable_s cn50xx;
    struct cvmx_ipd_red_port_enable_s cn52xx;
    struct cvmx_ipd_red_port_enable_s cn52xxp1;
    struct cvmx_ipd_red_port_enable_s cn56xx;
    struct cvmx_ipd_red_port_enable_s cn56xxp1;
    struct cvmx_ipd_red_port_enable_s cn58xx;
    struct cvmx_ipd_red_port_enable_s cn58xxp1;
    struct cvmx_ipd_red_port_enable_s cn63xx;
    struct cvmx_ipd_red_port_enable_s cn63xxp1;
};
typedef union cvmx_ipd_red_port_enable cvmx_ipd_red_port_enable_t;

/**
 * cvmx_ipd_red_port_enable2
 *
 * IPD_RED_PORT_ENABLE2 = IPD RED Port Enable2
 *
 * Set the pass-drop marks for qos level.
 */
union cvmx_ipd_red_port_enable2
{
    uint64_t u64;
    struct cvmx_ipd_red_port_enable2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_8_63 : 56;
    uint64_t prt_enb : 8; /**< Bits 7-0 correspond to ports 43-36. These bits
                               have the same meaning as the PRT_ENB field of
                               IPD_RED_PORT_ENABLE. */
#else
    uint64_t prt_enb : 8;
    uint64_t reserved_8_63 : 56;
#endif
    } s;
    struct cvmx_ipd_red_port_enable2_cn52xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_4_63 : 60;
    uint64_t prt_enb : 4; /**< Bits 3-0 correspond to ports 39-36. These bits
                               have the same meaning as the PRT_ENB field of
                               IPD_RED_PORT_ENABLE. */
#else
    uint64_t prt_enb : 4;
    uint64_t reserved_4_63 : 60;
#endif
    } cn52xx;
    struct cvmx_ipd_red_port_enable2_cn52xx cn52xxp1;
    struct cvmx_ipd_red_port_enable2_cn52xx cn56xx;
    struct cvmx_ipd_red_port_enable2_cn52xx cn56xxp1;
    struct cvmx_ipd_red_port_enable2_s cn63xx;
    struct cvmx_ipd_red_port_enable2_s cn63xxp1;
};
typedef union cvmx_ipd_red_port_enable2 cvmx_ipd_red_port_enable2_t;

/**
 * cvmx_ipd_red_que#_param
 *
 * IPD_RED_QUE0_PARAM = IPD RED Queue-0 Parameters
 *
 * Values control the Passing and Dropping of packets by the red engine for QOS Level-0.
 */
union cvmx_ipd_red_quex_param
{
    uint64_t u64;
    struct cvmx_ipd_red_quex_param_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_49_63 : 15;
    uint64_t use_pcnt : 1; /**< When set '1' red will use the actual Packet-Page
                                Count in place of the Average for RED calculations. */
    uint64_t new_con : 8;  /**< This value is used to control how much of the present
                                Actual Queue Size is used to calculate the new
                                Average Queue Size. The value is a number from 0
                                to 256, which represents NEW_CON/256 of the Actual
                                Queue Size that will be used in the calculation.
                                The number in this field plus the value of
                                AVG_CON must be equal to 256.
                                Larger AVG_DLY values cause the moving averages
                                of ALL QOS levels to track changes in the actual
                                free space more slowly. Smaller NEW_CON (and
                                larger AVG_CON) values can have a similar effect,
                                but only affect an individual QOS level, rather
                                than all. */
    uint64_t avg_con : 8;  /**< This value is used to control how much of the present
                                Average Queue Size is used to calculate the new
                                Average Queue Size. The value is a number from 0
                                to 256, which represents AVG_CON/256 of the Average
                                Queue Size that will be used in the calculation.
                                The number in this field plus the value of
                                NEW_CON must be equal to 256.
                                Larger AVG_DLY values cause the moving averages
                                of ALL QOS levels to track changes in the actual
                                free space more slowly. Smaller NEW_CON (and
                                larger AVG_CON) values can have a similar effect,
                                but only affect an individual QOS level, rather
                                than all. */
    uint64_t prb_con : 32; /**< Used in computing the probability of a packet being
                                passed or dropped by the WRED engine. The field is
                                calculated to be (255 * 2^24)/(PASS-DROP). Where
                                PASS and DROP are the fields from the
                                IPD_QOS0_RED_MARKS CSR. */
#else
    uint64_t prb_con : 32;
    uint64_t avg_con : 8;
    uint64_t new_con : 8;
    uint64_t use_pcnt : 1;
    uint64_t reserved_49_63 : 15;
#endif
    } s;
    struct cvmx_ipd_red_quex_param_s cn30xx;
    struct cvmx_ipd_red_quex_param_s cn31xx;
    struct cvmx_ipd_red_quex_param_s cn38xx;
    struct cvmx_ipd_red_quex_param_s cn38xxp2;
    struct cvmx_ipd_red_quex_param_s cn50xx;
    struct cvmx_ipd_red_quex_param_s cn52xx;
    struct cvmx_ipd_red_quex_param_s cn52xxp1;
    struct cvmx_ipd_red_quex_param_s cn56xx;
    struct cvmx_ipd_red_quex_param_s cn56xxp1;
    struct cvmx_ipd_red_quex_param_s cn58xx;
    struct cvmx_ipd_red_quex_param_s cn58xxp1;
    struct cvmx_ipd_red_quex_param_s cn63xx;
    struct cvmx_ipd_red_quex_param_s cn63xxp1;
};
typedef union cvmx_ipd_red_quex_param cvmx_ipd_red_quex_param_t;

/**
 * cvmx_ipd_sub_port_bp_page_cnt
 *
 * IPD_SUB_PORT_BP_PAGE_CNT = IPD Subtract Port Backpressure Page Count
 *
 * Will add the value to the indicated port count register, the number of pages supplied. The value added should
 * be the 2's complement of the value that needs to be subtracted.
 * Users add 2's complement values to the
 * port-mbuf-count register to return (lower the count) mbufs to the counter in order to avoid port-level
 * backpressure being applied to the port. Backpressure is applied when the MBUF used count of a port exceeds the
 * value in the IPD_PORTX_BP_PAGE_CNT, IPD_PORTX_BP_PAGE_CNT2, and IPD_PORTX_BP_PAGE_CNT3.
 *
 * This register can't be written from the PCI via a window write.
 */
union cvmx_ipd_sub_port_bp_page_cnt
{
    uint64_t u64;
    struct cvmx_ipd_sub_port_bp_page_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_31_63 : 33;
    uint64_t port : 6;      /**< The port to add the PAGE_CNT field to. */
    uint64_t page_cnt : 25; /**< The number of page pointers to add to
                                 the port counter pointed to by the
                                 PORT Field. */
#else
    uint64_t page_cnt : 25;
    uint64_t port : 6;
    uint64_t reserved_31_63 : 33;
#endif
    } s;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xx;
    struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xxp1;
};
typedef union cvmx_ipd_sub_port_bp_page_cnt cvmx_ipd_sub_port_bp_page_cnt_t;

/**
 * cvmx_ipd_sub_port_fcs
 *
 * IPD_SUB_PORT_FCS = IPD Subtract Ports FCS Register
 *
 * When set '1' the port corresponding to the bit set will subtract 4 bytes from the end of
 * the packet.
 */
union cvmx_ipd_sub_port_fcs
{
    uint64_t u64;
    struct cvmx_ipd_sub_port_fcs_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_40_63 : 24;
    uint64_t port_bit2 : 4; /**< When set '1', the port corresponding to the bit
                                 position set, will subtract the FCS for packets
                                 on that port. */
    uint64_t reserved_32_35 : 4;
    uint64_t port_bit : 32; /**< When set '1', the port corresponding to the bit
                                 position set, will subtract the FCS for packets
                                 on that port. */
#else
    uint64_t port_bit : 32;
    uint64_t reserved_32_35 : 4;
    uint64_t port_bit2 : 4;
    uint64_t reserved_40_63 : 24;
#endif
    } s;
    struct cvmx_ipd_sub_port_fcs_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_3_63 : 61;
    uint64_t port_bit : 3; /**< When set '1', the port corresponding to the bit
                                position set, will subtract the FCS for packets
                                on that port. */
#else
    uint64_t port_bit : 3;
    uint64_t reserved_3_63 : 61;
#endif
    } cn30xx;
    struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx;
    struct cvmx_ipd_sub_port_fcs_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63 : 32;
    uint64_t port_bit : 32; /**< When set '1', the port corresponding to the bit
                                 position set, will subtract the FCS for packets
                                 on that port. */
#else
    uint64_t port_bit : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } cn38xx;
    struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2;
    struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx;
    struct cvmx_ipd_sub_port_fcs_s cn52xx;
    struct cvmx_ipd_sub_port_fcs_s cn52xxp1;
    struct cvmx_ipd_sub_port_fcs_s cn56xx;
    struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
    struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
    struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
    struct cvmx_ipd_sub_port_fcs_s cn63xx;
    struct cvmx_ipd_sub_port_fcs_s cn63xxp1;
};
typedef union cvmx_ipd_sub_port_fcs cvmx_ipd_sub_port_fcs_t;

/**
 * cvmx_ipd_sub_port_qos_cnt
 *
 * IPD_SUB_PORT_QOS_CNT = IPD Subtract Port QOS Count
 *
 * Will add the value (CNT) to the indicated Port-QOS register (PORT_QOS). The value added must
 * be the 2's complement of the value that needs to be subtracted.
 */
union cvmx_ipd_sub_port_qos_cnt
{
    uint64_t u64;
    struct cvmx_ipd_sub_port_qos_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_41_63 : 23;
    uint64_t port_qos : 9; /**< The port to add the CNT field to. */
    uint64_t cnt : 32;     /**< The value to be added to the register selected
                                in the PORT_QOS field.
*/
#else
    uint64_t cnt : 32;
    uint64_t port_qos : 9;
    uint64_t reserved_41_63 : 23;
#endif
    } s;
    struct cvmx_ipd_sub_port_qos_cnt_s cn52xx;
    struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
    struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
    struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
    struct cvmx_ipd_sub_port_qos_cnt_s cn63xx;
    struct cvmx_ipd_sub_port_qos_cnt_s cn63xxp1;
};
typedef union cvmx_ipd_sub_port_qos_cnt cvmx_ipd_sub_port_qos_cnt_t;

/**
 * cvmx_ipd_wqe_fpa_queue
 *
 * IPD_WQE_FPA_QUEUE = IPD Work-Queue-Entry FPA Page Size
 *
 * Which FPA Queue (0-7) to fetch page-pointers from for WQE's
 */
union cvmx_ipd_wqe_fpa_queue
{
    uint64_t u64;
    struct cvmx_ipd_wqe_fpa_queue_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_3_63 : 61;
    uint64_t wqe_pool : 3; /**< Which FPA Queue to fetch page-pointers
                                from for WQE's.
                                Not used when IPD_CTL_STATUS[NO_WPTR] is set. */
#else
    uint64_t wqe_pool : 3;
    uint64_t reserved_3_63 : 61;
#endif
    } s;
    struct cvmx_ipd_wqe_fpa_queue_s cn30xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn31xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn38xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2;
    struct cvmx_ipd_wqe_fpa_queue_s cn50xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn52xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1;
    struct cvmx_ipd_wqe_fpa_queue_s cn56xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
    struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
    struct cvmx_ipd_wqe_fpa_queue_s cn63xx;
    struct cvmx_ipd_wqe_fpa_queue_s cn63xxp1;
};
typedef union cvmx_ipd_wqe_fpa_queue cvmx_ipd_wqe_fpa_queue_t;

/**
 * cvmx_ipd_wqe_ptr_valid
 *
 * IPD_WQE_PTR_VALID = IPD's WQE Pointer Valid
 *
 * The value of the WQE-pointer fetched and in the valid register.
 */
union cvmx_ipd_wqe_ptr_valid
{
    uint64_t u64;
    struct cvmx_ipd_wqe_ptr_valid_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t ptr : 29; /**< Pointer value.
                            When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
                            represents a Packet-Pointer NOT a WQE pointer. */
#else
    uint64_t ptr : 29;
    uint64_t reserved_29_63 : 35;
#endif
    } s;
    struct cvmx_ipd_wqe_ptr_valid_s cn30xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn31xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn38xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn50xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn52xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1;
    struct cvmx_ipd_wqe_ptr_valid_s cn56xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
    struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
    struct cvmx_ipd_wqe_ptr_valid_s cn63xx;
    struct cvmx_ipd_wqe_ptr_valid_s cn63xxp1;
};
typedef union cvmx_ipd_wqe_ptr_valid cvmx_ipd_wqe_ptr_valid_t;

#endif