1/***********************license start*************** 2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Inc. nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
38 ***********************license end**************************************/ 39 40 41/** 42 * cvmx-agl-defs.h 43 * 44 * Configuration and status register (CSR) type definitions for 45 * Octeon agl. 46 * 47 * This file is auto generated. Do not edit. 48 * 49 * <hr>$Revision$<hr> 50 * 51 */ 52#ifndef __CVMX_AGL_DEFS_H__ 53#define __CVMX_AGL_DEFS_H__ 54 55#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 56#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC() 57static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void) 58{ 59 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 60 cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n"); 61 return CVMX_ADD_IO_SEG(0x00011800E0000518ull); 62} 63#else 64#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull)) 65#endif 66#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 67#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC() 68static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void) 69{ 70 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 71 cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n"); 72 return CVMX_ADD_IO_SEG(0x00011800E0000400ull); 73} 74#else 75#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull)) 76#endif 77#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 78#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC() 79static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void) 80{ 81 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 82 cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n"); 83 return CVMX_ADD_IO_SEG(0x00011800E00007F0ull); 84} 85#else 86#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull)) 87#endif 88#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 89#define 
CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC() 90static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void) 91{ 92 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 93 cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n"); 94 return CVMX_ADD_IO_SEG(0x00011800E00007F8ull); 95} 96#else 97#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull)) 98#endif 99#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 100static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset) 101{ 102 if (!( 103 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 104 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 105 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 106 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 107 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 108 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 109 cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset); 110 return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048; 111} 112#else 113#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048) 114#endif 115#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 116static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset) 117{ 118 if (!( 119 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 120 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 121 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 122 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 123 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 124 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 125 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset); 126 return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048; 127} 128#else 129#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048) 130#endif 131#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 132static inline 
uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset) 133{ 134 if (!( 135 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 136 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 137 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 138 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 139 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 140 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 141 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset); 142 return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048; 143} 144#else 145#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048) 146#endif 147#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 148static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset) 149{ 150 if (!( 151 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 152 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 153 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 154 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 155 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 156 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 157 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset); 158 return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048; 159} 160#else 161#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048) 162#endif 163#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 164static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset) 165{ 166 if (!( 167 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 168 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 169 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 170 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 171 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 172 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 173 
cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset); 174 return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048; 175} 176#else 177#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048) 178#endif 179#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 180static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset) 181{ 182 if (!( 183 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 184 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 185 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 186 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 187 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 188 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 189 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset); 190 return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048; 191} 192#else 193#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048) 194#endif 195#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 196static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset) 197{ 198 if (!( 199 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 200 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 201 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 202 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 203 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 204 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 205 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset); 206 return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048; 207} 208#else 209#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048) 210#endif 211#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 212static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset) 213{ 214 if (!( 215 
(OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 216 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 217 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 218 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 219 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 220 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 221 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset); 222 return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048; 223} 224#else 225#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048) 226#endif 227#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 228static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset) 229{ 230 if (!( 231 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 232 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 233 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 234 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 235 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 236 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 237 cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset); 238 return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048; 239} 240#else 241#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048) 242#endif 243#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 244static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset) 245{ 246 if (!( 247 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 248 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 249 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 250 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 251 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 252 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 253 cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset); 254 return 
CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048; 255} 256#else 257#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048) 258#endif 259#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 260static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset) 261{ 262 if (!( 263 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 264 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 265 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 266 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 267 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 268 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 269 cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset); 270 return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048; 271} 272#else 273#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048) 274#endif 275#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 276static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset) 277{ 278 if (!( 279 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 280 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 281 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 282 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 283 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 284 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 285 cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset); 286 return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048; 287} 288#else 289#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048) 290#endif 291#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 292static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset) 293{ 294 if (!( 295 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 296 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 297 
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 298 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 299 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 300 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 301 cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset); 302 return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048; 303} 304#else 305#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048) 306#endif 307#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 308static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset) 309{ 310 if (!( 311 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 312 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 313 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 314 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 315 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 316 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 317 cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset); 318 return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048; 319} 320#else 321#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048) 322#endif 323#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 324static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset) 325{ 326 if (!( 327 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 328 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 329 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 330 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 331 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 332 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 333 cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset); 334 return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048; 335} 336#else 337#define CVMX_AGL_GMX_RXX_IFG(offset) 
(CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048) 338#endif 339#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 340static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset) 341{ 342 if (!( 343 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 344 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 345 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 346 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 347 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 348 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 349 cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset); 350 return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048; 351} 352#else 353#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048) 354#endif 355#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 356static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset) 357{ 358 if (!( 359 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 360 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 361 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 362 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 363 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 364 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 365 cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset); 366 return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048; 367} 368#else 369#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048) 370#endif 371#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 372static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset) 373{ 374 if (!( 375 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 376 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 377 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 378 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 379 
(OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 380 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 381 cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset); 382 return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048; 383} 384#else 385#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048) 386#endif 387#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 388static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset) 389{ 390 if (!( 391 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 392 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 393 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 394 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 395 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 396 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 397 cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset); 398 return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048; 399} 400#else 401#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048) 402#endif 403#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 404static inline uint64_t CVMX_AGL_GMX_RXX_RX_INBND(unsigned long offset) 405{ 406 if (!( 407 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 408 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 409 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 410 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 411 cvmx_warn("CVMX_AGL_GMX_RXX_RX_INBND(%lu) is invalid on this chip\n", offset); 412 return CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048; 413} 414#else 415#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048) 416#endif 417#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 418static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset) 419{ 420 if (!( 421 
(OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 422 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 423 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 424 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 425 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 426 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 427 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset); 428 return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048; 429} 430#else 431#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048) 432#endif 433#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 434static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset) 435{ 436 if (!( 437 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 438 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 439 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 440 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 441 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 442 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 443 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset); 444 return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048; 445} 446#else 447#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048) 448#endif 449#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 450static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset) 451{ 452 if (!( 453 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 454 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 455 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 456 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 457 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 458 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 459 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", 
offset); 460 return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048; 461} 462#else 463#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048) 464#endif 465#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 466static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset) 467{ 468 if (!( 469 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 470 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 471 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 472 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 473 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 474 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 475 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset); 476 return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048; 477} 478#else 479#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048) 480#endif 481#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 482static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset) 483{ 484 if (!( 485 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 486 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 487 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 488 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 489 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 490 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 491 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset); 492 return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048; 493} 494#else 495#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048) 496#endif 497#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 498static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset) 499{ 500 if (!( 501 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 
1))) || 502 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 503 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 504 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 505 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 506 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 507 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset); 508 return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048; 509} 510#else 511#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048) 512#endif 513#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 514static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset) 515{ 516 if (!( 517 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 518 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 519 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 520 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 521 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 522 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 523 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset); 524 return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048; 525} 526#else 527#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048) 528#endif 529#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 530static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset) 531{ 532 if (!( 533 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 534 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 535 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 536 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 537 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 538 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 539 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset); 540 return 
CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048; 541} 542#else 543#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048) 544#endif 545#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 546static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset) 547{ 548 if (!( 549 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 550 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 551 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 552 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 553 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 554 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 555 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset); 556 return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048; 557} 558#else 559#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048) 560#endif 561#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 562static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset) 563{ 564 if (!( 565 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 566 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 567 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 568 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 569 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 570 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 571 cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset); 572 return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048; 573} 574#else 575#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048) 576#endif 577#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 578static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset) 579{ 580 if (!( 581 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 582 
(OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 583 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 584 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 585 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 586 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 587 cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset); 588 return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048; 589} 590#else 591#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048) 592#endif 593#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 594static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset) 595{ 596 if (!( 597 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 598 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 599 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 600 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 601 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 602 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 603 cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset); 604 return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8; 605} 606#else 607#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8) 608#endif 609#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 610static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset) 611{ 612 if (!( 613 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 614 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 615 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 616 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 617 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 618 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 619 cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset); 620 return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8; 621} 622#else 
623#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8) 624#endif 625#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 626static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset) 627{ 628 if (!( 629 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 630 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 631 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 632 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 633 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 634 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 635 cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset); 636 return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8; 637} 638#else 639#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8) 640#endif 641#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 642#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC() 643static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void) 644{ 645 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 646 cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n"); 647 return CVMX_ADD_IO_SEG(0x00011800E00004E8ull); 648} 649#else 650#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull)) 651#endif 652#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 653#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC() 654static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void) 655{ 656 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 657 cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n"); 658 return 
CVMX_ADD_IO_SEG(0x00011800E00007E8ull); 659} 660#else 661#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull)) 662#endif 663#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 664static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset) 665{ 666 if (!( 667 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 668 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 669 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 670 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 671 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 672 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 673 cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset); 674 return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048; 675} 676#else 677#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048) 678#endif 679#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 680#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC() 681static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void) 682{ 683 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 684 cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n"); 685 return CVMX_ADD_IO_SEG(0x00011800E0000520ull); 686} 687#else 688#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull)) 689#endif 690#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 691static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset) 692{ 693 if (!( 694 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 695 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 696 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 697 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 698 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 699 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 700 
cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset); 701 return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048; 702} 703#else 704#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048) 705#endif 706#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 707static inline uint64_t CVMX_AGL_GMX_TXX_CLK(unsigned long offset) 708{ 709 if (!( 710 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 711 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 712 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 713 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 714 cvmx_warn("CVMX_AGL_GMX_TXX_CLK(%lu) is invalid on this chip\n", offset); 715 return CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048; 716} 717#else 718#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048) 719#endif 720#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 721static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset) 722{ 723 if (!( 724 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 725 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 726 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 727 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 728 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 729 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 730 cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset); 731 return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048; 732} 733#else 734#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048) 735#endif 736#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 737static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset) 738{ 739 if (!( 740 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 741 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 742 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 743 
(OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 744 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 745 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 746 cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset); 747 return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048; 748} 749#else 750#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048) 751#endif 752#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 753static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset) 754{ 755 if (!( 756 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 757 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 758 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 759 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 760 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 761 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 762 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset); 763 return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048; 764} 765#else 766#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048) 767#endif 768#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 769static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset) 770{ 771 if (!( 772 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 773 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 774 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 775 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 776 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 777 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 778 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset); 779 return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048; 780} 781#else 782#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) 
(CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048) 783#endif 784#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 785static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset) 786{ 787 if (!( 788 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 789 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 790 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 791 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 792 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 793 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 794 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset); 795 return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048; 796} 797#else 798#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048) 799#endif 800#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 801static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset) 802{ 803 if (!( 804 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 805 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 806 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 807 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 808 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 809 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 810 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset); 811 return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048; 812} 813#else 814#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048) 815#endif 816#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 817static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset) 818{ 819 if (!( 820 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 821 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 822 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 823 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 
1))) || 824 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 825 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 826 cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset); 827 return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048; 828} 829#else 830#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048) 831#endif 832#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 833static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset) 834{ 835 if (!( 836 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 837 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 838 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 839 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 840 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 841 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 842 cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset); 843 return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048; 844} 845#else 846#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048) 847#endif 848#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 849static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset) 850{ 851 if (!( 852 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 853 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 854 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 855 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 856 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 857 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 858 cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset); 859 return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048; 860} 861#else 862#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048) 863#endif 864#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 
865static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset) 866{ 867 if (!( 868 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 869 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 870 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 871 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 872 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 873 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 874 cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset); 875 return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048; 876} 877#else 878#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048) 879#endif 880#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 881static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset) 882{ 883 if (!( 884 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 885 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 886 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 887 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 888 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 889 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 890 cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset); 891 return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048; 892} 893#else 894#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048) 895#endif 896#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 897static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset) 898{ 899 if (!( 900 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 901 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 902 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 903 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 904 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 905 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 906 
cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset); 907 return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048; 908} 909#else 910#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048) 911#endif 912#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 913static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset) 914{ 915 if (!( 916 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 917 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 918 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 919 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 920 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 921 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 922 cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset); 923 return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048; 924} 925#else 926#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048) 927#endif 928#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 929static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset) 930{ 931 if (!( 932 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 933 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 934 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 935 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 936 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 937 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 938 cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset); 939 return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048; 940} 941#else 942#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048) 943#endif 944#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 945static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset) 946{ 947 if (!( 948 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 
949 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 950 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 951 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 952 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 953 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 954 cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset); 955 return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048; 956} 957#else 958#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048) 959#endif 960#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 961static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset) 962{ 963 if (!( 964 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 965 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 966 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 967 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 968 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 969 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 970 cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset); 971 return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048; 972} 973#else 974#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048) 975#endif 976#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 977static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset) 978{ 979 if (!( 980 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 981 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 982 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 983 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 984 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 985 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 986 cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset); 987 return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048; 988} 989#else 
990#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048) 991#endif 992#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 993static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset) 994{ 995 if (!( 996 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 997 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 998 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 999 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 1000 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 1001 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 1002 cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset); 1003 return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048; 1004} 1005#else 1006#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048) 1007#endif 1008#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1009static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset) 1010{ 1011 if (!( 1012 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) || 1013 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) || 1014 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 1015 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 1016 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 1017 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 1018 cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset); 1019 return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048; 1020} 1021#else 1022#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048) 1023#endif 1024#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1025#define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC() 1026static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void) 1027{ 1028 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || 
OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1029 cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n"); 1030 return CVMX_ADD_IO_SEG(0x00011800E00004D0ull); 1031} 1032#else 1033#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull)) 1034#endif 1035#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1036#define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC() 1037static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void) 1038{ 1039 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1040 cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n"); 1041 return CVMX_ADD_IO_SEG(0x00011800E0000498ull); 1042} 1043#else 1044#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull)) 1045#endif 1046#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1047#define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC() 1048static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void) 1049{ 1050 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1051 cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n"); 1052 return CVMX_ADD_IO_SEG(0x00011800E0000488ull); 1053} 1054#else 1055#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull)) 1056#endif 1057#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1058#define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC() 1059static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void) 1060{ 1061 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1062 cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n"); 1063 return 
CVMX_ADD_IO_SEG(0x00011800E0000508ull); 1064} 1065#else 1066#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull)) 1067#endif 1068#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1069#define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC() 1070static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void) 1071{ 1072 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1073 cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n"); 1074 return CVMX_ADD_IO_SEG(0x00011800E0000500ull); 1075} 1076#else 1077#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull)) 1078#endif 1079#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1080#define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC() 1081static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void) 1082{ 1083 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1084 cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n"); 1085 return CVMX_ADD_IO_SEG(0x00011800E0000490ull); 1086} 1087#else 1088#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull)) 1089#endif 1090#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1091#define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC() 1092static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void) 1093{ 1094 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1095 cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n"); 1096 return CVMX_ADD_IO_SEG(0x00011800E00004F8ull); 1097} 1098#else 1099#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull)) 1100#endif 1101#if 
CVMX_ENABLE_CSR_ADDRESS_CHECKING 1102#define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC() 1103static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void) 1104{ 1105 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1106 cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n"); 1107 return CVMX_ADD_IO_SEG(0x00011800E00004C8ull); 1108} 1109#else 1110#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull)) 1111#endif 1112#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1113#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC() 1114static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void) 1115{ 1116 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1117 cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n"); 1118 return CVMX_ADD_IO_SEG(0x00011800E00004A0ull); 1119} 1120#else 1121#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull)) 1122#endif 1123#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1124#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC() 1125static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void) 1126{ 1127 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))) 1128 cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n"); 1129 return CVMX_ADD_IO_SEG(0x00011800E00004A8ull); 1130} 1131#else 1132#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull)) 1133#endif 1134#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1135static inline uint64_t 
CVMX_AGL_PRTX_CTL(unsigned long offset) 1136{ 1137 if (!( 1138 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) || 1139 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) || 1140 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) || 1141 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0))))) 1142 cvmx_warn("CVMX_AGL_PRTX_CTL(%lu) is invalid on this chip\n", offset); 1143 return CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8; 1144} 1145#else 1146#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8) 1147#endif 1148 1149/** 1150 * cvmx_agl_gmx_bad_reg 1151 * 1152 * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong 1153 * 1154 * 1155 * Notes: 1156 * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1. 1157 * OUT_OVR[1], LOSTSTAT[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1. 1158 * STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1. 
1159 */ 1160union cvmx_agl_gmx_bad_reg { 1161 uint64_t u64; 1162 struct cvmx_agl_gmx_bad_reg_s { 1163#ifdef __BIG_ENDIAN_BITFIELD 1164 uint64_t reserved_38_63 : 26; 1165 uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */ 1166 uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */ 1167 uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */ 1168 uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */ 1169 uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */ 1170 uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */ 1171 uint64_t reserved_27_31 : 5; 1172 uint64_t statovr : 1; /**< TX Statistics overflow */ 1173 uint64_t reserved_24_25 : 2; 1174 uint64_t loststat : 2; /**< TX Statistics data was over-written 1175 In MII/RGMII, one bit per port 1176 TX Stats are corrupted */ 1177 uint64_t reserved_4_21 : 18; 1178 uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */ 1179 uint64_t reserved_0_1 : 2; 1180#else 1181 uint64_t reserved_0_1 : 2; 1182 uint64_t out_ovr : 2; 1183 uint64_t reserved_4_21 : 18; 1184 uint64_t loststat : 2; 1185 uint64_t reserved_24_25 : 2; 1186 uint64_t statovr : 1; 1187 uint64_t reserved_27_31 : 5; 1188 uint64_t ovrflw : 1; 1189 uint64_t txpop : 1; 1190 uint64_t txpsh : 1; 1191 uint64_t ovrflw1 : 1; 1192 uint64_t txpop1 : 1; 1193 uint64_t txpsh1 : 1; 1194 uint64_t reserved_38_63 : 26; 1195#endif 1196 } s; 1197 struct cvmx_agl_gmx_bad_reg_cn52xx { 1198#ifdef __BIG_ENDIAN_BITFIELD 1199 uint64_t reserved_38_63 : 26; 1200 uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */ 1201 uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */ 1202 uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */ 1203 uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */ 1204 uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */ 1205 uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */ 1206 uint64_t reserved_27_31 : 5; 1207 uint64_t statovr : 1; /**< TX Statistics overflow */ 1208 uint64_t reserved_23_25 : 3; 1209 uint64_t loststat : 1; /**< TX Statistics data was over-written 
1210 TX Stats are corrupted */ 1211 uint64_t reserved_4_21 : 18; 1212 uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */ 1213 uint64_t reserved_0_1 : 2; 1214#else 1215 uint64_t reserved_0_1 : 2; 1216 uint64_t out_ovr : 2; 1217 uint64_t reserved_4_21 : 18; 1218 uint64_t loststat : 1; 1219 uint64_t reserved_23_25 : 3; 1220 uint64_t statovr : 1; 1221 uint64_t reserved_27_31 : 5; 1222 uint64_t ovrflw : 1; 1223 uint64_t txpop : 1; 1224 uint64_t txpsh : 1; 1225 uint64_t ovrflw1 : 1; 1226 uint64_t txpop1 : 1; 1227 uint64_t txpsh1 : 1; 1228 uint64_t reserved_38_63 : 26; 1229#endif 1230 } cn52xx; 1231 struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1; 1232 struct cvmx_agl_gmx_bad_reg_cn56xx { 1233#ifdef __BIG_ENDIAN_BITFIELD 1234 uint64_t reserved_35_63 : 29; 1235 uint64_t txpsh : 1; /**< TX FIFO overflow */ 1236 uint64_t txpop : 1; /**< TX FIFO underflow */ 1237 uint64_t ovrflw : 1; /**< RX FIFO overflow */ 1238 uint64_t reserved_27_31 : 5; 1239 uint64_t statovr : 1; /**< TX Statistics overflow */ 1240 uint64_t reserved_23_25 : 3; 1241 uint64_t loststat : 1; /**< TX Statistics data was over-written 1242 TX Stats are corrupted */ 1243 uint64_t reserved_3_21 : 19; 1244 uint64_t out_ovr : 1; /**< Outbound data FIFO overflow */ 1245 uint64_t reserved_0_1 : 2; 1246#else 1247 uint64_t reserved_0_1 : 2; 1248 uint64_t out_ovr : 1; 1249 uint64_t reserved_3_21 : 19; 1250 uint64_t loststat : 1; 1251 uint64_t reserved_23_25 : 3; 1252 uint64_t statovr : 1; 1253 uint64_t reserved_27_31 : 5; 1254 uint64_t ovrflw : 1; 1255 uint64_t txpop : 1; 1256 uint64_t txpsh : 1; 1257 uint64_t reserved_35_63 : 29; 1258#endif 1259 } cn56xx; 1260 struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1; 1261 struct cvmx_agl_gmx_bad_reg_s cn61xx; 1262 struct cvmx_agl_gmx_bad_reg_s cn63xx; 1263 struct cvmx_agl_gmx_bad_reg_s cn63xxp1; 1264 struct cvmx_agl_gmx_bad_reg_s cn66xx; 1265 struct cvmx_agl_gmx_bad_reg_s cn68xx; 1266 struct cvmx_agl_gmx_bad_reg_s cn68xxp1; 1267}; 1268typedef union cvmx_agl_gmx_bad_reg 
cvmx_agl_gmx_bad_reg_t; 1269 1270/** 1271 * cvmx_agl_gmx_bist 1272 * 1273 * AGL_GMX_BIST = GMX BIST Results 1274 * 1275 * 1276 * Notes: 1277 * Not reset when MIX*_CTL[RESET] is set to 1. 1278 * 1279 */ 1280union cvmx_agl_gmx_bist { 1281 uint64_t u64; 1282 struct cvmx_agl_gmx_bist_s { 1283#ifdef __BIG_ENDIAN_BITFIELD 1284 uint64_t reserved_25_63 : 39; 1285 uint64_t status : 25; /**< BIST Results. 1286 HW sets a bit in BIST for for memory that fails 1287 - 0: gmx#.inb.fif_bnk0 1288 - 1: gmx#.inb.fif_bnk1 1289 - 2: gmx#.inb.fif_bnk2 1290 - 3: gmx#.inb.fif_bnk3 1291 - 4: gmx#.inb.fif_bnk_ext0 1292 - 5: gmx#.inb.fif_bnk_ext1 1293 - 6: gmx#.inb.fif_bnk_ext2 1294 - 7: gmx#.inb.fif_bnk_ext3 1295 - 8: gmx#.outb.fif.fif_bnk0 1296 - 9: gmx#.outb.fif.fif_bnk1 1297 - 10: RAZ 1298 - 11: RAZ 1299 - 12: gmx#.outb.fif.fif_bnk_ext0 1300 - 13: gmx#.outb.fif.fif_bnk_ext1 1301 - 14: RAZ 1302 - 15: RAZ 1303 - 16: RAZ 1304 - 17: RAZ 1305 - 18: RAZ 1306 - 19: RAZ 1307 - 20: gmx#.csr.drf20x32m2_bist 1308 - 21: gmx#.csr.drf20x48m2_bist 1309 - 22: gmx#.outb.stat.drf16x27m1_bist 1310 - 23: gmx#.outb.stat.drf40x64m1_bist 1311 - 24: RAZ */ 1312#else 1313 uint64_t status : 25; 1314 uint64_t reserved_25_63 : 39; 1315#endif 1316 } s; 1317 struct cvmx_agl_gmx_bist_cn52xx { 1318#ifdef __BIG_ENDIAN_BITFIELD 1319 uint64_t reserved_10_63 : 54; 1320 uint64_t status : 10; /**< BIST Results. 
1321 HW sets a bit in BIST for for memory that fails 1322 - 0: gmx#.inb.drf128x78m1_bist 1323 - 1: gmx#.outb.fif.drf128x71m1_bist 1324 - 2: gmx#.csr.gmi0.srf8x64m1_bist 1325 - 3: gmx#.csr.gmi1.srf8x64m1_bist 1326 - 4: 0 1327 - 5: 0 1328 - 6: gmx#.csr.drf20x80m1_bist 1329 - 7: gmx#.outb.stat.drf16x27m1_bist 1330 - 8: gmx#.outb.stat.drf40x64m1_bist 1331 - 9: 0 */ 1332#else 1333 uint64_t status : 10; 1334 uint64_t reserved_10_63 : 54; 1335#endif 1336 } cn52xx; 1337 struct cvmx_agl_gmx_bist_cn52xx cn52xxp1; 1338 struct cvmx_agl_gmx_bist_cn52xx cn56xx; 1339 struct cvmx_agl_gmx_bist_cn52xx cn56xxp1; 1340 struct cvmx_agl_gmx_bist_s cn61xx; 1341 struct cvmx_agl_gmx_bist_s cn63xx; 1342 struct cvmx_agl_gmx_bist_s cn63xxp1; 1343 struct cvmx_agl_gmx_bist_s cn66xx; 1344 struct cvmx_agl_gmx_bist_s cn68xx; 1345 struct cvmx_agl_gmx_bist_s cn68xxp1; 1346}; 1347typedef union cvmx_agl_gmx_bist cvmx_agl_gmx_bist_t; 1348 1349/** 1350 * cvmx_agl_gmx_drv_ctl 1351 * 1352 * AGL_GMX_DRV_CTL = GMX Drive Control 1353 * 1354 * 1355 * Notes: 1356 * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1. 1357 * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1. 
1358 */ 1359union cvmx_agl_gmx_drv_ctl { 1360 uint64_t u64; 1361 struct cvmx_agl_gmx_drv_ctl_s { 1362#ifdef __BIG_ENDIAN_BITFIELD 1363 uint64_t reserved_49_63 : 15; 1364 uint64_t byp_en1 : 1; /**< Compensation Controller Bypass Enable (MII1) */ 1365 uint64_t reserved_45_47 : 3; 1366 uint64_t pctl1 : 5; /**< AGL PCTL (MII1) */ 1367 uint64_t reserved_37_39 : 3; 1368 uint64_t nctl1 : 5; /**< AGL NCTL (MII1) */ 1369 uint64_t reserved_17_31 : 15; 1370 uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */ 1371 uint64_t reserved_13_15 : 3; 1372 uint64_t pctl : 5; /**< AGL PCTL */ 1373 uint64_t reserved_5_7 : 3; 1374 uint64_t nctl : 5; /**< AGL NCTL */ 1375#else 1376 uint64_t nctl : 5; 1377 uint64_t reserved_5_7 : 3; 1378 uint64_t pctl : 5; 1379 uint64_t reserved_13_15 : 3; 1380 uint64_t byp_en : 1; 1381 uint64_t reserved_17_31 : 15; 1382 uint64_t nctl1 : 5; 1383 uint64_t reserved_37_39 : 3; 1384 uint64_t pctl1 : 5; 1385 uint64_t reserved_45_47 : 3; 1386 uint64_t byp_en1 : 1; 1387 uint64_t reserved_49_63 : 15; 1388#endif 1389 } s; 1390 struct cvmx_agl_gmx_drv_ctl_s cn52xx; 1391 struct cvmx_agl_gmx_drv_ctl_s cn52xxp1; 1392 struct cvmx_agl_gmx_drv_ctl_cn56xx { 1393#ifdef __BIG_ENDIAN_BITFIELD 1394 uint64_t reserved_17_63 : 47; 1395 uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */ 1396 uint64_t reserved_13_15 : 3; 1397 uint64_t pctl : 5; /**< AGL PCTL */ 1398 uint64_t reserved_5_7 : 3; 1399 uint64_t nctl : 5; /**< AGL NCTL */ 1400#else 1401 uint64_t nctl : 5; 1402 uint64_t reserved_5_7 : 3; 1403 uint64_t pctl : 5; 1404 uint64_t reserved_13_15 : 3; 1405 uint64_t byp_en : 1; 1406 uint64_t reserved_17_63 : 47; 1407#endif 1408 } cn56xx; 1409 struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1; 1410}; 1411typedef union cvmx_agl_gmx_drv_ctl cvmx_agl_gmx_drv_ctl_t; 1412 1413/** 1414 * cvmx_agl_gmx_inf_mode 1415 * 1416 * AGL_GMX_INF_MODE = Interface Mode 1417 * 1418 * 1419 * Notes: 1420 * Not reset when MIX*_CTL[RESET] is set to 1. 
1421 * 1422 */ 1423union cvmx_agl_gmx_inf_mode { 1424 uint64_t u64; 1425 struct cvmx_agl_gmx_inf_mode_s { 1426#ifdef __BIG_ENDIAN_BITFIELD 1427 uint64_t reserved_2_63 : 62; 1428 uint64_t en : 1; /**< Interface Enable */ 1429 uint64_t reserved_0_0 : 1; 1430#else 1431 uint64_t reserved_0_0 : 1; 1432 uint64_t en : 1; 1433 uint64_t reserved_2_63 : 62; 1434#endif 1435 } s; 1436 struct cvmx_agl_gmx_inf_mode_s cn52xx; 1437 struct cvmx_agl_gmx_inf_mode_s cn52xxp1; 1438 struct cvmx_agl_gmx_inf_mode_s cn56xx; 1439 struct cvmx_agl_gmx_inf_mode_s cn56xxp1; 1440}; 1441typedef union cvmx_agl_gmx_inf_mode cvmx_agl_gmx_inf_mode_t; 1442 1443/** 1444 * cvmx_agl_gmx_prt#_cfg 1445 * 1446 * AGL_GMX_PRT_CFG = Port description 1447 * 1448 * 1449 * Notes: 1450 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 1451 * 1452 */ 1453union cvmx_agl_gmx_prtx_cfg { 1454 uint64_t u64; 1455 struct cvmx_agl_gmx_prtx_cfg_s { 1456#ifdef __BIG_ENDIAN_BITFIELD 1457 uint64_t reserved_14_63 : 50; 1458 uint64_t tx_idle : 1; /**< TX Machine is idle */ 1459 uint64_t rx_idle : 1; /**< RX Machine is idle */ 1460 uint64_t reserved_9_11 : 3; 1461 uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED] 1462 10 = 10Mbs operation 1463 00 = 100Mbs operation 1464 01 = 1000Mbs operation 1465 11 = Reserved */ 1466 uint64_t reserved_7_7 : 1; 1467 uint64_t burst : 1; /**< Half-Duplex Burst Enable 1468 Only valid for 1000Mbs half-duplex operation 1469 0 = burst length of 0x2000 (halfdup / 1000Mbs) 1470 1 = burst length of 0x0 (all other modes) */ 1471 uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send 1472 RMGII traffic. When this bit clear on a given 1473 port, then all packet cycles will appear as 1474 inter-frame cycles. */ 1475 uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive 1476 RMGII traffic. When this bit clear on a given 1477 port, then the all packet cycles will appear as 1478 inter-frame cycles. 
*/ 1479 uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation 1480 0 = 512 bitimes (10/100Mbs operation) 1481 1 = 4096 bitimes (1000Mbs operation) */ 1482 uint64_t duplex : 1; /**< Duplex 1483 0 = Half Duplex (collisions/extentions/bursts) 1484 1 = Full Duplex */ 1485 uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED] 1486 10 = 10Mbs operation 1487 00 = 100Mbs operation 1488 01 = 1000Mbs operation 1489 11 = Reserved */ 1490 uint64_t en : 1; /**< Link Enable 1491 When EN is clear, packets will not be received 1492 or transmitted (including PAUSE and JAM packets). 1493 If EN is cleared while a packet is currently 1494 being received or transmitted, the packet will 1495 be allowed to complete before the bus is idled. 1496 On the RX side, subsequent packets in a burst 1497 will be ignored. */ 1498#else 1499 uint64_t en : 1; 1500 uint64_t speed : 1; 1501 uint64_t duplex : 1; 1502 uint64_t slottime : 1; 1503 uint64_t rx_en : 1; 1504 uint64_t tx_en : 1; 1505 uint64_t burst : 1; 1506 uint64_t reserved_7_7 : 1; 1507 uint64_t speed_msb : 1; 1508 uint64_t reserved_9_11 : 3; 1509 uint64_t rx_idle : 1; 1510 uint64_t tx_idle : 1; 1511 uint64_t reserved_14_63 : 50; 1512#endif 1513 } s; 1514 struct cvmx_agl_gmx_prtx_cfg_cn52xx { 1515#ifdef __BIG_ENDIAN_BITFIELD 1516 uint64_t reserved_6_63 : 58; 1517 uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send 1518 RMGII traffic. When this bit clear on a given 1519 port, then all MII cycles will appear as 1520 inter-frame cycles. */ 1521 uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive 1522 RMGII traffic. When this bit clear on a given 1523 port, then the all MII cycles will appear as 1524 inter-frame cycles. 
*/ 1525 uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation 1526 0 = 512 bitimes (10/100Mbs operation) 1527 1 = Reserved */ 1528 uint64_t duplex : 1; /**< Duplex 1529 0 = Half Duplex (collisions/extentions/bursts) 1530 1 = Full Duplex */ 1531 uint64_t speed : 1; /**< Link Speed 1532 0 = 10/100Mbs operation 1533 1 = Reserved */ 1534 uint64_t en : 1; /**< Link Enable 1535 When EN is clear, packets will not be received 1536 or transmitted (including PAUSE and JAM packets). 1537 If EN is cleared while a packet is currently 1538 being received or transmitted, the packet will 1539 be allowed to complete before the bus is idled. 1540 On the RX side, subsequent packets in a burst 1541 will be ignored. */ 1542#else 1543 uint64_t en : 1; 1544 uint64_t speed : 1; 1545 uint64_t duplex : 1; 1546 uint64_t slottime : 1; 1547 uint64_t rx_en : 1; 1548 uint64_t tx_en : 1; 1549 uint64_t reserved_6_63 : 58; 1550#endif 1551 } cn52xx; 1552 struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1; 1553 struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx; 1554 struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1; 1555 struct cvmx_agl_gmx_prtx_cfg_s cn61xx; 1556 struct cvmx_agl_gmx_prtx_cfg_s cn63xx; 1557 struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1; 1558 struct cvmx_agl_gmx_prtx_cfg_s cn66xx; 1559 struct cvmx_agl_gmx_prtx_cfg_s cn68xx; 1560 struct cvmx_agl_gmx_prtx_cfg_s cn68xxp1; 1561}; 1562typedef union cvmx_agl_gmx_prtx_cfg cvmx_agl_gmx_prtx_cfg_t; 1563 1564/** 1565 * cvmx_agl_gmx_rx#_adr_cam0 1566 * 1567 * AGL_GMX_RX_ADR_CAM = Address Filtering Control 1568 * 1569 * 1570 * Notes: 1571 * Not reset when MIX*_CTL[RESET] is set to 1. 1572 * 1573 */ 1574union cvmx_agl_gmx_rxx_adr_cam0 { 1575 uint64_t u64; 1576 struct cvmx_agl_gmx_rxx_adr_cam0_s { 1577#ifdef __BIG_ENDIAN_BITFIELD 1578 uint64_t adr : 64; /**< The DMAC address to match on 1579 Each entry contributes 8bits to one of 8 matchers. 1580 The CAM matches against unicst or multicst DMAC 1581 addresses. 
*/ 1582#else 1583 uint64_t adr : 64; 1584#endif 1585 } s; 1586 struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx; 1587 struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1; 1588 struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx; 1589 struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1; 1590 struct cvmx_agl_gmx_rxx_adr_cam0_s cn61xx; 1591 struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx; 1592 struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1; 1593 struct cvmx_agl_gmx_rxx_adr_cam0_s cn66xx; 1594 struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xx; 1595 struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xxp1; 1596}; 1597typedef union cvmx_agl_gmx_rxx_adr_cam0 cvmx_agl_gmx_rxx_adr_cam0_t; 1598 1599/** 1600 * cvmx_agl_gmx_rx#_adr_cam1 1601 * 1602 * AGL_GMX_RX_ADR_CAM = Address Filtering Control 1603 * 1604 * 1605 * Notes: 1606 * Not reset when MIX*_CTL[RESET] is set to 1. 1607 * 1608 */ 1609union cvmx_agl_gmx_rxx_adr_cam1 { 1610 uint64_t u64; 1611 struct cvmx_agl_gmx_rxx_adr_cam1_s { 1612#ifdef __BIG_ENDIAN_BITFIELD 1613 uint64_t adr : 64; /**< The DMAC address to match on 1614 Each entry contributes 8bits to one of 8 matchers. 1615 The CAM matches against unicst or multicst DMAC 1616 addresses. */ 1617#else 1618 uint64_t adr : 64; 1619#endif 1620 } s; 1621 struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx; 1622 struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1; 1623 struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx; 1624 struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1; 1625 struct cvmx_agl_gmx_rxx_adr_cam1_s cn61xx; 1626 struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx; 1627 struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1; 1628 struct cvmx_agl_gmx_rxx_adr_cam1_s cn66xx; 1629 struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xx; 1630 struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xxp1; 1631}; 1632typedef union cvmx_agl_gmx_rxx_adr_cam1 cvmx_agl_gmx_rxx_adr_cam1_t; 1633 1634/** 1635 * cvmx_agl_gmx_rx#_adr_cam2 1636 * 1637 * AGL_GMX_RX_ADR_CAM = Address Filtering Control 1638 * 1639 * 1640 * Notes: 1641 * Not reset when MIX*_CTL[RESET] is set to 1. 
1642 * 1643 */ 1644union cvmx_agl_gmx_rxx_adr_cam2 { 1645 uint64_t u64; 1646 struct cvmx_agl_gmx_rxx_adr_cam2_s { 1647#ifdef __BIG_ENDIAN_BITFIELD 1648 uint64_t adr : 64; /**< The DMAC address to match on 1649 Each entry contributes 8bits to one of 8 matchers. 1650 The CAM matches against unicst or multicst DMAC 1651 addresses. */ 1652#else 1653 uint64_t adr : 64; 1654#endif 1655 } s; 1656 struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx; 1657 struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1; 1658 struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx; 1659 struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1; 1660 struct cvmx_agl_gmx_rxx_adr_cam2_s cn61xx; 1661 struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx; 1662 struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1; 1663 struct cvmx_agl_gmx_rxx_adr_cam2_s cn66xx; 1664 struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xx; 1665 struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xxp1; 1666}; 1667typedef union cvmx_agl_gmx_rxx_adr_cam2 cvmx_agl_gmx_rxx_adr_cam2_t; 1668 1669/** 1670 * cvmx_agl_gmx_rx#_adr_cam3 1671 * 1672 * AGL_GMX_RX_ADR_CAM = Address Filtering Control 1673 * 1674 * 1675 * Notes: 1676 * Not reset when MIX*_CTL[RESET] is set to 1. 1677 * 1678 */ 1679union cvmx_agl_gmx_rxx_adr_cam3 { 1680 uint64_t u64; 1681 struct cvmx_agl_gmx_rxx_adr_cam3_s { 1682#ifdef __BIG_ENDIAN_BITFIELD 1683 uint64_t adr : 64; /**< The DMAC address to match on 1684 Each entry contributes 8bits to one of 8 matchers. 1685 The CAM matches against unicst or multicst DMAC 1686 addresses. 
*/ 1687#else 1688 uint64_t adr : 64; 1689#endif 1690 } s; 1691 struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx; 1692 struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1; 1693 struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx; 1694 struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1; 1695 struct cvmx_agl_gmx_rxx_adr_cam3_s cn61xx; 1696 struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx; 1697 struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1; 1698 struct cvmx_agl_gmx_rxx_adr_cam3_s cn66xx; 1699 struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xx; 1700 struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xxp1; 1701}; 1702typedef union cvmx_agl_gmx_rxx_adr_cam3 cvmx_agl_gmx_rxx_adr_cam3_t; 1703 1704/** 1705 * cvmx_agl_gmx_rx#_adr_cam4 1706 * 1707 * AGL_GMX_RX_ADR_CAM = Address Filtering Control 1708 * 1709 * 1710 * Notes: 1711 * Not reset when MIX*_CTL[RESET] is set to 1. 1712 * 1713 */ 1714union cvmx_agl_gmx_rxx_adr_cam4 { 1715 uint64_t u64; 1716 struct cvmx_agl_gmx_rxx_adr_cam4_s { 1717#ifdef __BIG_ENDIAN_BITFIELD 1718 uint64_t adr : 64; /**< The DMAC address to match on 1719 Each entry contributes 8bits to one of 8 matchers. 1720 The CAM matches against unicst or multicst DMAC 1721 addresses. */ 1722#else 1723 uint64_t adr : 64; 1724#endif 1725 } s; 1726 struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx; 1727 struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1; 1728 struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx; 1729 struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1; 1730 struct cvmx_agl_gmx_rxx_adr_cam4_s cn61xx; 1731 struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx; 1732 struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1; 1733 struct cvmx_agl_gmx_rxx_adr_cam4_s cn66xx; 1734 struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xx; 1735 struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xxp1; 1736}; 1737typedef union cvmx_agl_gmx_rxx_adr_cam4 cvmx_agl_gmx_rxx_adr_cam4_t; 1738 1739/** 1740 * cvmx_agl_gmx_rx#_adr_cam5 1741 * 1742 * AGL_GMX_RX_ADR_CAM = Address Filtering Control 1743 * 1744 * 1745 * Notes: 1746 * Not reset when MIX*_CTL[RESET] is set to 1. 
1747 * 1748 */ 1749union cvmx_agl_gmx_rxx_adr_cam5 { 1750 uint64_t u64; 1751 struct cvmx_agl_gmx_rxx_adr_cam5_s { 1752#ifdef __BIG_ENDIAN_BITFIELD 1753 uint64_t adr : 64; /**< The DMAC address to match on 1754 Each entry contributes 8bits to one of 8 matchers. 1755 The CAM matches against unicst or multicst DMAC 1756 addresses. */ 1757#else 1758 uint64_t adr : 64; 1759#endif 1760 } s; 1761 struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx; 1762 struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1; 1763 struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx; 1764 struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1; 1765 struct cvmx_agl_gmx_rxx_adr_cam5_s cn61xx; 1766 struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx; 1767 struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1; 1768 struct cvmx_agl_gmx_rxx_adr_cam5_s cn66xx; 1769 struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xx; 1770 struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xxp1; 1771}; 1772typedef union cvmx_agl_gmx_rxx_adr_cam5 cvmx_agl_gmx_rxx_adr_cam5_t; 1773 1774/** 1775 * cvmx_agl_gmx_rx#_adr_cam_en 1776 * 1777 * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable 1778 * 1779 * 1780 * Notes: 1781 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 
1782 * 1783 */ 1784union cvmx_agl_gmx_rxx_adr_cam_en { 1785 uint64_t u64; 1786 struct cvmx_agl_gmx_rxx_adr_cam_en_s { 1787#ifdef __BIG_ENDIAN_BITFIELD 1788 uint64_t reserved_8_63 : 56; 1789 uint64_t en : 8; /**< CAM Entry Enables */ 1790#else 1791 uint64_t en : 8; 1792 uint64_t reserved_8_63 : 56; 1793#endif 1794 } s; 1795 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx; 1796 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1; 1797 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx; 1798 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1; 1799 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn61xx; 1800 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx; 1801 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1; 1802 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn66xx; 1803 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xx; 1804 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xxp1; 1805}; 1806typedef union cvmx_agl_gmx_rxx_adr_cam_en cvmx_agl_gmx_rxx_adr_cam_en_t; 1807 1808/** 1809 * cvmx_agl_gmx_rx#_adr_ctl 1810 * 1811 * AGL_GMX_RX_ADR_CTL = Address Filtering Control 1812 * 1813 * 1814 * Notes: 1815 * * ALGORITHM 1816 * Here is some pseudo code that represents the address filter behavior. 1817 * 1818 * @verbatim 1819 * bool dmac_addr_filter(uint8 prt, uint48 dmac) [ 1820 * ASSERT(prt >= 0 && prt <= 3); 1821 * if (is_bcst(dmac)) // broadcast accept 1822 * return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? 
ACCEPT : REJECT); 1823 * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject 1824 * return REJECT; 1825 * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept 1826 * return ACCEPT; 1827 * 1828 * cam_hit = 0; 1829 * 1830 * for (i=0; i<8; i++) [ 1831 * if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0) 1832 * continue; 1833 * uint48 unswizzled_mac_adr = 0x0; 1834 * for (j=5; j>=0; j--) [ 1835 * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>]; 1836 * ] 1837 * if (unswizzled_mac_adr == dmac) [ 1838 * cam_hit = 1; 1839 * break; 1840 * ] 1841 * ] 1842 * 1843 * if (cam_hit) 1844 * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT); 1845 * else 1846 * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT); 1847 * ] 1848 * @endverbatim 1849 * 1850 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 1851 */ 1852union cvmx_agl_gmx_rxx_adr_ctl { 1853 uint64_t u64; 1854 struct cvmx_agl_gmx_rxx_adr_ctl_s { 1855#ifdef __BIG_ENDIAN_BITFIELD 1856 uint64_t reserved_4_63 : 60; 1857 uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter 1858 0 = reject the packet on DMAC address match 1859 1 = accept the packet on DMAC address match */ 1860 uint64_t mcst : 2; /**< Multicast Mode 1861 0 = Use the Address Filter CAM 1862 1 = Force reject all multicast packets 1863 2 = Force accept all multicast packets 1864 3 = Reserved */ 1865 uint64_t bcst : 1; /**< Accept All Broadcast Packets */ 1866#else 1867 uint64_t bcst : 1; 1868 uint64_t mcst : 2; 1869 uint64_t cam_mode : 1; 1870 uint64_t reserved_4_63 : 60; 1871#endif 1872 } s; 1873 struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx; 1874 struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1; 1875 struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx; 1876 struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1; 1877 struct cvmx_agl_gmx_rxx_adr_ctl_s cn61xx; 1878 struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx; 1879 struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1; 1880 struct 
cvmx_agl_gmx_rxx_adr_ctl_s cn66xx; 1881 struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xx; 1882 struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xxp1; 1883}; 1884typedef union cvmx_agl_gmx_rxx_adr_ctl cvmx_agl_gmx_rxx_adr_ctl_t; 1885 1886/** 1887 * cvmx_agl_gmx_rx#_decision 1888 * 1889 * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet 1890 * 1891 * 1892 * Notes: 1893 * As each byte in a packet is received by GMX, the L2 byte count is compared 1894 * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes 1895 * from the beginning of the L2 header (DMAC). In normal operation, the L2 1896 * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any 1897 * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]). 1898 * 1899 * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the 1900 * packet and would require UDD skip length to account for them. 1901 * 1902 * L2 Size 1903 * Port Mode <=AGL_GMX_RX_DECISION bytes (default=24) >AGL_GMX_RX_DECISION bytes (default=24) 1904 * 1905 * MII/Full Duplex accept packet apply filters 1906 * no filtering is applied accept packet based on DMAC and PAUSE packet filters 1907 * 1908 * MII/Half Duplex drop packet apply filters 1909 * packet is unconditionally dropped accept packet based on DMAC 1910 * 1911 * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8) 1912 * 1913 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 1914 */ 1915union cvmx_agl_gmx_rxx_decision { 1916 uint64_t u64; 1917 struct cvmx_agl_gmx_rxx_decision_s { 1918#ifdef __BIG_ENDIAN_BITFIELD 1919 uint64_t reserved_5_63 : 59; 1920 uint64_t cnt : 5; /**< The byte count to decide when to accept or filter 1921 a packet. 
*/ 1922#else 1923 uint64_t cnt : 5; 1924 uint64_t reserved_5_63 : 59; 1925#endif 1926 } s; 1927 struct cvmx_agl_gmx_rxx_decision_s cn52xx; 1928 struct cvmx_agl_gmx_rxx_decision_s cn52xxp1; 1929 struct cvmx_agl_gmx_rxx_decision_s cn56xx; 1930 struct cvmx_agl_gmx_rxx_decision_s cn56xxp1; 1931 struct cvmx_agl_gmx_rxx_decision_s cn61xx; 1932 struct cvmx_agl_gmx_rxx_decision_s cn63xx; 1933 struct cvmx_agl_gmx_rxx_decision_s cn63xxp1; 1934 struct cvmx_agl_gmx_rxx_decision_s cn66xx; 1935 struct cvmx_agl_gmx_rxx_decision_s cn68xx; 1936 struct cvmx_agl_gmx_rxx_decision_s cn68xxp1; 1937}; 1938typedef union cvmx_agl_gmx_rxx_decision cvmx_agl_gmx_rxx_decision_t; 1939 1940/** 1941 * cvmx_agl_gmx_rx#_frm_chk 1942 * 1943 * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame 1944 * 1945 * 1946 * Notes: 1947 * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW. 1948 * 1949 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 1950 */ 1951union cvmx_agl_gmx_rxx_frm_chk { 1952 uint64_t u64; 1953 struct cvmx_agl_gmx_rxx_frm_chk_s { 1954#ifdef __BIG_ENDIAN_BITFIELD 1955 uint64_t reserved_10_63 : 54; 1956 uint64_t niberr : 1; /**< Nibble error */ 1957 uint64_t skperr : 1; /**< Skipper error */ 1958 uint64_t rcverr : 1; /**< Frame was received with packet data reception error */ 1959 uint64_t lenerr : 1; /**< Frame was received with length error */ 1960 uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 1961 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 1962 uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 1963 uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 1964 uint64_t carext : 1; /**< Carrier extend error */ 1965 uint64_t minerr : 1; /**< Frame was received with length < min_length */ 1966#else 1967 uint64_t minerr : 1; 1968 uint64_t carext : 1; 1969 uint64_t maxerr : 1; 1970 uint64_t jabber : 1; 1971 uint64_t fcserr : 1; 1972 uint64_t alnerr : 
1; 1973 uint64_t lenerr : 1; 1974 uint64_t rcverr : 1; 1975 uint64_t skperr : 1; 1976 uint64_t niberr : 1; 1977 uint64_t reserved_10_63 : 54; 1978#endif 1979 } s; 1980 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx { 1981#ifdef __BIG_ENDIAN_BITFIELD 1982 uint64_t reserved_9_63 : 55; 1983 uint64_t skperr : 1; /**< Skipper error */ 1984 uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */ 1985 uint64_t lenerr : 1; /**< Frame was received with length error */ 1986 uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 1987 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 1988 uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 1989 uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 1990 uint64_t reserved_1_1 : 1; 1991 uint64_t minerr : 1; /**< Frame was received with length < min_length */ 1992#else 1993 uint64_t minerr : 1; 1994 uint64_t reserved_1_1 : 1; 1995 uint64_t maxerr : 1; 1996 uint64_t jabber : 1; 1997 uint64_t fcserr : 1; 1998 uint64_t alnerr : 1; 1999 uint64_t lenerr : 1; 2000 uint64_t rcverr : 1; 2001 uint64_t skperr : 1; 2002 uint64_t reserved_9_63 : 55; 2003#endif 2004 } cn52xx; 2005 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1; 2006 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx; 2007 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1; 2008 struct cvmx_agl_gmx_rxx_frm_chk_s cn61xx; 2009 struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx; 2010 struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1; 2011 struct cvmx_agl_gmx_rxx_frm_chk_s cn66xx; 2012 struct cvmx_agl_gmx_rxx_frm_chk_s cn68xx; 2013 struct cvmx_agl_gmx_rxx_frm_chk_s cn68xxp1; 2014}; 2015typedef union cvmx_agl_gmx_rxx_frm_chk cvmx_agl_gmx_rxx_frm_chk_t; 2016 2017/** 2018 * cvmx_agl_gmx_rx#_frm_ctl 2019 * 2020 * AGL_GMX_RX_FRM_CTL = Frame Control 2021 * 2022 * 2023 * Notes: 2024 * * PRE_STRP 2025 * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP 2026 * determines if the PREAMBLE+SFD bytes are 
thrown away or sent to the Octane 2027 * core as part of the packet. 2028 * 2029 * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet 2030 * size when checking against the MIN and MAX bounds. Furthermore, the bytes 2031 * are skipped when locating the start of the L2 header for DMAC and Control 2032 * frame recognition. 2033 * 2034 * * CTL_BCK/CTL_DRP 2035 * These bits control how the HW handles incoming PAUSE packets. Here are 2036 * the most common modes of operation: 2037 * CTL_BCK=1,CTL_DRP=1 - HW does it all 2038 * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames 2039 * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored 2040 * 2041 * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode. 2042 * Since PAUSE packets only apply to fulldup operation, any PAUSE packet 2043 * would constitute an exception which should be handled by the processing 2044 * cores. PAUSE packets should not be forwarded. 2045 * 2046 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 2047 */ 2048union cvmx_agl_gmx_rxx_frm_ctl { 2049 uint64_t u64; 2050 struct cvmx_agl_gmx_rxx_frm_ctl_s { 2051#ifdef __BIG_ENDIAN_BITFIELD 2052 uint64_t reserved_13_63 : 51; 2053 uint64_t ptp_mode : 1; /**< Timestamp mode 2054 When PTP_MODE is set, a 64-bit timestamp will be 2055 prepended to every incoming packet. The timestamp 2056 bytes are added to the packet in such a way as to 2057 not modify the packet's receive byte count. This 2058 implies that the AGL_GMX_RX_JABBER, 2059 AGL_GMX_RX_FRM_MIN, AGL_GMX_RX_FRM_MAX, 2060 AGL_GMX_RX_DECISION, AGL_GMX_RX_UDD_SKP, and the 2061 AGL_GMX_RX_STATS_* do not require any adjustment 2062 as they operate on the received packet size. 2063 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. 
*/ 2064 uint64_t reserved_11_11 : 1; 2065 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks 2066 due to PARITAL packets */ 2067 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 2068 regardless of the number of previous PREAMBLE 2069 nibbles. In this mode, PRE_STRP should be set to 2070 account for the variable nature of the PREAMBLE. 2071 PRE_CHK must be set to enable this and all 2072 PREAMBLE features. */ 2073 uint64_t pad_len : 1; /**< When set, disables the length check for non-min 2074 sized pkts with padding in the client data */ 2075 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ 2076 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 2077 AGL will begin the frame at the first SFD. 2078 PRE_FREE must be set if PRE_ALIGN is set. 2079 PRE_CHK must be set to enable this and all 2080 PREAMBLE features. */ 2081 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 2082 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 2083 Multicast address */ 2084 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 2085 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 2086 uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 2087 0=PREAMBLE+SFD is sent to core as part of frame 2088 1=PREAMBLE+SFD is dropped 2089 PRE_STRP must be set if PRE_ALIGN is set. 2090 PRE_CHK must be set to enable this and all 2091 PREAMBLE features. */ 2092 uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3 2093 PREAMBLE to begin every frame. AGL checks that a 2094 valid PREAMBLE is received (based on PRE_FREE). 2095 When a problem does occur within the PREAMBLE 2096 seqeunce, the frame is marked as bad and not sent 2097 into the core. The AGL_GMX_RX_INT_REG[PCTERR] 2098 interrupt is also raised. 
*/ 2099#else 2100 uint64_t pre_chk : 1; 2101 uint64_t pre_strp : 1; 2102 uint64_t ctl_drp : 1; 2103 uint64_t ctl_bck : 1; 2104 uint64_t ctl_mcst : 1; 2105 uint64_t ctl_smac : 1; 2106 uint64_t pre_free : 1; 2107 uint64_t vlan_len : 1; 2108 uint64_t pad_len : 1; 2109 uint64_t pre_align : 1; 2110 uint64_t null_dis : 1; 2111 uint64_t reserved_11_11 : 1; 2112 uint64_t ptp_mode : 1; 2113 uint64_t reserved_13_63 : 51; 2114#endif 2115 } s; 2116 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx { 2117#ifdef __BIG_ENDIAN_BITFIELD 2118 uint64_t reserved_10_63 : 54; 2119 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 2120 regardless of the number of previous PREAMBLE 2121 nibbles. In this mode, PREAMBLE can be consumed 2122 by the HW so when PRE_ALIGN is set, PRE_FREE, 2123 PRE_STRP must be set for correct operation. 2124 PRE_CHK must be set to enable this and all 2125 PREAMBLE features. */ 2126 uint64_t pad_len : 1; /**< When set, disables the length check for non-min 2127 sized pkts with padding in the client data */ 2128 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ 2129 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 2130 0 - 254 cycles of PREAMBLE followed by SFD 2131 PRE_FREE must be set if PRE_ALIGN is set. 2132 PRE_CHK must be set to enable this and all 2133 PREAMBLE features. */ 2134 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 2135 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 2136 Multicast address */ 2137 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 2138 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 2139 uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 2140 0=PREAMBLE+SFD is sent to core as part of frame 2141 1=PREAMBLE+SFD is dropped 2142 PRE_STRP must be set if PRE_ALIGN is set. 2143 PRE_CHK must be set to enable this and all 2144 PREAMBLE features. 
*/ 2145 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD 2146 to begin every frame. GMX checks that the 2147 PREAMBLE is sent correctly */ 2148#else 2149 uint64_t pre_chk : 1; 2150 uint64_t pre_strp : 1; 2151 uint64_t ctl_drp : 1; 2152 uint64_t ctl_bck : 1; 2153 uint64_t ctl_mcst : 1; 2154 uint64_t ctl_smac : 1; 2155 uint64_t pre_free : 1; 2156 uint64_t vlan_len : 1; 2157 uint64_t pad_len : 1; 2158 uint64_t pre_align : 1; 2159 uint64_t reserved_10_63 : 54; 2160#endif 2161 } cn52xx; 2162 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1; 2163 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx; 2164 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1; 2165 struct cvmx_agl_gmx_rxx_frm_ctl_s cn61xx; 2166 struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx; 2167 struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1; 2168 struct cvmx_agl_gmx_rxx_frm_ctl_s cn66xx; 2169 struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xx; 2170 struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xxp1; 2171}; 2172typedef union cvmx_agl_gmx_rxx_frm_ctl cvmx_agl_gmx_rxx_frm_ctl_t; 2173 2174/** 2175 * cvmx_agl_gmx_rx#_frm_max 2176 * 2177 * AGL_GMX_RX_FRM_MAX = Frame Max length 2178 * 2179 * 2180 * Notes: 2181 * When changing the LEN field, be sure that LEN does not exceed 2182 * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that 2183 * are within the maximum length parameter to be rejected because they exceed 2184 * the AGL_GMX_RX_JABBER[CNT] limit. 2185 * 2186 * Notes: 2187 * 2188 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 2189 */ 2190union cvmx_agl_gmx_rxx_frm_max { 2191 uint64_t u64; 2192 struct cvmx_agl_gmx_rxx_frm_max_s { 2193#ifdef __BIG_ENDIAN_BITFIELD 2194 uint64_t reserved_16_63 : 48; 2195 uint64_t len : 16; /**< Byte count for Max-sized frame check 2196 AGL_GMX_RXn_FRM_CHK[MAXERR] enables the check 2197 for port n. 
2198 If enabled, failing packets set the MAXERR 2199 interrupt and the MIX opcode is set to OVER_FCS 2200 (0x3, if packet has bad FCS) or OVER_ERR (0x4, if 2201 packet has good FCS). 2202 LEN <= AGL_GMX_RX_JABBER[CNT] */ 2203#else 2204 uint64_t len : 16; 2205 uint64_t reserved_16_63 : 48; 2206#endif 2207 } s; 2208 struct cvmx_agl_gmx_rxx_frm_max_s cn52xx; 2209 struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1; 2210 struct cvmx_agl_gmx_rxx_frm_max_s cn56xx; 2211 struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1; 2212 struct cvmx_agl_gmx_rxx_frm_max_s cn61xx; 2213 struct cvmx_agl_gmx_rxx_frm_max_s cn63xx; 2214 struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1; 2215 struct cvmx_agl_gmx_rxx_frm_max_s cn66xx; 2216 struct cvmx_agl_gmx_rxx_frm_max_s cn68xx; 2217 struct cvmx_agl_gmx_rxx_frm_max_s cn68xxp1; 2218}; 2219typedef union cvmx_agl_gmx_rxx_frm_max cvmx_agl_gmx_rxx_frm_max_t; 2220 2221/** 2222 * cvmx_agl_gmx_rx#_frm_min 2223 * 2224 * AGL_GMX_RX_FRM_MIN = Frame Min length 2225 * 2226 * 2227 * Notes: 2228 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 2229 * 2230 */ 2231union cvmx_agl_gmx_rxx_frm_min { 2232 uint64_t u64; 2233 struct cvmx_agl_gmx_rxx_frm_min_s { 2234#ifdef __BIG_ENDIAN_BITFIELD 2235 uint64_t reserved_16_63 : 48; 2236 uint64_t len : 16; /**< Byte count for Min-sized frame check 2237 AGL_GMX_RXn_FRM_CHK[MINERR] enables the check 2238 for port n. 2239 If enabled, failing packets set the MINERR 2240 interrupt and the MIX opcode is set to UNDER_FCS 2241 (0x6, if packet has bad FCS) or UNDER_ERR (0x8, 2242 if packet has good FCS). 
*/ 2243#else 2244 uint64_t len : 16; 2245 uint64_t reserved_16_63 : 48; 2246#endif 2247 } s; 2248 struct cvmx_agl_gmx_rxx_frm_min_s cn52xx; 2249 struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1; 2250 struct cvmx_agl_gmx_rxx_frm_min_s cn56xx; 2251 struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1; 2252 struct cvmx_agl_gmx_rxx_frm_min_s cn61xx; 2253 struct cvmx_agl_gmx_rxx_frm_min_s cn63xx; 2254 struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1; 2255 struct cvmx_agl_gmx_rxx_frm_min_s cn66xx; 2256 struct cvmx_agl_gmx_rxx_frm_min_s cn68xx; 2257 struct cvmx_agl_gmx_rxx_frm_min_s cn68xxp1; 2258}; 2259typedef union cvmx_agl_gmx_rxx_frm_min cvmx_agl_gmx_rxx_frm_min_t; 2260 2261/** 2262 * cvmx_agl_gmx_rx#_ifg 2263 * 2264 * AGL_GMX_RX_IFG = RX Min IFG 2265 * 2266 * 2267 * Notes: 2268 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 2269 * 2270 */ 2271union cvmx_agl_gmx_rxx_ifg { 2272 uint64_t u64; 2273 struct cvmx_agl_gmx_rxx_ifg_s { 2274#ifdef __BIG_ENDIAN_BITFIELD 2275 uint64_t reserved_4_63 : 60; 2276 uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to 2277 determine IFGERR. Normally IFG is 96 bits. 2278 Note in some operating modes, IFG cycles can be 2279 inserted or removed in order to achieve clock rate 2280 adaptation. For these reasons, the default value 2281 is slightly conservative and does not check upto 2282 the full 96 bits of IFG. 
 */
#else
	uint64_t ifg : 4;
	uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
	struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
	struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_ifg_s cn61xx;
	struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
	struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_ifg_s cn66xx;
	struct cvmx_agl_gmx_rxx_ifg_s cn68xx;
	struct cvmx_agl_gmx_rxx_ifg_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_ifg cvmx_agl_gmx_rxx_ifg_t;

/**
 * cvmx_agl_gmx_rx#_int_en
 *
 * AGL_GMX_RX_INT_EN = Interrupt Enable
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_rxx_int_en {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_20_63 : 44;
	uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
	uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
	uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
	uint64_t ifgerr : 1; /**< Interframe Gap Violation */
	uint64_t coldet : 1; /**< Collision Detection */
	uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
	uint64_t rsverr : 1; /**< Packet reserved opcodes */
	uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
	uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
	uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
	uint64_t skperr : 1; /**< Skipper error */
	uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
	uint64_t lenerr : 1; /**< Frame was received with length error */
	uint64_t alnerr : 1; /**< Frame was received with an alignment error */
	uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
	uint64_t jabber : 1; /**< Frame was received with length > sys_length */
	uint64_t maxerr : 1; /**< Frame was received with length > max_length */
	uint64_t carext : 1; /**< Carrier extend error */
	uint64_t minerr : 1; /**< Frame was received with length < min_length */
#else
	uint64_t minerr : 1;
	uint64_t carext : 1;
	uint64_t maxerr : 1;
	uint64_t jabber : 1;
	uint64_t fcserr : 1;
	uint64_t alnerr : 1;
	uint64_t lenerr : 1;
	uint64_t rcverr : 1;
	uint64_t skperr : 1;
	uint64_t niberr : 1;
	uint64_t ovrerr : 1;
	uint64_t pcterr : 1;
	uint64_t rsverr : 1;
	uint64_t falerr : 1;
	uint64_t coldet : 1;
	uint64_t ifgerr : 1;
	uint64_t phy_link : 1;
	uint64_t phy_spd : 1;
	uint64_t phy_dupx : 1;
	uint64_t pause_drp : 1;
	uint64_t reserved_20_63 : 44;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_int_en_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_20_63 : 44;
	uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t reserved_16_18 : 3;
	uint64_t ifgerr : 1; /**< Interframe Gap Violation */
	uint64_t coldet : 1; /**< Collision Detection */
	uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
	uint64_t rsverr : 1; /**< MII reserved opcodes */
	uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
	uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
	uint64_t reserved_9_9 : 1;
	uint64_t skperr : 1; /**< Skipper error */
	uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
	uint64_t lenerr : 1; /**< Frame was received with length error */
	uint64_t alnerr : 1; /**< Frame was received with an alignment error */
	uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
	uint64_t jabber : 1; /**< Frame was received with length > sys_length */
	uint64_t maxerr : 1; /**< Frame was received with length > max_length */
	uint64_t reserved_1_1 : 1;
	uint64_t minerr : 1; /**< Frame was received with length < min_length */
#else
	uint64_t minerr : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t maxerr : 1;
	uint64_t jabber : 1;
	uint64_t fcserr : 1;
	uint64_t alnerr : 1;
	uint64_t lenerr : 1;
	uint64_t rcverr : 1;
	uint64_t skperr : 1;
	uint64_t reserved_9_9 : 1;
	uint64_t ovrerr : 1;
	uint64_t pcterr : 1;
	uint64_t rsverr : 1;
	uint64_t falerr : 1;
	uint64_t coldet : 1;
	uint64_t ifgerr : 1;
	uint64_t reserved_16_18 : 3;
	uint64_t pause_drp : 1;
	uint64_t reserved_20_63 : 44;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
	struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
	struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
	struct cvmx_agl_gmx_rxx_int_en_s cn61xx;
	struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
	struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_int_en_s cn66xx;
	struct cvmx_agl_gmx_rxx_int_en_s cn68xx;
	struct cvmx_agl_gmx_rxx_int_en_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_int_en cvmx_agl_gmx_rxx_int_en_t;

/**
 * cvmx_agl_gmx_rx#_int_reg
 *
 * AGL_GMX_RX_INT_REG = Interrupt Register
 *
 *
 * Notes:
 * (1) exceptions will only be raised to the control processor if the
 *     corresponding bit in the AGL_GMX_RX_INT_EN register is set.
 *
 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
 *     packet's workQ entry.  The AGL_GMX_RX_FRM_CHK register provides a bit mask
 *     for configuring which conditions set the error.
 *
 * (3) in half duplex operation, the expectation is that collisions will appear
 *     as MINERRs.
 *
 * (4) JABBER - An RX Jabber error indicates that a packet was received which
 *     is longer than the maximum allowed packet as defined by the
 *     system.  GMX will truncate the packet at the JABBER count.
 *     Failure to do so could lead to system instability.
 *
 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
 *     AGL_GMX_RX_FRM_MAX.  For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
 *     > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
 *
 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
 *
 * (8) ALNERR - Indicates that the packet received was not an integer number of
 *     bytes.  If FCS checking is enabled, ALNERR will only assert if
 *     the FCS is bad.  If FCS checking is disabled, ALNERR will
 *     assert in all non-integer frame cases.
 *
 * (9) Collisions - Collisions can only occur in half-duplex mode.  A collision
 *     is assumed by the receiver when the received
 *     frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
 *
 * (A) LENERR - Length errors occur when the received packet does not match the
 *     length field.  LENERR is only checked for packets between 64
 *     and 1500 bytes.  For untagged frames, the length must exact
 *     match.  For tagged frames the length or length+4 must match.
 *
 * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
 *     Does not check the number of PREAMBLE cycles.
 *
 * (C) OVRERR -
 *
 *     OVRERR is an architectural assertion check internal to GMX to
 *     make sure no assumption was violated.  In a correctly operating
 *     system, this interrupt can never fire.
 *
 *     GMX has an internal arbiter which selects which of 4 ports to
 *     buffer in the main RX FIFO.  If we normally buffer 8 bytes,
 *     then each port will typically push a tick every 8 cycles - if
 *     the packet interface is going as fast as possible.  If there
 *     are four ports, they push every two cycles.  So that's the
 *     assumption.  That the inbound module will always be able to
 *     consume the tick before another is produced.  If that doesn't
 *     happen - that's when OVRERR will assert.
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_int_reg {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_20_63 : 44;
	uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
	uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
	uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
	uint64_t ifgerr : 1; /**< Interframe Gap Violation
	                          Does not necessarily indicate a failure */
	uint64_t coldet : 1; /**< Collision Detection */
	uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
	uint64_t rsverr : 1; /**< Packet reserved opcodes */
	uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
	uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
	                          This interrupt should never assert */
	uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
	uint64_t skperr : 1; /**< Skipper error */
	uint64_t rcverr : 1; /**< Frame was received with Packet Data reception error */
	uint64_t lenerr : 1; /**< Frame was received with length error */
	uint64_t alnerr : 1; /**< Frame was received with an alignment error */
	uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
	uint64_t jabber : 1; /**< Frame was received with length > sys_length */
	uint64_t maxerr : 1; /**< Frame was received with length > max_length */
	uint64_t carext : 1; /**< Carrier extend error */
	uint64_t minerr : 1; /**< Frame was received with length < min_length */
#else
	uint64_t minerr : 1;
	uint64_t carext : 1;
	uint64_t maxerr : 1;
	uint64_t jabber : 1;
	uint64_t fcserr : 1;
	uint64_t alnerr : 1;
	uint64_t lenerr : 1;
	uint64_t rcverr : 1;
	uint64_t skperr : 1;
	uint64_t niberr : 1;
	uint64_t ovrerr : 1;
	uint64_t pcterr : 1;
	uint64_t rsverr : 1;
	uint64_t falerr : 1;
	uint64_t coldet : 1;
	uint64_t ifgerr : 1;
	uint64_t phy_link : 1;
	uint64_t phy_spd : 1;
	uint64_t phy_dupx : 1;
	uint64_t pause_drp : 1;
	uint64_t reserved_20_63 : 44;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_int_reg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_20_63 : 44;
	uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t reserved_16_18 : 3;
	uint64_t ifgerr : 1; /**< Interframe Gap Violation
	                          Does not necessarily indicate a failure */
	uint64_t coldet : 1; /**< Collision Detection */
	uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
	uint64_t rsverr : 1; /**< MII reserved opcodes */
	uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
	uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
	                          This interrupt should never assert */
	uint64_t reserved_9_9 : 1;
	uint64_t skperr : 1; /**< Skipper error */
	uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
	uint64_t lenerr : 1; /**< Frame was received with length error */
	uint64_t alnerr : 1; /**< Frame was received with an alignment error */
	uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
	uint64_t jabber : 1; /**< Frame was received with length > sys_length */
	uint64_t maxerr : 1; /**< Frame was received with length > max_length */
	uint64_t reserved_1_1 : 1;
	uint64_t minerr : 1; /**< Frame was received with length < min_length */
#else
	uint64_t minerr : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t maxerr : 1;
	uint64_t jabber : 1;
	uint64_t fcserr : 1;
	uint64_t alnerr : 1;
	uint64_t lenerr : 1;
	uint64_t rcverr : 1;
	uint64_t skperr : 1;
	uint64_t reserved_9_9 : 1;
	uint64_t ovrerr : 1;
	uint64_t pcterr : 1;
	uint64_t rsverr : 1;
	uint64_t falerr : 1;
	uint64_t coldet : 1;
	uint64_t ifgerr : 1;
	uint64_t reserved_16_18 : 3;
	uint64_t pause_drp : 1;
	uint64_t reserved_20_63 : 44;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
	struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
	struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
	struct cvmx_agl_gmx_rxx_int_reg_s cn61xx;
	struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
	struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_int_reg_s cn66xx;
	struct cvmx_agl_gmx_rxx_int_reg_s cn68xx;
	struct cvmx_agl_gmx_rxx_int_reg_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_int_reg cvmx_agl_gmx_rxx_int_reg_t;

/**
 * cvmx_agl_gmx_rx#_jabber
 *
 * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
 *
 *
 * Notes:
 * CNT must be 8-byte aligned such that CNT[2:0] == 0
 *
 * The packet that will be sent to the packet input logic will have an
 * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
 * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear.  The max packet that will be sent is
 * defined as...
 *
 *      max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
 *
 * Be sure the CNT field value is at least as large as the
 * AGL_GMX_RX_FRM_MAX[LEN] value.  Failure to meet this constraint will cause
 * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
 * because they exceed the CNT limit.
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_jabber {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_jabber_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t cnt : 16; /**< Byte count for jabber check
	                        Failing packets set the JABBER interrupt and are
	                        optionally sent with opcode==JABBER
	                        GMX will truncate the packet to CNT bytes
	                        CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
#else
	uint64_t cnt : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
	struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
	struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_jabber_s cn61xx;
	struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
	struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_jabber_s cn66xx;
	struct cvmx_agl_gmx_rxx_jabber_s cn68xx;
	struct cvmx_agl_gmx_rxx_jabber_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_jabber cvmx_agl_gmx_rxx_jabber_t;

/**
 * cvmx_agl_gmx_rx#_pause_drop_time
 *
 * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_rxx_pause_drop_time {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
#else
	uint64_t status : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn61xx;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn66xx;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xx;
	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_pause_drop_time cvmx_agl_gmx_rxx_pause_drop_time_t;

/**
 * cvmx_agl_gmx_rx#_rx_inbnd
 *
 * AGL_GMX_RX_INBND = RGMII InBand Link Status
 *
 *
 * Notes:
 * These fields are only valid if the attached PHY is operating in RGMII mode
 * and supports the optional in-band status (see section 3.4.1 of the RGMII
 * specification, version 1.3 for more information).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_rx_inbnd {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63 : 60;
	uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex | NS
	                          0=half-duplex
	                          1=full-duplex */
	uint64_t speed : 2; /**< RGMII Inbound LinkSpeed | NS
	                         00=2.5MHz
	                         01=25MHz
	                         10=125MHz
	                         11=Reserved */
	uint64_t status : 1; /**< RGMII Inbound LinkStatus | NS
	                          0=down
	                          1=up */
#else
	uint64_t status : 1;
	uint64_t speed : 2;
	uint64_t duplex : 1;
	uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s cn61xx;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s cn66xx;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xx;
	struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_rx_inbnd cvmx_agl_gmx_rxx_rx_inbnd_t;

/**
 * cvmx_agl_gmx_rx#_stats_ctl
 *
 * AGL_GMX_RX_STATS_CTL = RX Stats Control register
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_rxx_stats_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
#else
	uint64_t rd_clr : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_ctl cvmx_agl_gmx_rxx_stats_ctl_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_octs {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_octs_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t cnt : 48; /**< Octet count of received good packets */
#else
	uint64_t cnt : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_octs_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_octs cvmx_agl_gmx_rxx_stats_octs_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs_ctl
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_octs_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t cnt : 48; /**< Octet count of received pause packets */
#else
	uint64_t cnt : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_octs_ctl cvmx_agl_gmx_rxx_stats_octs_ctl_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs_dmac
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_octs_dmac {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
#else
	uint64_t cnt : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_octs_dmac cvmx_agl_gmx_rxx_stats_octs_dmac_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs_drp
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_octs_drp {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t cnt : 48; /**< Octet count of dropped packets */
#else
	uint64_t cnt : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_octs_drp cvmx_agl_gmx_rxx_stats_octs_drp_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts
 *
 * AGL_GMX_RX_STATS_PKTS
 *
 * Count of good received packets - packets that are not recognized as PAUSE
 * packets, dropped due to the DMAC filter, dropped due to FIFO full status, or
 * have any other OPCODE (FCS, Length, etc).
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t cnt : 32; /**< Count of received good packets */
#else
	uint64_t cnt : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts cvmx_agl_gmx_rxx_stats_pkts_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_bad
 *
 * AGL_GMX_RX_STATS_PKTS_BAD
 *
 * Count of all packets received with some error that were not dropped
 * either due to the dmac filter or lack of room in the receive FIFO.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_bad {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t cnt : 32; /**< Count of bad packets */
#else
	uint64_t cnt : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_bad cvmx_agl_gmx_rxx_stats_pkts_bad_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_ctl
 *
 * AGL_GMX_RX_STATS_PKTS_CTL
 *
 * Count of all packets received that were recognized as Flow Control or
 * PAUSE packets.  PAUSE packets with any kind of error are counted in
 * AGL_GMX_RX_STATS_PKTS_BAD.  Pause packets can be optionally dropped or
 * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit.  This count
 * increments regardless of whether the packet is dropped.  Pause packets
 * will never be counted in AGL_GMX_RX_STATS_PKTS.  Packets dropped due to the
 * dmac filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t cnt : 32; /**< Count of received pause packets */
#else
	uint64_t cnt : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_ctl cvmx_agl_gmx_rxx_stats_pkts_ctl_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_dmac
 *
 * AGL_GMX_RX_STATS_PKTS_DMAC
 *
 * Count of all packets received that were dropped by the dmac filter.
 * Packets that match the DMAC will be dropped and counted here regardless
 * of if they were bad packets.  These packets will never be counted in
 * AGL_GMX_RX_STATS_PKTS.
 *
 * Some packets that were not able to satisfy the DECISION_CNT may not
 * actually be dropped by Octeon, but they will be counted here as if they
 * were dropped.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_dmac {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t cnt : 32; /**< Count of filtered dmac packets */
#else
	uint64_t cnt : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_dmac cvmx_agl_gmx_rxx_stats_pkts_dmac_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_drp
 *
 * AGL_GMX_RX_STATS_PKTS_DRP
 *
 * Count of all packets received that were dropped due to a full receive
 * FIFO.  This counts good and bad packets received - all packets dropped by
 * the FIFO.  It does not count packets dropped by the dmac or pause packet
 * filters.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_drp {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t cnt : 32; /**< Count of dropped packets */
#else
	uint64_t cnt : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn61xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn66xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_drp cvmx_agl_gmx_rxx_stats_pkts_drp_t;

/**
 * cvmx_agl_gmx_rx#_udd_skp
 *
 * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
 *
 *
 * Notes:
 * (1) The skip bytes are part of the packet and will be sent down the NCB
 *     packet interface and will be handled by PKI.
 *
 * (2) The system can determine if the UDD bytes are included in the FCS check
 *     by using the FCSSEL field - if the FCS check is enabled.
 *
 * (3) Assume that the preamble/sfd is always at the start of the frame - even
 *     before UDD bytes.  In most cases, there will be no preamble in these
 *     cases since it will be MII to MII communication without a PHY
 *     involved.
 *
 * (4) We can still do address filtering and control packet filtering if the
 *     user desires.
 *
 * (5) UDD_SKP must be 0 in half-duplex operation unless
 *     AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear.  If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
 *     then UDD_SKP will normally be 8.
 *
 * (6) In all cases, the UDD bytes will be sent down the packet interface as
 *     part of the packet.  The UDD bytes are never stripped from the actual
 *     packet.
 *
 * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_udd_skp {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_udd_skp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_9_63 : 55;
	uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
	                          0 = all skip bytes are included in FCS
	                          1 = the skip bytes are not included in FCS */
	uint64_t reserved_7_7 : 1;
	uint64_t len : 7; /**< Amount of User-defined data before the start of
	                       the L2 data.  Zero means L2 comes first.
	                       Max value is 64. */
#else
	uint64_t len : 7;
	uint64_t reserved_7_7 : 1;
	uint64_t fcssel : 1;
	uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn61xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn66xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn68xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rxx_udd_skp cvmx_agl_gmx_rxx_udd_skp_t;

/**
 * cvmx_agl_gmx_rx_bp_drop#
 *
 * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_rx_bp_dropx {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_bp_dropx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63 : 58;
	uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
	                        When the FIFO exceeds this count, packets will
	                        be dropped and not buffered.
	                        MARK should typically be programmed to 2.
	                        Failure to program correctly can lead to system
	                        instability. */
#else
	uint64_t mark : 6;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn61xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn66xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn68xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rx_bp_dropx cvmx_agl_gmx_rx_bp_dropx_t;

/**
 * cvmx_agl_gmx_rx_bp_off#
 *
 * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3188 * 3189 */ 3190union cvmx_agl_gmx_rx_bp_offx { 3191 uint64_t u64; 3192 struct cvmx_agl_gmx_rx_bp_offx_s { 3193#ifdef __BIG_ENDIAN_BITFIELD 3194 uint64_t reserved_6_63 : 58; 3195 uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */ 3196#else 3197 uint64_t mark : 6; 3198 uint64_t reserved_6_63 : 58; 3199#endif 3200 } s; 3201 struct cvmx_agl_gmx_rx_bp_offx_s cn52xx; 3202 struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1; 3203 struct cvmx_agl_gmx_rx_bp_offx_s cn56xx; 3204 struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1; 3205 struct cvmx_agl_gmx_rx_bp_offx_s cn61xx; 3206 struct cvmx_agl_gmx_rx_bp_offx_s cn63xx; 3207 struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1; 3208 struct cvmx_agl_gmx_rx_bp_offx_s cn66xx; 3209 struct cvmx_agl_gmx_rx_bp_offx_s cn68xx; 3210 struct cvmx_agl_gmx_rx_bp_offx_s cn68xxp1; 3211}; 3212typedef union cvmx_agl_gmx_rx_bp_offx cvmx_agl_gmx_rx_bp_offx_t; 3213 3214/** 3215 * cvmx_agl_gmx_rx_bp_on# 3216 * 3217 * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure 3218 * 3219 * 3220 * Notes: 3221 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1. 3222 * 3223 */ 3224union cvmx_agl_gmx_rx_bp_onx { 3225 uint64_t u64; 3226 struct cvmx_agl_gmx_rx_bp_onx_s { 3227#ifdef __BIG_ENDIAN_BITFIELD 3228 uint64_t reserved_9_63 : 55; 3229 uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. 
	                         */
#else
	uint64_t mark : 9;
	uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
	struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
	struct cvmx_agl_gmx_rx_bp_onx_s cn61xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
	struct cvmx_agl_gmx_rx_bp_onx_s cn66xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn68xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rx_bp_onx cvmx_agl_gmx_rx_bp_onx_t;

/**
 * cvmx_agl_gmx_rx_prt_info
 *
 * AGL_GMX_RX_PRT_INFO = state information for the ports
 *
 *
 * Notes:
 * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_prt_info {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_prt_info_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_18_63 : 46;
	uint64_t drop : 2;  /**< Port indication that data was dropped */
	uint64_t reserved_2_15 : 14;
	uint64_t commit : 2;  /**< Port indication that SOP was accepted */
#else
	uint64_t commit : 2;
	uint64_t reserved_2_15 : 14;
	uint64_t drop : 2;
	uint64_t reserved_18_63 : 46;
#endif
	} s;
	struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
	/* CN56XX exposes only one management port, so single-bit fields. */
	struct cvmx_agl_gmx_rx_prt_info_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63 : 47;
	uint64_t drop : 1;  /**< Port indication that data was dropped */
	uint64_t reserved_1_15 : 15;
	uint64_t commit : 1;  /**< Port indication that SOP was accepted */
#else
	uint64_t commit : 1;
	uint64_t reserved_1_15 : 15;
	uint64_t drop : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_rx_prt_info_cn56xx
	cn56xxp1;
	struct cvmx_agl_gmx_rx_prt_info_s cn61xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
	struct cvmx_agl_gmx_rx_prt_info_s cn66xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn68xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn68xxp1;
};
typedef union cvmx_agl_gmx_rx_prt_info cvmx_agl_gmx_rx_prt_info_t;

/**
 * cvmx_agl_gmx_rx_tx_status
 *
 * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
 *
 *
 * Notes:
 * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_tx_status {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_tx_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63 : 58;
	uint64_t tx : 2;  /**< Transmit data since last read */
	uint64_t reserved_2_3 : 2;
	uint64_t rx : 2;  /**< Receive data since last read */
#else
	uint64_t rx : 2;
	uint64_t reserved_2_3 : 2;
	uint64_t tx : 2;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
	/* CN56XX exposes only one management port, so single-bit fields. */
	struct cvmx_agl_gmx_rx_tx_status_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63 : 59;
	uint64_t tx : 1;  /**< Transmit data since last read */
	uint64_t reserved_1_3 : 3;
	uint64_t rx : 1;  /**< Receive data since last read */
#else
	uint64_t rx : 1;
	uint64_t reserved_1_3 : 3;
	uint64_t tx : 1;
	uint64_t reserved_5_63 : 59;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_rx_tx_status_s cn61xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
	struct cvmx_agl_gmx_rx_tx_status_s cn66xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn68xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn68xxp1;
};
typedef union
cvmx_agl_gmx_rx_tx_status cvmx_agl_gmx_rx_tx_status_t;

/**
 * cvmx_agl_gmx_smac#
 *
 * AGL_GMX_SMAC = Packet SMAC
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_smacx {
	uint64_t u64;
	struct cvmx_agl_gmx_smacx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t smac : 48;  /**< The SMAC field is used for generating and
	                          accepting Control Pause packets */
#else
	uint64_t smac : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_smacx_s cn52xx;
	struct cvmx_agl_gmx_smacx_s cn52xxp1;
	struct cvmx_agl_gmx_smacx_s cn56xx;
	struct cvmx_agl_gmx_smacx_s cn56xxp1;
	struct cvmx_agl_gmx_smacx_s cn61xx;
	struct cvmx_agl_gmx_smacx_s cn63xx;
	struct cvmx_agl_gmx_smacx_s cn63xxp1;
	struct cvmx_agl_gmx_smacx_s cn66xx;
	struct cvmx_agl_gmx_smacx_s cn68xx;
	struct cvmx_agl_gmx_smacx_s cn68xxp1;
};
typedef union cvmx_agl_gmx_smacx cvmx_agl_gmx_smacx_t;

/**
 * cvmx_agl_gmx_stat_bp
 *
 * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 *
 *
 * It has no relationship with the TX FIFO per se.  The TX engine sends packets
 * from PKO and upon completion, sends a command to the TX stats block for an
 * update based on the packet size.  The stats operation can take a few cycles -
 * normally not enough to be visible considering the 64B min packet size that is
 * ethernet convention.
 *
 * In the rare case in which SW attempted to schedule really, really, small packets
 * or the sclk (6xxx) is running very slow, then the stats updates may not happen in
 * real time and can back up the TX engine.
 *
 * This counter is the number of cycles in which the TX engine was stalled.  In
 * normal operation, it should always be zeros.
 */
union cvmx_agl_gmx_stat_bp {
	uint64_t u64;
	struct cvmx_agl_gmx_stat_bp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63 : 47;
	uint64_t bp : 1;   /**< Current TX stats BP state
	                        When the TX stats machine cannot update the stats
	                        registers quickly enough, the machine has the
	                        ability to BP TX datapath.  This is a rare event
	                        and will not occur in normal operation.
	                        0 = no backpressure is applied
	                        1 = backpressure is applied to TX datapath to
	                            allow stat update operations to complete */
	uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
	                        Saturating counter */
#else
	uint64_t cnt : 16;
	uint64_t bp : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} s;
	struct cvmx_agl_gmx_stat_bp_s cn52xx;
	struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn56xx;
	struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn61xx;
	struct cvmx_agl_gmx_stat_bp_s cn63xx;
	struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn66xx;
	struct cvmx_agl_gmx_stat_bp_s cn68xx;
	struct cvmx_agl_gmx_stat_bp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_stat_bp cvmx_agl_gmx_stat_bp_t;

/**
 * cvmx_agl_gmx_tx#_append
 *
 * AGL_GMX_TX_APPEND = Packet TX Append Control
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_append {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_append_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63 : 60;
	uint64_t force_fcs : 1;  /**< Append the Ethernet FCS on each pause packet
	                              when FCS is clear.  Pause packets are normally
	                              padded to 60 bytes.
	                              If
	                              AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
	                              FORCE_FCS will not be used. */
	uint64_t fcs : 1;       /**< Append the Ethernet FCS on each packet */
	uint64_t pad : 1;       /**< Append PAD bytes such that min sized */
	uint64_t preamble : 1;  /**< Prepend the Ethernet preamble on each transfer */
#else
	uint64_t preamble : 1;
	uint64_t pad : 1;
	uint64_t fcs : 1;
	uint64_t force_fcs : 1;
	uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_agl_gmx_txx_append_s cn52xx;
	struct cvmx_agl_gmx_txx_append_s cn52xxp1;
	struct cvmx_agl_gmx_txx_append_s cn56xx;
	struct cvmx_agl_gmx_txx_append_s cn56xxp1;
	struct cvmx_agl_gmx_txx_append_s cn61xx;
	struct cvmx_agl_gmx_txx_append_s cn63xx;
	struct cvmx_agl_gmx_txx_append_s cn63xxp1;
	struct cvmx_agl_gmx_txx_append_s cn66xx;
	struct cvmx_agl_gmx_txx_append_s cn68xx;
	struct cvmx_agl_gmx_txx_append_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_append cvmx_agl_gmx_txx_append_t;

/**
 * cvmx_agl_gmx_tx#_clk
 *
 * AGL_GMX_TX_CLK = RGMII TX Clock Generation Register
 *
 *
 * Notes:
 * Normal Programming Values:
 *  (1) RGMII, 1000Mbs   (AGL_GMX_PRT_CFG[SPEED]==1), CLK_CNT == 1
 *  (2) RGMII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 50/5
 *  (3) MII,   10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 1
 *
 * RGMII Example:
 *  Given a 125MHz PLL reference clock...
 *  CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns*  1)
 *  CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns*  5)
 *  CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns* 50)
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_clk {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_clk_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63 : 58;
	uint64_t clk_cnt : 6;  /**< Controls the RGMII TXC frequency:
	                            TXC(period) = rgm_ref_clk(period)*CLK_CNT */
#else
	uint64_t clk_cnt : 6;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	/* Note: only present on CN6xxx parts; no CN52XX/CN56XX variants. */
	struct cvmx_agl_gmx_txx_clk_s cn61xx;
	struct cvmx_agl_gmx_txx_clk_s cn63xx;
	struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
	struct cvmx_agl_gmx_txx_clk_s cn66xx;
	struct cvmx_agl_gmx_txx_clk_s cn68xx;
	struct cvmx_agl_gmx_txx_clk_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_clk cvmx_agl_gmx_txx_clk_t;

/**
 * cvmx_agl_gmx_tx#_ctl
 *
 * AGL_GMX_TX_CTL = TX Control register
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t xsdef_en : 1;  /**< Enables the excessive deferral check for stats
	                             and interrupts */
	uint64_t xscol_en : 1;  /**< Enables the excessive collision check for stats
	                             and interrupts */
#else
	uint64_t xscol_en : 1;
	uint64_t xsdef_en : 1;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_agl_gmx_txx_ctl_s cn52xx;
	struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn56xx;
	struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn61xx;
	struct cvmx_agl_gmx_txx_ctl_s cn63xx;
	struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn66xx;
	struct cvmx_agl_gmx_txx_ctl_s cn68xx;
	struct cvmx_agl_gmx_txx_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_ctl cvmx_agl_gmx_txx_ctl_t;

/**
 * cvmx_agl_gmx_tx#_min_pkt
 *
 * AGL_GMX_TX_MIN_PKT = Packet TX Min
 Size Packet (PAD up to min size)
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_min_pkt {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_min_pkt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63 : 56;
	uint64_t min_size : 8;  /**< Min frame in bytes before the FCS is applied.
	                             Padding is only appended when
	                             AGL_GMX_TX_APPEND[PAD] for the corresponding
	                             packet port is set.  Packets will be padded to
	                             MIN_SIZE+1.  The reset value will pad to
	                             60 bytes. */
#else
	uint64_t min_size : 8;
	uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn61xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn66xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn68xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_min_pkt cvmx_agl_gmx_txx_min_pkt_t;

/**
 * cvmx_agl_gmx_tx#_pause_pkt_interval
 *
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
 *
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer.  It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
 * following rule...
 *
 *   INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is the largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_pkt_interval {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t interval : 16;  /**< Arbitrate for a pause packet every (INTERVAL*512)
	                              bit-times.
	                              Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
	                              INTERVAL=0, will only send a single PAUSE packet
	                              for each backpressure event */
#else
	uint64_t interval : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_pkt_interval cvmx_agl_gmx_txx_pause_pkt_interval_t;

/**
 * cvmx_agl_gmx_tx#_pause_pkt_time
 *
 * AGL_GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
 *
 *
 * Notes:
 * Choosing proper values of
 AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer.  It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
 * following rule...
 *
 *   INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is the largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_pkt_time {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t time : 16;  /**< The pause_time field placed in outbnd pause pkts.
	                          pause_time is in 512 bit-times.
	                          Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
#else
	uint64_t time : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_pkt_time cvmx_agl_gmx_txx_pause_pkt_time_t;

/**
 * cvmx_agl_gmx_tx#_pause_togo
 *
 * AGL_GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_pause_togo {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_togo_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t time : 16;  /**< Amount of time remaining to backpressure */
#else
	uint64_t time : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_togo cvmx_agl_gmx_txx_pause_togo_t;

/**
 * cvmx_agl_gmx_tx#_pause_zero
 *
 * AGL_GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_pause_zero {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_zero_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t send : 1;  /**< When backpressure condition clear, send PAUSE
	                         packet with pause_time of zero to enable the
	                         channel */
#else
	uint64_t send : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_zero cvmx_agl_gmx_txx_pause_zero_t;

/**
 * cvmx_agl_gmx_tx#_soft_pause
 *
 * AGL_GMX_TX_SOFT_PAUSE = Packet TX Software Pause
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_soft_pause {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_soft_pause_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t time : 16;  /**< Back off the TX bus for (TIME*512) bit-times
	                          for full-duplex operation only */
#else
	uint64_t time : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn61xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn66xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn68xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_soft_pause cvmx_agl_gmx_txx_soft_pause_t;

/**
 * cvmx_agl_gmx_tx#_stat0
 *
 * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
 *
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat0 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t xsdef : 32;  /**< Number of packets dropped (never successfully
	                           sent) due to excessive deferral */
	uint64_t xscol : 32;  /**< Number of packets dropped (never successfully
	                           sent) due to excessive collision.  Defined by
	                           AGL_GMX_TX_COL_ATTEMPT[LIMIT].
	                           */
#else
	uint64_t xscol : 32;
	uint64_t xsdef : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat0_s cn52xx;
	struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn56xx;
	struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn61xx;
	struct cvmx_agl_gmx_txx_stat0_s cn63xx;
	struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn66xx;
	struct cvmx_agl_gmx_txx_stat0_s cn68xx;
	struct cvmx_agl_gmx_txx_stat0_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat0 cvmx_agl_gmx_txx_stat0_t;

/**
 * cvmx_agl_gmx_tx#_stat1
 *
 * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL
 *
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat1 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t scol : 32;  /**< Number of packets sent with a single collision */
	uint64_t mcol : 32;  /**< Number of packets sent with multiple collisions
	                          but < AGL_GMX_TX_COL_ATTEMPT[LIMIT].
	                          */
#else
	uint64_t mcol : 32;
	uint64_t scol : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat1_s cn52xx;
	struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn56xx;
	struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn61xx;
	struct cvmx_agl_gmx_txx_stat1_s cn63xx;
	struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn66xx;
	struct cvmx_agl_gmx_txx_stat1_s cn68xx;
	struct cvmx_agl_gmx_txx_stat1_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat1 cvmx_agl_gmx_txx_stat1_t;

/**
 * cvmx_agl_gmx_tx#_stat2
 *
 * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
 *
 *
 * Notes:
 * - Octet counts are the sum of all data transmitted on the wire including
 *   packet data, pad bytes, fcs bytes, pause bytes, and jam bytes.  The octet
 *   counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat2 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t octs : 48;  /**< Number of total octets sent on the interface.
	                          Does not count octets from frames that were
	                          truncated due to collisions in halfdup mode.
	                          */
#else
	uint64_t octs : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat2_s cn52xx;
	struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn56xx;
	struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn61xx;
	struct cvmx_agl_gmx_txx_stat2_s cn63xx;
	struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn66xx;
	struct cvmx_agl_gmx_txx_stat2_s cn68xx;
	struct cvmx_agl_gmx_txx_stat2_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat2 cvmx_agl_gmx_txx_stat2_t;

/**
 * cvmx_agl_gmx_tx#_stat3
 *
 * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
 *
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat3 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat3_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t pkts : 32;  /**< Number of total frames sent on the interface.
	                          Does not count frames that were truncated due to
	                          collisions in halfdup mode.
	                          */
#else
	uint64_t pkts : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat3_s cn52xx;
	struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn56xx;
	struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn61xx;
	struct cvmx_agl_gmx_txx_stat3_s cn63xx;
	struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn66xx;
	struct cvmx_agl_gmx_txx_stat3_s cn68xx;
	struct cvmx_agl_gmx_txx_stat3_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat3 cvmx_agl_gmx_txx_stat3_t;

/**
 * cvmx_agl_gmx_tx#_stat4
 *
 * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
 *
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat4 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat4_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist1 : 32;  /**< Number of packets sent with an octet count of 64. */
	uint64_t hist0 : 32;  /**< Number of packets sent with an octet count
	                           of < 64.
	                           */
#else
	uint64_t hist0 : 32;
	uint64_t hist1 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat4_s cn52xx;
	struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn56xx;
	struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn61xx;
	struct cvmx_agl_gmx_txx_stat4_s cn63xx;
	struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn66xx;
	struct cvmx_agl_gmx_txx_stat4_s cn68xx;
	struct cvmx_agl_gmx_txx_stat4_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat4 cvmx_agl_gmx_txx_stat4_t;

/**
 * cvmx_agl_gmx_tx#_stat5
 *
 * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
 *
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat5 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat5_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist3 : 32;  /**< Number of packets sent with an octet count of
	                           128 - 255. */
	uint64_t hist2 : 32;  /**< Number of packets sent with an octet count of
	                           65 - 127.
	                           */
#else
	uint64_t hist2 : 32;
	uint64_t hist3 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat5_s cn52xx;
	struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn56xx;
	struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn61xx;
	struct cvmx_agl_gmx_txx_stat5_s cn63xx;
	struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn66xx;
	struct cvmx_agl_gmx_txx_stat5_s cn68xx;
	struct cvmx_agl_gmx_txx_stat5_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat5 cvmx_agl_gmx_txx_stat5_t;

/**
 * cvmx_agl_gmx_tx#_stat6
 *
 * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
 *
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat6 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat6_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist5 : 32;  /**< Number of packets sent with an octet count of
	                           512 - 1023. */
	uint64_t hist4 : 32;  /**< Number of packets sent with an octet count of
	                           256 - 511.
	                           */
#else
	uint64_t hist4 : 32;
	uint64_t hist5 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat6_s cn52xx;
	struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn56xx;
	struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn61xx;
	struct cvmx_agl_gmx_txx_stat6_s cn63xx;
	struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn66xx;
	struct cvmx_agl_gmx_txx_stat6_s cn68xx;
	struct cvmx_agl_gmx_txx_stat6_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat6 cvmx_agl_gmx_txx_stat6_t;

/**
 * cvmx_agl_gmx_tx#_stat7
 *
 * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (1024-1518) / AGL_GMX_TX_STATS_HIST6 (>1518)
 *
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat7 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat7_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist7 : 32;  /**< Number of packets sent with an octet count
	                           of > 1518. */
	uint64_t hist6 : 32;  /**< Number of packets sent with an octet count of
	                           1024 - 1518.
 */
#else
	uint64_t hist6 : 32;
	uint64_t hist7 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat7_s cn52xx;
	struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat7_s cn56xx;
	struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat7_s cn61xx;
	struct cvmx_agl_gmx_txx_stat7_s cn63xx;
	struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat7_s cn66xx;
	struct cvmx_agl_gmx_txx_stat7_s cn68xx;
	struct cvmx_agl_gmx_txx_stat7_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat7 cvmx_agl_gmx_txx_stat7_t;

/**
 * cvmx_agl_gmx_tx#_stat8
 *
 * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST
 *
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
 *   packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
 *   as per the 802.3 frame definition. If the system requires additional data
 *   before the L2 header, then the MCST and BCST counters may not reflect
 *   reality and should be ignored by software.
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat8 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat8_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
                                   Does not include BCST packets. */
	uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
                                   Does not include MCST packets. */
#else
	uint64_t bcst : 32;
	uint64_t mcst : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat8_s cn52xx;
	struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat8_s cn56xx;
	struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat8_s cn61xx;
	struct cvmx_agl_gmx_txx_stat8_s cn63xx;
	struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat8_s cn66xx;
	struct cvmx_agl_gmx_txx_stat8_s cn68xx;
	struct cvmx_agl_gmx_txx_stat8_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat8 cvmx_agl_gmx_txx_stat8_t;

/**
 * cvmx_agl_gmx_tx#_stat9
 *
 * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
 *
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat9 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat9_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t undflw : 32; /**< Number of underflow packets */
	uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
                                  generated by GMX. It does not include control
                                  packets forwarded or generated by the PP's.
 */
#else
	uint64_t ctl : 32;
	uint64_t undflw : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat9_s cn52xx;
	struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat9_s cn56xx;
	struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat9_s cn61xx;
	struct cvmx_agl_gmx_txx_stat9_s cn63xx;
	struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat9_s cn66xx;
	struct cvmx_agl_gmx_txx_stat9_s cn68xx;
	struct cvmx_agl_gmx_txx_stat9_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat9 cvmx_agl_gmx_txx_stat9_t;

/**
 * cvmx_agl_gmx_tx#_stats_ctl
 *
 * AGL_GMX_TX_STATS_CTL = TX Stats Control register
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_stats_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stats_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
#else
	uint64_t rd_clr : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn61xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn66xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn68xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stats_ctl cvmx_agl_gmx_txx_stats_ctl_t;

/**
 * cvmx_agl_gmx_tx#_thresh
 *
 * AGL_GMX_TX_THRESH = Packet TX Threshold
 *
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 *
 */
union cvmx_agl_gmx_txx_thresh {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_thresh_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63 : 58;
	uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO
                                 before sending on the packet interface
                                 This register should be large enough to prevent
                                 underflow on the packet interface and must never
                                 be set below 4. This register cannot exceed the
                                 TX FIFO depth which is 128, 8B entries. */
#else
	uint64_t cnt : 6;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_txx_thresh_s cn52xx;
	struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
	struct cvmx_agl_gmx_txx_thresh_s cn56xx;
	struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
	struct cvmx_agl_gmx_txx_thresh_s cn61xx;
	struct cvmx_agl_gmx_txx_thresh_s cn63xx;
	struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
	struct cvmx_agl_gmx_txx_thresh_s cn66xx;
	struct cvmx_agl_gmx_txx_thresh_s cn68xx;
	struct cvmx_agl_gmx_txx_thresh_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_thresh cvmx_agl_gmx_txx_thresh_t;

/**
 * cvmx_agl_gmx_tx_bp
 *
 * AGL_GMX_TX_BP = Packet TX BackPressure Register
 *
 *
 * Notes:
 * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_tx_bp {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_bp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t bp : 2; /**< Port BackPressure status
                                0=Port is available
                                1=Port should be back pressured */
#else
	uint64_t bp : 2;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_agl_gmx_tx_bp_s cn52xx;
	struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
	/* CN56XX has a single AGL port, so BP is only one bit wide there. */
	struct cvmx_agl_gmx_tx_bp_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t bp : 1; /**< Port BackPressure status
                                0=Port is available
                                1=Port should be back pressured */
#else
	uint64_t bp : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_tx_bp_s cn61xx;
	struct cvmx_agl_gmx_tx_bp_s cn63xx;
	struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
	struct cvmx_agl_gmx_tx_bp_s cn66xx;
	struct cvmx_agl_gmx_tx_bp_s cn68xx;
	struct cvmx_agl_gmx_tx_bp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_bp cvmx_agl_gmx_tx_bp_t;

/**
 * cvmx_agl_gmx_tx_col_attempt
 *
 * AGL_GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 */
union cvmx_agl_gmx_tx_col_attempt {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_col_attempt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63 : 59;
	uint64_t limit : 5; /**< Collision Attempts */
#else
	uint64_t limit : 5;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
	struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
	struct cvmx_agl_gmx_tx_col_attempt_s cn61xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
	struct cvmx_agl_gmx_tx_col_attempt_s cn66xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn68xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_col_attempt cvmx_agl_gmx_tx_col_attempt_t;

/**
 * cvmx_agl_gmx_tx_ifg
 *
 * Common
 *
 *
 * AGL_GMX_TX_IFG = Packet TX Interframe Gap
 *
 * Notes:
 * * Programming IFG1 and IFG2.
 *
 *   For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
 *   be in the range of 1-8, IFG2 must be in the range of 4-12, and the
 *   IFG1+IFG2 sum must be 12.
 *
 *   For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
 *   be in the range of 1-11, IFG2 must be in the range of 1-11, and the
 *   IFG1+IFG2 sum must be 12.
 *
 *   For all other systems, IFG1 and IFG2 can be any value in the range of
 *   1-15.  Allowing for a total possible IFG sum of 2-30.
 *
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 */
union cvmx_agl_gmx_tx_ifg {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_ifg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63 : 56;
	uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing
                                  If CRS is detected during IFG2, then the
                                  interFrameSpacing timer is not reset and a frame
                                  is transmitted once the timer expires. */
	uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing
                                  If CRS is detected during IFG1, then the
                                  interFrameSpacing timer is reset and a frame is
                                  not transmitted. */
#else
	uint64_t ifg1 : 4;
	uint64_t ifg2 : 4;
	uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_agl_gmx_tx_ifg_s cn52xx;
	struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
	struct cvmx_agl_gmx_tx_ifg_s cn56xx;
	struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
	struct cvmx_agl_gmx_tx_ifg_s cn61xx;
	struct cvmx_agl_gmx_tx_ifg_s cn63xx;
	struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
	struct cvmx_agl_gmx_tx_ifg_s cn66xx;
	struct cvmx_agl_gmx_tx_ifg_s cn68xx;
	struct cvmx_agl_gmx_tx_ifg_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_ifg cvmx_agl_gmx_tx_ifg_t;

/**
 * cvmx_agl_gmx_tx_int_en
 *
 * AGL_GMX_TX_INT_EN = Interrupt Enable
 *
 *
 * Notes:
 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
 */
union cvmx_agl_gmx_tx_int_en {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_22_63 : 42;
	uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
                                      sent due to XSCOL */
	uint64_t reserved_18_19 : 2;
	uint64_t late_col : 2; /**< TX Late Collision */
	uint64_t reserved_14_15 : 2;
	uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
	uint64_t reserved_10_11 : 2;
	uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
	uint64_t reserved_4_7 : 4;
	uint64_t undflw : 2; /**< TX Underflow */
	uint64_t reserved_1_1 : 1;
	uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t undflw : 2;
	uint64_t reserved_4_7 : 4;
	uint64_t xscol : 2;
	uint64_t reserved_10_11 : 2;
	uint64_t xsdef : 2;
	uint64_t reserved_14_15 : 2;
	uint64_t late_col : 2;
	uint64_t reserved_18_19 : 2;
	uint64_t ptp_lost : 2;
	uint64_t reserved_22_63 : 42;
#endif
	} s;
	/* CN52XX: two ports but no PTP_LOST bits. */
	struct cvmx_agl_gmx_tx_int_en_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_18_63 : 46;
	uint64_t late_col : 2; /**< TX Late Collision */
	uint64_t reserved_14_15 : 2;
	uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
	uint64_t reserved_10_11 : 2;
	uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
	uint64_t reserved_4_7 : 4;
	uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
	uint64_t reserved_1_1 : 1;
	uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t undflw : 2;
	uint64_t reserved_4_7 : 4;
	uint64_t xscol : 2;
	uint64_t reserved_10_11 : 2;
	uint64_t xsdef : 2;
	uint64_t reserved_14_15 : 2;
	uint64_t late_col : 2;
	uint64_t reserved_18_63 : 46;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
	/* CN56XX: single port, so per-port fields are one bit wide. */
	struct cvmx_agl_gmx_tx_int_en_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63 : 47;
	uint64_t late_col : 1; /**< TX Late Collision */
	uint64_t reserved_13_15 : 3;
	uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
	uint64_t reserved_9_11 : 3;
	uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
	uint64_t reserved_3_7 : 5;
	uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
	uint64_t reserved_1_1 : 1;
	uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t undflw : 1;
	uint64_t reserved_3_7 : 5;
	uint64_t xscol : 1;
	uint64_t reserved_9_11 : 3;
	uint64_t xsdef : 1;
	uint64_t reserved_13_15 : 3;
	uint64_t late_col : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_tx_int_en_s cn61xx;
	struct cvmx_agl_gmx_tx_int_en_s cn63xx;
	struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
	struct cvmx_agl_gmx_tx_int_en_s cn66xx;
	struct cvmx_agl_gmx_tx_int_en_s cn68xx;
	struct cvmx_agl_gmx_tx_int_en_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_int_en cvmx_agl_gmx_tx_int_en_t;

/**
 * cvmx_agl_gmx_tx_int_reg
 *
 * AGL_GMX_TX_INT_REG = Interrupt Register
 *
 *
 * Notes:
 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
 */
union cvmx_agl_gmx_tx_int_reg {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_22_63 : 42;
	uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
                                      sent due to XSCOL */
	uint64_t reserved_18_19 : 2;
	uint64_t late_col : 2; /**< TX Late Collision */
	uint64_t reserved_14_15 : 2;
	uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
	uint64_t reserved_10_11 : 2;
	uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
	uint64_t reserved_4_7 : 4;
	uint64_t undflw : 2; /**< TX Underflow */
	uint64_t reserved_1_1 : 1;
	uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t undflw : 2;
	uint64_t reserved_4_7 : 4;
	uint64_t xscol : 2;
	uint64_t reserved_10_11 : 2;
	uint64_t xsdef : 2;
	uint64_t reserved_14_15 : 2;
	uint64_t late_col : 2;
	uint64_t reserved_18_19 : 2;
	uint64_t ptp_lost : 2;
	uint64_t reserved_22_63 : 42;
#endif
	} s;
	/* CN52XX: two ports but no PTP_LOST bits. */
	struct cvmx_agl_gmx_tx_int_reg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_18_63 : 46;
	uint64_t late_col : 2; /**< TX Late Collision */
	uint64_t reserved_14_15 : 2;
	uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
	uint64_t reserved_10_11 : 2;
	uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
	uint64_t reserved_4_7 : 4;
	uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
	uint64_t reserved_1_1 : 1;
	uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t undflw : 2;
	uint64_t reserved_4_7 : 4;
	uint64_t xscol : 2;
	uint64_t reserved_10_11 : 2;
	uint64_t xsdef : 2;
	uint64_t reserved_14_15 : 2;
	uint64_t late_col : 2;
	uint64_t reserved_18_63 : 46;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
	/* CN56XX: single port, so per-port fields are one bit wide. */
	struct cvmx_agl_gmx_tx_int_reg_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63 : 47;
	uint64_t late_col : 1; /**< TX Late Collision */
	uint64_t reserved_13_15 : 3;
	uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
	uint64_t reserved_9_11 : 3;
	uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
	uint64_t reserved_3_7 : 5;
	uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
	uint64_t reserved_1_1 : 1;
	uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa : 1;
	uint64_t reserved_1_1 : 1;
	uint64_t undflw : 1;
	uint64_t reserved_3_7 : 5;
	uint64_t xscol : 1;
	uint64_t reserved_9_11 : 3;
	uint64_t xsdef : 1;
	uint64_t reserved_13_15 : 3;
	uint64_t late_col : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_tx_int_reg_s cn61xx;
	struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
	struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
	struct cvmx_agl_gmx_tx_int_reg_s cn66xx;
	struct cvmx_agl_gmx_tx_int_reg_s cn68xx;
	struct cvmx_agl_gmx_tx_int_reg_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_int_reg cvmx_agl_gmx_tx_int_reg_t;

/**
 * cvmx_agl_gmx_tx_jam
 *
 * AGL_GMX_TX_JAM = Packet TX Jam Pattern
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 */
union cvmx_agl_gmx_tx_jam {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_jam_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63 : 56;
	uint64_t jam : 8; /**< Jam pattern */
#else
	uint64_t jam : 8;
	uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_agl_gmx_tx_jam_s cn52xx;
	struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
	struct cvmx_agl_gmx_tx_jam_s cn56xx;
	struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
	struct cvmx_agl_gmx_tx_jam_s cn61xx;
	struct cvmx_agl_gmx_tx_jam_s cn63xx;
	struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
	struct cvmx_agl_gmx_tx_jam_s cn66xx;
	struct cvmx_agl_gmx_tx_jam_s cn68xx;
	struct cvmx_agl_gmx_tx_jam_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_jam cvmx_agl_gmx_tx_jam_t;

/**
 * cvmx_agl_gmx_tx_lfsr
 *
 * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 */
union cvmx_agl_gmx_tx_lfsr {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_lfsr_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
                                   numbers to compute truncated binary exponential
                                   backoff. */
#else
	uint64_t lfsr : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
	struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
	struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
	struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
	struct cvmx_agl_gmx_tx_lfsr_s cn61xx;
	struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
	struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
	struct cvmx_agl_gmx_tx_lfsr_s cn66xx;
	struct cvmx_agl_gmx_tx_lfsr_s cn68xx;
	struct cvmx_agl_gmx_tx_lfsr_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_lfsr cvmx_agl_gmx_tx_lfsr_t;

/**
 * cvmx_agl_gmx_tx_ovr_bp
 *
 * AGL_GMX_TX_OVR_BP = Packet TX Override BackPressure
 *
 *
 * Notes:
 * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_tx_ovr_bp {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_ovr_bp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_10_63 : 54;
	uint64_t en : 2; /**< Per port Enable back pressure override */
	uint64_t reserved_6_7 : 2;
	uint64_t bp : 2; /**< Port BackPressure status to use
                                0=Port is available
                                1=Port should be back pressured */
	uint64_t reserved_2_3 : 2;
	uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
#else
	uint64_t ign_full : 2;
	uint64_t reserved_2_3 : 2;
	uint64_t bp : 2;
	uint64_t reserved_6_7 : 2;
	uint64_t en : 2;
	uint64_t reserved_10_63 : 54;
#endif
	} s;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
	/* CN56XX: single port, so per-port fields are one bit wide. */
	struct cvmx_agl_gmx_tx_ovr_bp_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_9_63 : 55;
	uint64_t en : 1; /**< Per port Enable back pressure override */
	uint64_t reserved_5_7 : 3;
	uint64_t bp : 1; /**< Port BackPressure status to use
                                0=Port is available
                                1=Port should be back pressured */
	uint64_t reserved_1_3 : 3;
	uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */
#else
	uint64_t ign_full : 1;
	uint64_t reserved_1_3 : 3;
	uint64_t bp : 1;
	uint64_t reserved_5_7 : 3;
	uint64_t en : 1;
	uint64_t reserved_9_63 : 55;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn61xx;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn66xx;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn68xx;
	struct cvmx_agl_gmx_tx_ovr_bp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_ovr_bp cvmx_agl_gmx_tx_ovr_bp_t;

/**
 * cvmx_agl_gmx_tx_pause_pkt_dmac
 *
 * AGL_GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 */
union cvmx_agl_gmx_tx_pause_pkt_dmac {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t dmac : 48; /**< The DMAC field placed in outbound pause packets */
#else
	uint64_t dmac : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn61xx;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn66xx;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xx;
	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_pause_pkt_dmac cvmx_agl_gmx_tx_pause_pkt_dmac_t;

/**
 * cvmx_agl_gmx_tx_pause_pkt_type
 *
 * AGL_GMX_TX_PAUSE_PKT_TYPE = Packet TX Pause Packet TYPE field
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 */
union cvmx_agl_gmx_tx_pause_pkt_type {
	uint64_t u64;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t type : 16; /**< The TYPE field placed in outbound pause packets */
#else
	uint64_t type : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn61xx;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn66xx;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xx;
	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_pause_pkt_type cvmx_agl_gmx_tx_pause_pkt_type_t;

/**
 * cvmx_agl_prt#_ctl
 *
 * AGL_PRT_CTL = AGL Port Control
 *
 *
 * Notes:
 * The RGMII timing specification requires that devices transmit clock and
 * data synchronously. The specification requires external sources (namely
 * the PC board trace routes) to introduce the appropriate 1.5 to 2.0 ns of
 * delay.
 *
 * To eliminate the need for the PC board delays, the MIX RGMII interface
 * has optional onboard DLL's for both transmit and receive. For correct
 * operation, at most one of the transmitter, board, or receiver involved
 * in an RGMII link should introduce delay. By default/reset,
 * the MIX RGMII receivers delay the received clock, and the MIX
 * RGMII transmitters do not delay the transmitted clock.
Whether this
 * default works as-is with a given link partner depends on the behavior
 * of the link partner and the PC board.
 *
 * These are the possible modes of MIX RGMII receive operation:
 *  o AGL_PRTx_CTL[CLKRX_BYP] = 0 (reset value) - The OCTEON MIX RGMII
 *    receive interface introduces clock delay using its internal DLL.
 *    This mode is appropriate if neither the remote
 *    transmitter nor the PC board delays the clock.
 *  o AGL_PRTx_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The OCTEON MIX
 *    RGMII receive interface introduces no clock delay. This mode
 *    is appropriate if either the remote transmitter or the PC board
 *    delays the clock.
 *
 * These are the possible modes of MIX RGMII transmit operation:
 *  o AGL_PRTx_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) -
 *    The OCTEON MIX RGMII transmit interface introduces no clock
 *    delay. This mode is appropriate if either the remote receiver
 *    or the PC board delays the clock.
 *  o AGL_PRTx_CTL[CLKTX_BYP] = 0 - The OCTEON MIX RGMII transmit
 *    interface introduces clock delay using its internal DLL.
 *    This mode is appropriate if neither the remote receiver
 *    nor the PC board delays the clock.
 *
 * AGL_PRT0_CTL will be reset when MIX0_CTL[RESET] is set to 1.
 * AGL_PRT1_CTL will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_prtx_ctl {
	uint64_t u64;
	struct cvmx_agl_prtx_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t drv_byp : 1; /**< Bypass the compensation controller and use
                                     DRV_NCTL and DRV_PCTL */
	uint64_t reserved_62_62 : 1;
	uint64_t cmp_pctl : 6; /**< PCTL drive strength from the compensation ctl */
	uint64_t reserved_54_55 : 2;
	uint64_t cmp_nctl : 6; /**< NCTL drive strength from the compensation ctl */
	uint64_t reserved_46_47 : 2;
	uint64_t drv_pctl : 6; /**< PCTL drive strength to use in bypass mode
                                      Reset value of 19 is for 50 ohm termination */
	uint64_t reserved_38_39 : 2;
	uint64_t drv_nctl : 6; /**< NCTL drive strength to use in bypass mode
                                      Reset value of 15 is for 50 ohm termination */
	uint64_t reserved_29_31 : 3;
	uint64_t clk_set : 5; /**< The clock delay as determined by the DLL */
	uint64_t clkrx_byp : 1; /**< Bypass the RX clock delay setting
                                       Skews RXC from RXD,RXCTL in RGMII mode
                                       By default, HW internally shifts the RXC clock
                                       to sample RXD,RXCTL assuming clock and data are
                                       sourced synchronously from the link partner.
                                       In MII mode, the CLKRX_BYP is forced to 1. */
	uint64_t reserved_21_22 : 2;
	uint64_t clkrx_set : 5; /**< RX clock delay setting to use in bypass mode
                                       Skews RXC from RXD in RGMII mode */
	uint64_t clktx_byp : 1; /**< Bypass the TX clock delay setting
                                       Skews TXC from TXD,TXCTL in RGMII mode
                                       By default, clock and data are sourced
                                       synchronously.
                                       In MII mode, the CLKTX_BYP is forced to 1.
                                       (NOTE(review): generated text said CLKRX_BYP
                                       here; reads as a copy-paste slip — confirm
                                       against the HRM.) */
	uint64_t reserved_13_14 : 2;
	uint64_t clktx_set : 5; /**< TX clock delay setting to use in bypass mode
                                       Skews TXC from TXD in RGMII mode */
	uint64_t reserved_5_7 : 3;
	uint64_t dllrst : 1; /**< DLL Reset */
	uint64_t comp : 1; /**< Compensation Enable */
	uint64_t enable : 1; /**< Port Enable */
	uint64_t clkrst : 1; /**< Clock Tree Reset */
	uint64_t mode : 1; /**< Port Mode
                                  MODE must be set the same for all ports in which
                                  AGL_PRTx_CTL[ENABLE] is set.
                                  0=RGMII
                                  1=MII */
#else
	uint64_t mode : 1;
	uint64_t clkrst : 1;
	uint64_t enable : 1;
	uint64_t comp : 1;
	uint64_t dllrst : 1;
	uint64_t reserved_5_7 : 3;
	uint64_t clktx_set : 5;
	uint64_t reserved_13_14 : 2;
	uint64_t clktx_byp : 1;
	uint64_t clkrx_set : 5;
	uint64_t reserved_21_22 : 2;
	uint64_t clkrx_byp : 1;
	uint64_t clk_set : 5;
	uint64_t reserved_29_31 : 3;
	uint64_t drv_nctl : 6;
	uint64_t reserved_38_39 : 2;
	uint64_t drv_pctl : 6;
	uint64_t reserved_46_47 : 2;
	uint64_t cmp_nctl : 6;
	uint64_t reserved_54_55 : 2;
	uint64_t cmp_pctl : 6;
	uint64_t reserved_62_62 : 1;
	uint64_t drv_byp : 1;
#endif
	} s;
	struct cvmx_agl_prtx_ctl_s cn61xx;
	struct cvmx_agl_prtx_ctl_s cn63xx;
	struct cvmx_agl_prtx_ctl_s cn63xxp1;
	struct cvmx_agl_prtx_ctl_s cn66xx;
	struct cvmx_agl_prtx_ctl_s cn68xx;
	struct cvmx_agl_prtx_ctl_s cn68xxp1;
};
typedef union cvmx_agl_prtx_ctl cvmx_agl_prtx_ctl_t;

#endif