/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.

 * * Neither the name of Cavium Networks nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.

 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.

 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * cvmx-gmxx-defs.h
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon gmxx.
 *
 * This file is auto generated. Do not edit.
48215976Sjmallett * 49215976Sjmallett * <hr>$Revision$<hr> 50215976Sjmallett * 51215976Sjmallett */ 52215976Sjmallett#ifndef __CVMX_GMXX_TYPEDEFS_H__ 53215976Sjmallett#define __CVMX_GMXX_TYPEDEFS_H__ 54215976Sjmallett 55215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 56215976Sjmallettstatic inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id) 57215976Sjmallett{ 58215976Sjmallett if (!( 59215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 60215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 61215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 62215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 63215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 64215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 65215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 66215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 67215976Sjmallett cvmx_warn("CVMX_GMXX_BAD_REG(%lu) is invalid on this chip\n", block_id); 68215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull; 69215976Sjmallett} 70215976Sjmallett#else 71215976Sjmallett#define CVMX_GMXX_BAD_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull) 72215976Sjmallett#endif 73215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 74215976Sjmallettstatic inline uint64_t CVMX_GMXX_BIST(unsigned long block_id) 75215976Sjmallett{ 76215976Sjmallett if (!( 77215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 78215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 79215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 80215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 81215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 82215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 
1))) || 83215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 84215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 85215976Sjmallett cvmx_warn("CVMX_GMXX_BIST(%lu) is invalid on this chip\n", block_id); 86215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull; 87215976Sjmallett} 88215976Sjmallett#else 89215976Sjmallett#define CVMX_GMXX_BIST(block_id) (CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull) 90215976Sjmallett#endif 91215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 92215976Sjmallettstatic inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id) 93215976Sjmallett{ 94215976Sjmallett if (!( 95215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 96215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 97215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 98215976Sjmallett cvmx_warn("CVMX_GMXX_CLK_EN(%lu) is invalid on this chip\n", block_id); 99215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull; 100215976Sjmallett} 101215976Sjmallett#else 102215976Sjmallett#define CVMX_GMXX_CLK_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull) 103215976Sjmallett#endif 104215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 105215976Sjmallettstatic inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id) 106215976Sjmallett{ 107215976Sjmallett if (!( 108215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 109215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 110215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 111215976Sjmallett cvmx_warn("CVMX_GMXX_HG2_CONTROL(%lu) is invalid on this chip\n", block_id); 112215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull; 113215976Sjmallett} 114215976Sjmallett#else 
115215976Sjmallett#define CVMX_GMXX_HG2_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull) 116215976Sjmallett#endif 117215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 118215976Sjmallettstatic inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id) 119215976Sjmallett{ 120215976Sjmallett if (!( 121215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 122215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 123215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 124215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 125215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 126215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 127215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 128215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 129215976Sjmallett cvmx_warn("CVMX_GMXX_INF_MODE(%lu) is invalid on this chip\n", block_id); 130215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull; 131215976Sjmallett} 132215976Sjmallett#else 133215976Sjmallett#define CVMX_GMXX_INF_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull) 134215976Sjmallett#endif 135215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 136215976Sjmallettstatic inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id) 137215976Sjmallett{ 138215976Sjmallett if (!( 139215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 140215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 141215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 142215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 143215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 144215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 
145215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 146215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 147215976Sjmallett cvmx_warn("CVMX_GMXX_NXA_ADR(%lu) is invalid on this chip\n", block_id); 148215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull; 149215976Sjmallett} 150215976Sjmallett#else 151215976Sjmallett#define CVMX_GMXX_NXA_ADR(block_id) (CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull) 152215976Sjmallett#endif 153215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 154215976Sjmallettstatic inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id) 155215976Sjmallett{ 156215976Sjmallett if (!( 157215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) || 158215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) || 159215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0)))))) 160215976Sjmallett cvmx_warn("CVMX_GMXX_PRTX_CBFC_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id); 161215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull; 162215976Sjmallett} 163215976Sjmallett#else 164215976Sjmallett#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull) 165215976Sjmallett#endif 166215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 167215976Sjmallettstatic inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id) 168215976Sjmallett{ 169215976Sjmallett if (!( 170215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 171215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 172215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 173215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 174215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 175215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 176215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 177215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 178215976Sjmallett cvmx_warn("CVMX_GMXX_PRTX_CFG(%lu,%lu) is invalid on this chip\n", offset, block_id); 179215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 180215976Sjmallett} 181215976Sjmallett#else 182215976Sjmallett#define CVMX_GMXX_PRTX_CFG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 183215976Sjmallett#endif 184215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 185215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id) 186215976Sjmallett{ 187215976Sjmallett if (!( 188215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 189215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 190215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 191215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 192215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 193215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 194215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 195215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 196215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM0(%lu,%lu) is invalid on this 
chip\n", offset, block_id); 197215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 198215976Sjmallett} 199215976Sjmallett#else 200215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 201215976Sjmallett#endif 202215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 203215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id) 204215976Sjmallett{ 205215976Sjmallett if (!( 206215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 207215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 208215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 209215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 210215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 211215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 212215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 213215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 214215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM1(%lu,%lu) is invalid on this chip\n", offset, block_id); 215215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 216215976Sjmallett} 217215976Sjmallett#else 218215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 219215976Sjmallett#endif 220215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 221215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long 
block_id) 222215976Sjmallett{ 223215976Sjmallett if (!( 224215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 225215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 226215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 227215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 228215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 229215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 230215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 231215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 232215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM2(%lu,%lu) is invalid on this chip\n", offset, block_id); 233215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 234215976Sjmallett} 235215976Sjmallett#else 236215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 237215976Sjmallett#endif 238215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 239215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id) 240215976Sjmallett{ 241215976Sjmallett if (!( 242215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 243215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 244215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 245215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 246215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 
247215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 248215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 249215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 250215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM3(%lu,%lu) is invalid on this chip\n", offset, block_id); 251215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 252215976Sjmallett} 253215976Sjmallett#else 254215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 255215976Sjmallett#endif 256215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 257215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id) 258215976Sjmallett{ 259215976Sjmallett if (!( 260215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 261215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 262215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 263215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 264215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 265215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 266215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 267215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 268215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM4(%lu,%lu) is invalid on this chip\n", offset, block_id); 269215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 270215976Sjmallett} 
271215976Sjmallett#else 272215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 273215976Sjmallett#endif 274215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 275215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id) 276215976Sjmallett{ 277215976Sjmallett if (!( 278215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 279215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 280215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 281215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 282215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 283215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 284215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 285215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 286215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM5(%lu,%lu) is invalid on this chip\n", offset, block_id); 287215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 288215976Sjmallett} 289215976Sjmallett#else 290215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 291215976Sjmallett#endif 292215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 293215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id) 294215976Sjmallett{ 295215976Sjmallett if (!( 296215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 297215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 298215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 299215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 300215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 301215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 302215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 303215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 304215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_EN(%lu,%lu) is invalid on this chip\n", offset, block_id); 305215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 306215976Sjmallett} 307215976Sjmallett#else 308215976Sjmallett#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 309215976Sjmallett#endif 310215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 311215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id) 312215976Sjmallett{ 313215976Sjmallett if (!( 314215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 315215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 316215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 317215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 318215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 319215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 320215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && 
((block_id <= 1)))) || 321215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 322215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_ADR_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id); 323215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 324215976Sjmallett} 325215976Sjmallett#else 326215976Sjmallett#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 327215976Sjmallett#endif 328215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 329215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id) 330215976Sjmallett{ 331215976Sjmallett if (!( 332215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 333215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 334215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 335215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 336215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 337215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 338215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 339215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 340215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_DECISION(%lu,%lu) is invalid on this chip\n", offset, block_id); 341215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 342215976Sjmallett} 343215976Sjmallett#else 344215976Sjmallett#define CVMX_GMXX_RXX_DECISION(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 
0x10000ull) * 2048) 345215976Sjmallett#endif 346215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 347215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id) 348215976Sjmallett{ 349215976Sjmallett if (!( 350215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 351215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 352215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 353215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 354215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 355215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 356215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 357215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 358215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_FRM_CHK(%lu,%lu) is invalid on this chip\n", offset, block_id); 359215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 360215976Sjmallett} 361215976Sjmallett#else 362215976Sjmallett#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 363215976Sjmallett#endif 364215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 365215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id) 366215976Sjmallett{ 367215976Sjmallett if (!( 368215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 369215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 370215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 
371215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 372215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 373215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 374215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 375215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 376215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_FRM_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id); 377215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 378215976Sjmallett} 379215976Sjmallett#else 380215976Sjmallett#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 381215976Sjmallett#endif 382215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 383215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_FRM_MAX(unsigned long offset, unsigned long block_id) 384215976Sjmallett{ 385215976Sjmallett if (!( 386215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 387215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 388215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 389215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))))) 390215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_FRM_MAX(%lu,%lu) is invalid on this chip\n", offset, block_id); 391215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 392215976Sjmallett} 393215976Sjmallett#else 394215976Sjmallett#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 
395215976Sjmallett#endif 396215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 397215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_FRM_MIN(unsigned long offset, unsigned long block_id) 398215976Sjmallett{ 399215976Sjmallett if (!( 400215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 401215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 402215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 403215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))))) 404215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_FRM_MIN(%lu,%lu) is invalid on this chip\n", offset, block_id); 405215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 406215976Sjmallett} 407215976Sjmallett#else 408215976Sjmallett#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 409215976Sjmallett#endif 410215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 411215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id) 412215976Sjmallett{ 413215976Sjmallett if (!( 414215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 415215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 416215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 417215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 418215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 419215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 420215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 421215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 422215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_IFG(%lu,%lu) is invalid on this chip\n", offset, block_id); 423215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 424215976Sjmallett} 425215976Sjmallett#else 426215976Sjmallett#define CVMX_GMXX_RXX_IFG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 427215976Sjmallett#endif 428215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 429215976Sjmallettstatic inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id) 430215976Sjmallett{ 431215976Sjmallett if (!( 432215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 433215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 434215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 435215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 436215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 437215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 438215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 439215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 440215976Sjmallett cvmx_warn("CVMX_GMXX_RXX_INT_EN(%lu,%lu) is invalid on this chip\n", offset, block_id); 441215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 442215976Sjmallett} 443215976Sjmallett#else 444215976Sjmallett#define CVMX_GMXX_RXX_INT_EN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 445215976Sjmallett#endif 
/*
 * GMX CSR address helpers (auto generated — do not hand-edit the math).
 *
 * Every register below follows the same generated pattern: with
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING an inline function validates the
 * (offset, block_id) pair against the chip models that implement the
 * register and calls cvmx_warn() on a mismatch, then returns the CSR
 * address; otherwise a macro computes the same address unchecked.  The
 * arguments are masked (offset & 3 or & 15, block_id & 1) before use, so
 * even an invalid pair produces an in-range (if meaningless) address.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_INT_REG. */
static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_INT_REG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_INT_REG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_JABBER. */
static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_JABBER(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_JABBER(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_PAUSE_DROP_TIME — not present before CN50XX. */
static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_PAUSE_DROP_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_RX_INBND — absent on CN52XX/CN56XX/CN63XX. */
static inline uint64_t CVMX_GMXX_RXX_RX_INBND(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_RX_INBND(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_CTL. */
static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_OCTS. */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_OCTS_CTL. */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_OCTS_DMAC. */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_OCTS_DRP. */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_PKTS. */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_PKTS_BAD. */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_BAD(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_PKTS_CTL. */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_PKTS_DMAC. */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_STATS_PKTS_DRP. */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX(offset)_UDD_SKP. */
static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_UDD_SKP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_BP_DROP(offset) — note 8-byte stride, unlike the
 * 2048-byte stride of the per-port registers above. */
static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_DROPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_BP_OFF(offset). */
static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_OFFX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_BP_ON(offset). */
static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_ONX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_BP_ONX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_HG2_STATUS — per-block register (no port offset);
 * only on the XAUI-capable CN52XX/CN56XX/CN63XX models. */
static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_HG2_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_HG2_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_PASS_EN — CN38XX/CN58XX only. */
static inline uint64_t CVMX_GMXX_RX_PASS_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_RX_PASS_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_PASS_MAP(offset) — 16 entries (offset <= 15), masked
 * with & 15 rather than & 3; CN38XX/CN58XX only. */
static inline uint64_t CVMX_GMXX_RX_PASS_MAPX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 15)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RX_PASS_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_PRTS. */
static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_PRTS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_PRTS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_PRT_INFO. */
static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_PRT_INFO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_PRT_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_TX_STATUS — CN30XX/CN31XX/CN50XX only.  The address
 * ignores block_id entirely (single-interface chips); note the same
 * 0x...7E8 address is reused by CVMX_GMXX_SOFT_BIST on CN63XX below. */
static inline uint64_t CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
}
#else
#define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_XAUI_BAD_COL — CN52XX/CN56XX/CN63XX only. */
static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_XAUI_BAD_COL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_RX_XAUI_CTL — CN52XX/CN56XX/CN63XX only. */
static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_XAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_SMAC(offset). */
static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_SMACX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_SMACX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_SOFT_BIST — CN63XX only.  Shares address 0x...7E8 with
 * CVMX_GMXX_RX_TX_STATUS above; the valid model sets are disjoint, and
 * block_id does not affect the returned address. */
static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_SOFT_BIST(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
}
#else
#define CVMX_GMXX_SOFT_BIST(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_STAT_BP. */
static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_STAT_BP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_STAT_BP(block_id) (CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_TX(offset)_APPEND. */
static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_APPEND(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_APPEND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* GMX(block_id)_TX(offset)_BURST — definition continues past this view. */
static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
{
	if (!(
(OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 946215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 947215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 948215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 949215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 950215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 951215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 952215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 953215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_BURST(%lu,%lu) is invalid on this chip\n", offset, block_id); 954215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 955215976Sjmallett} 956215976Sjmallett#else 957215976Sjmallett#define CVMX_GMXX_TXX_BURST(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 958215976Sjmallett#endif 959215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 960215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id) 961215976Sjmallett{ 962215976Sjmallett if (!( 963215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) || 964215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) || 965215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0)))))) 966215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_CBFC_XOFF(%lu,%lu) is invalid on this chip\n", offset, block_id); 967215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull; 968215976Sjmallett} 969215976Sjmallett#else 970215976Sjmallett#define 
CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull) 971215976Sjmallett#endif 972215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 973215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id) 974215976Sjmallett{ 975215976Sjmallett if (!( 976215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) || 977215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) || 978215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0)))))) 979215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_CBFC_XON(%lu,%lu) is invalid on this chip\n", offset, block_id); 980215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull; 981215976Sjmallett} 982215976Sjmallett#else 983215976Sjmallett#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull) 984215976Sjmallett#endif 985215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 986215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_CLK(unsigned long offset, unsigned long block_id) 987215976Sjmallett{ 988215976Sjmallett if (!( 989215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 990215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 991215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 992215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 993215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))))) 994215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_CLK(%lu,%lu) is invalid on this chip\n", offset, block_id); 995215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 
996215976Sjmallett} 997215976Sjmallett#else 998215976Sjmallett#define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 999215976Sjmallett#endif 1000215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1001215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id) 1002215976Sjmallett{ 1003215976Sjmallett if (!( 1004215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1005215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1006215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1007215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1008215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1009215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1010215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1011215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1012215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id); 1013215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1014215976Sjmallett} 1015215976Sjmallett#else 1016215976Sjmallett#define CVMX_GMXX_TXX_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1017215976Sjmallett#endif 1018215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1019215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id) 1020215976Sjmallett{ 1021215976Sjmallett if (!( 1022215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 
1023215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1024215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1025215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1026215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1027215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1028215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1029215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1030215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_MIN_PKT(%lu,%lu) is invalid on this chip\n", offset, block_id); 1031215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1032215976Sjmallett} 1033215976Sjmallett#else 1034215976Sjmallett#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1035215976Sjmallett#endif 1036215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1037215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id) 1038215976Sjmallett{ 1039215976Sjmallett if (!( 1040215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1041215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1042215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1043215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1044215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1045215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1046215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1047215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1048215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(%lu,%lu) is invalid on this chip\n", offset, block_id); 1049215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1050215976Sjmallett} 1051215976Sjmallett#else 1052215976Sjmallett#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1053215976Sjmallett#endif 1054215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1055215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id) 1056215976Sjmallett{ 1057215976Sjmallett if (!( 1058215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1059215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1060215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1061215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1062215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1063215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1064215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1065215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1066215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id); 1067215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1068215976Sjmallett} 1069215976Sjmallett#else 1070215976Sjmallett#define 
CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1071215976Sjmallett#endif 1072215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1073215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id) 1074215976Sjmallett{ 1075215976Sjmallett if (!( 1076215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1077215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1078215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1079215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1080215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1081215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1082215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1083215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1084215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_PAUSE_TOGO(%lu,%lu) is invalid on this chip\n", offset, block_id); 1085215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1086215976Sjmallett} 1087215976Sjmallett#else 1088215976Sjmallett#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1089215976Sjmallett#endif 1090215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1091215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id) 1092215976Sjmallett{ 1093215976Sjmallett if (!( 1094215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1095215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1096215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1097215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1098215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1099215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1100215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1101215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1102215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_PAUSE_ZERO(%lu,%lu) is invalid on this chip\n", offset, block_id); 1103215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1104215976Sjmallett} 1105215976Sjmallett#else 1106215976Sjmallett#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1107215976Sjmallett#endif 1108215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1109215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id) 1110215976Sjmallett{ 1111215976Sjmallett if (!( 1112215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1113215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1114215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1115215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_SGMII_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id); 1116215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1117215976Sjmallett} 1118215976Sjmallett#else 1119215976Sjmallett#define CVMX_GMXX_TXX_SGMII_CTL(offset, 
block_id) (CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1120215976Sjmallett#endif 1121215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1122215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id) 1123215976Sjmallett{ 1124215976Sjmallett if (!( 1125215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1126215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1127215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1128215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1129215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1130215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1131215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1132215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1133215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_SLOT(%lu,%lu) is invalid on this chip\n", offset, block_id); 1134215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1135215976Sjmallett} 1136215976Sjmallett#else 1137215976Sjmallett#define CVMX_GMXX_TXX_SLOT(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1138215976Sjmallett#endif 1139215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1140215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id) 1141215976Sjmallett{ 1142215976Sjmallett if (!( 1143215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1144215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 
0)))) || 1145215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1146215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1147215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1148215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1149215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1150215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1151215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_SOFT_PAUSE(%lu,%lu) is invalid on this chip\n", offset, block_id); 1152215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1153215976Sjmallett} 1154215976Sjmallett#else 1155215976Sjmallett#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1156215976Sjmallett#endif 1157215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1158215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id) 1159215976Sjmallett{ 1160215976Sjmallett if (!( 1161215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1162215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1163215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1164215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1165215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1166215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1167215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1168215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1169215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT0(%lu,%lu) is invalid on this chip\n", offset, block_id); 1170215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1171215976Sjmallett} 1172215976Sjmallett#else 1173215976Sjmallett#define CVMX_GMXX_TXX_STAT0(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1174215976Sjmallett#endif 1175215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1176215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id) 1177215976Sjmallett{ 1178215976Sjmallett if (!( 1179215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1180215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1181215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1182215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1183215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1184215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1185215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1186215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1187215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT1(%lu,%lu) is invalid on this chip\n", offset, block_id); 1188215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1189215976Sjmallett} 1190215976Sjmallett#else 1191215976Sjmallett#define CVMX_GMXX_TXX_STAT1(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 
1192215976Sjmallett#endif 1193215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1194215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id) 1195215976Sjmallett{ 1196215976Sjmallett if (!( 1197215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1198215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1199215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1200215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1201215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1202215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1203215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1204215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1205215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT2(%lu,%lu) is invalid on this chip\n", offset, block_id); 1206215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1207215976Sjmallett} 1208215976Sjmallett#else 1209215976Sjmallett#define CVMX_GMXX_TXX_STAT2(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1210215976Sjmallett#endif 1211215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1212215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id) 1213215976Sjmallett{ 1214215976Sjmallett if (!( 1215215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1216215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1217215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 
1218215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1219215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1220215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1221215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1222215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1223215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT3(%lu,%lu) is invalid on this chip\n", offset, block_id); 1224215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1225215976Sjmallett} 1226215976Sjmallett#else 1227215976Sjmallett#define CVMX_GMXX_TXX_STAT3(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1228215976Sjmallett#endif 1229215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1230215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id) 1231215976Sjmallett{ 1232215976Sjmallett if (!( 1233215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1234215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1235215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1236215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1237215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1238215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1239215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1240215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1241215976Sjmallett 
cvmx_warn("CVMX_GMXX_TXX_STAT4(%lu,%lu) is invalid on this chip\n", offset, block_id); 1242215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1243215976Sjmallett} 1244215976Sjmallett#else 1245215976Sjmallett#define CVMX_GMXX_TXX_STAT4(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1246215976Sjmallett#endif 1247215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1248215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id) 1249215976Sjmallett{ 1250215976Sjmallett if (!( 1251215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1252215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1253215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1254215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1255215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1256215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1257215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1258215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1259215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT5(%lu,%lu) is invalid on this chip\n", offset, block_id); 1260215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1261215976Sjmallett} 1262215976Sjmallett#else 1263215976Sjmallett#define CVMX_GMXX_TXX_STAT5(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1264215976Sjmallett#endif 1265215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1266215976Sjmallettstatic inline 
uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id) 1267215976Sjmallett{ 1268215976Sjmallett if (!( 1269215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1270215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1271215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1272215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1273215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1274215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1275215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1276215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1277215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT6(%lu,%lu) is invalid on this chip\n", offset, block_id); 1278215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1279215976Sjmallett} 1280215976Sjmallett#else 1281215976Sjmallett#define CVMX_GMXX_TXX_STAT6(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1282215976Sjmallett#endif 1283215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1284215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id) 1285215976Sjmallett{ 1286215976Sjmallett if (!( 1287215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1288215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1289215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1290215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1291215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1292215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1293215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1294215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1295215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT7(%lu,%lu) is invalid on this chip\n", offset, block_id); 1296215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1297215976Sjmallett} 1298215976Sjmallett#else 1299215976Sjmallett#define CVMX_GMXX_TXX_STAT7(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1300215976Sjmallett#endif 1301215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1302215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id) 1303215976Sjmallett{ 1304215976Sjmallett if (!( 1305215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1306215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1307215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1308215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1309215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1310215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1311215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1312215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1313215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT8(%lu,%lu) is invalid on this chip\n", offset, block_id); 1314215976Sjmallett return 
CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1315215976Sjmallett} 1316215976Sjmallett#else 1317215976Sjmallett#define CVMX_GMXX_TXX_STAT8(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1318215976Sjmallett#endif 1319215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1320215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id) 1321215976Sjmallett{ 1322215976Sjmallett if (!( 1323215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1324215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1325215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1326215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1327215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1328215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1329215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1330215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1331215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STAT9(%lu,%lu) is invalid on this chip\n", offset, block_id); 1332215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1333215976Sjmallett} 1334215976Sjmallett#else 1335215976Sjmallett#define CVMX_GMXX_TXX_STAT9(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1336215976Sjmallett#endif 1337215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1338215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id) 1339215976Sjmallett{ 
1340215976Sjmallett if (!( 1341215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1342215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1343215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1344215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1345215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 1346215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1347215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1348215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1349215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id); 1350215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1351215976Sjmallett} 1352215976Sjmallett#else 1353215976Sjmallett#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1354215976Sjmallett#endif 1355215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1356215976Sjmallettstatic inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id) 1357215976Sjmallett{ 1358215976Sjmallett if (!( 1359215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) || 1360215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) || 1361215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1362215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) || 1363215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 
1364215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1365215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) || 1366215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))))) 1367215976Sjmallett cvmx_warn("CVMX_GMXX_TXX_THRESH(%lu,%lu) is invalid on this chip\n", offset, block_id); 1368215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048; 1369215976Sjmallett} 1370215976Sjmallett#else 1371215976Sjmallett#define CVMX_GMXX_TXX_THRESH(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048) 1372215976Sjmallett#endif 1373215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1374215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id) 1375215976Sjmallett{ 1376215976Sjmallett if (!( 1377215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1378215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1379215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1380215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1381215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1382215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1383215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1384215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1385215976Sjmallett cvmx_warn("CVMX_GMXX_TX_BP(%lu) is invalid on this chip\n", block_id); 1386215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull; 1387215976Sjmallett} 1388215976Sjmallett#else 1389215976Sjmallett#define CVMX_GMXX_TX_BP(block_id) (CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull) 1390215976Sjmallett#endif 1391215976Sjmallett#if 
CVMX_ENABLE_CSR_ADDRESS_CHECKING 1392215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_CLK_MSKX(unsigned long offset, unsigned long block_id) 1393215976Sjmallett{ 1394215976Sjmallett if (!( 1395215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 1)) && ((block_id == 0)))) || 1396215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 1)) && ((block_id == 0)))))) 1397215976Sjmallett cvmx_warn("CVMX_GMXX_TX_CLK_MSKX(%lu,%lu) is invalid on this chip\n", offset, block_id); 1398215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8; 1399215976Sjmallett} 1400215976Sjmallett#else 1401215976Sjmallett#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8) 1402215976Sjmallett#endif 1403215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1404215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id) 1405215976Sjmallett{ 1406215976Sjmallett if (!( 1407215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1408215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1409215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1410215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1411215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1412215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1413215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1414215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1415215976Sjmallett cvmx_warn("CVMX_GMXX_TX_COL_ATTEMPT(%lu) is invalid on this chip\n", block_id); 1416215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull; 1417215976Sjmallett} 1418215976Sjmallett#else 1419215976Sjmallett#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) 
(CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull) 1420215976Sjmallett#endif 1421215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1422215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id) 1423215976Sjmallett{ 1424215976Sjmallett if (!( 1425215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1426215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1427215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1428215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1429215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1430215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1431215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1432215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1433215976Sjmallett cvmx_warn("CVMX_GMXX_TX_CORRUPT(%lu) is invalid on this chip\n", block_id); 1434215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull; 1435215976Sjmallett} 1436215976Sjmallett#else 1437215976Sjmallett#define CVMX_GMXX_TX_CORRUPT(block_id) (CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull) 1438215976Sjmallett#endif 1439215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1440215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id) 1441215976Sjmallett{ 1442215976Sjmallett if (!( 1443215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1444215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1445215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1446215976Sjmallett cvmx_warn("CVMX_GMXX_TX_HG2_REG1(%lu) is invalid on this chip\n", block_id); 1447215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull; 1448215976Sjmallett} 1449215976Sjmallett#else 
1450215976Sjmallett#define CVMX_GMXX_TX_HG2_REG1(block_id) (CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull) 1451215976Sjmallett#endif 1452215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1453215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id) 1454215976Sjmallett{ 1455215976Sjmallett if (!( 1456215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1457215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1458215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1459215976Sjmallett cvmx_warn("CVMX_GMXX_TX_HG2_REG2(%lu) is invalid on this chip\n", block_id); 1460215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull; 1461215976Sjmallett} 1462215976Sjmallett#else 1463215976Sjmallett#define CVMX_GMXX_TX_HG2_REG2(block_id) (CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull) 1464215976Sjmallett#endif 1465215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1466215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id) 1467215976Sjmallett{ 1468215976Sjmallett if (!( 1469215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1470215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1471215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1472215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1473215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1474215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1475215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1476215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1477215976Sjmallett cvmx_warn("CVMX_GMXX_TX_IFG(%lu) is invalid on this chip\n", block_id); 1478215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull; 
1479215976Sjmallett} 1480215976Sjmallett#else 1481215976Sjmallett#define CVMX_GMXX_TX_IFG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull) 1482215976Sjmallett#endif 1483215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1484215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id) 1485215976Sjmallett{ 1486215976Sjmallett if (!( 1487215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1488215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1489215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1490215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1491215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1492215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1493215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1494215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1495215976Sjmallett cvmx_warn("CVMX_GMXX_TX_INT_EN(%lu) is invalid on this chip\n", block_id); 1496215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull; 1497215976Sjmallett} 1498215976Sjmallett#else 1499215976Sjmallett#define CVMX_GMXX_TX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull) 1500215976Sjmallett#endif 1501215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1502215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id) 1503215976Sjmallett{ 1504215976Sjmallett if (!( 1505215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1506215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1507215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1508215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1509215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 
1510215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1511215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1512215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1513215976Sjmallett cvmx_warn("CVMX_GMXX_TX_INT_REG(%lu) is invalid on this chip\n", block_id); 1514215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull; 1515215976Sjmallett} 1516215976Sjmallett#else 1517215976Sjmallett#define CVMX_GMXX_TX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull) 1518215976Sjmallett#endif 1519215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1520215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id) 1521215976Sjmallett{ 1522215976Sjmallett if (!( 1523215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1524215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1525215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1526215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1527215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1528215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1529215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1530215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1531215976Sjmallett cvmx_warn("CVMX_GMXX_TX_JAM(%lu) is invalid on this chip\n", block_id); 1532215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull; 1533215976Sjmallett} 1534215976Sjmallett#else 1535215976Sjmallett#define CVMX_GMXX_TX_JAM(block_id) (CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull) 1536215976Sjmallett#endif 1537215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1538215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id) 1539215976Sjmallett{ 
1540215976Sjmallett if (!( 1541215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1542215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1543215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1544215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1545215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1546215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1547215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1548215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1549215976Sjmallett cvmx_warn("CVMX_GMXX_TX_LFSR(%lu) is invalid on this chip\n", block_id); 1550215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull; 1551215976Sjmallett} 1552215976Sjmallett#else 1553215976Sjmallett#define CVMX_GMXX_TX_LFSR(block_id) (CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull) 1554215976Sjmallett#endif 1555215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1556215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id) 1557215976Sjmallett{ 1558215976Sjmallett if (!( 1559215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1560215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1561215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1562215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1563215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1564215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1565215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1566215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1567215976Sjmallett cvmx_warn("CVMX_GMXX_TX_OVR_BP(%lu) is invalid on this chip\n", block_id); 1568215976Sjmallett return 
CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull; 1569215976Sjmallett} 1570215976Sjmallett#else 1571215976Sjmallett#define CVMX_GMXX_TX_OVR_BP(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull) 1572215976Sjmallett#endif 1573215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1574215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id) 1575215976Sjmallett{ 1576215976Sjmallett if (!( 1577215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1578215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1579215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1580215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1581215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1582215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1583215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1584215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1585215976Sjmallett cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_DMAC(%lu) is invalid on this chip\n", block_id); 1586215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull; 1587215976Sjmallett} 1588215976Sjmallett#else 1589215976Sjmallett#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) (CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull) 1590215976Sjmallett#endif 1591215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1592215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id) 1593215976Sjmallett{ 1594215976Sjmallett if (!( 1595215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1596215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1597215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1598215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && 
((block_id == 0))) || 1599215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1600215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1601215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1602215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1603215976Sjmallett cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_TYPE(%lu) is invalid on this chip\n", block_id); 1604215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull; 1605215976Sjmallett} 1606215976Sjmallett#else 1607215976Sjmallett#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) (CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull) 1608215976Sjmallett#endif 1609215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1610215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id) 1611215976Sjmallett{ 1612215976Sjmallett if (!( 1613215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1614215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1615215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1616215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1617215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1618215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1619215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) || 1620215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1621215976Sjmallett cvmx_warn("CVMX_GMXX_TX_PRTS(%lu) is invalid on this chip\n", block_id); 1622215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull; 1623215976Sjmallett} 1624215976Sjmallett#else 1625215976Sjmallett#define CVMX_GMXX_TX_PRTS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull) 1626215976Sjmallett#endif 1627215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 
1628215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_SPI_CTL(unsigned long block_id) 1629215976Sjmallett{ 1630215976Sjmallett if (!( 1631215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1632215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 1633215976Sjmallett cvmx_warn("CVMX_GMXX_TX_SPI_CTL(%lu) is invalid on this chip\n", block_id); 1634215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull; 1635215976Sjmallett} 1636215976Sjmallett#else 1637215976Sjmallett#define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull) 1638215976Sjmallett#endif 1639215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1640215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id) 1641215976Sjmallett{ 1642215976Sjmallett if (!( 1643215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1644215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 1645215976Sjmallett cvmx_warn("CVMX_GMXX_TX_SPI_DRAIN(%lu) is invalid on this chip\n", block_id); 1646215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull; 1647215976Sjmallett} 1648215976Sjmallett#else 1649215976Sjmallett#define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull) 1650215976Sjmallett#endif 1651215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1652215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_SPI_MAX(unsigned long block_id) 1653215976Sjmallett{ 1654215976Sjmallett if (!( 1655215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1656215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 1657215976Sjmallett cvmx_warn("CVMX_GMXX_TX_SPI_MAX(%lu) is invalid on this chip\n", block_id); 1658215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull; 
1659215976Sjmallett} 1660215976Sjmallett#else 1661215976Sjmallett#define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull) 1662215976Sjmallett#endif 1663215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1664215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset, unsigned long block_id) 1665215976Sjmallett{ 1666215976Sjmallett if (!( 1667215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1)))))) 1668215976Sjmallett cvmx_warn("CVMX_GMXX_TX_SPI_ROUNDX(%lu,%lu) is invalid on this chip\n", offset, block_id); 1669215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8; 1670215976Sjmallett} 1671215976Sjmallett#else 1672215976Sjmallett#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8) 1673215976Sjmallett#endif 1674215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1675215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id) 1676215976Sjmallett{ 1677215976Sjmallett if (!( 1678215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 1679215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 1680215976Sjmallett cvmx_warn("CVMX_GMXX_TX_SPI_THRESH(%lu) is invalid on this chip\n", block_id); 1681215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull; 1682215976Sjmallett} 1683215976Sjmallett#else 1684215976Sjmallett#define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull) 1685215976Sjmallett#endif 1686215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1687215976Sjmallettstatic inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id) 1688215976Sjmallett{ 1689215976Sjmallett if (!( 1690215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && 
((block_id == 0))) || 1691215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1692215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1693215976Sjmallett cvmx_warn("CVMX_GMXX_TX_XAUI_CTL(%lu) is invalid on this chip\n", block_id); 1694215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull; 1695215976Sjmallett} 1696215976Sjmallett#else 1697215976Sjmallett#define CVMX_GMXX_TX_XAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull) 1698215976Sjmallett#endif 1699215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1700215976Sjmallettstatic inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id) 1701215976Sjmallett{ 1702215976Sjmallett if (!( 1703215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1704215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1705215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 1706215976Sjmallett cvmx_warn("CVMX_GMXX_XAUI_EXT_LOOPBACK(%lu) is invalid on this chip\n", block_id); 1707215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull; 1708215976Sjmallett} 1709215976Sjmallett#else 1710215976Sjmallett#define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull) 1711215976Sjmallett#endif 1712215976Sjmallett 1713215976Sjmallett/** 1714215976Sjmallett * cvmx_gmx#_bad_reg 1715215976Sjmallett * 1716215976Sjmallett * GMX_BAD_REG = A collection of things that have gone very, very wrong 1717215976Sjmallett * 1718215976Sjmallett * 1719215976Sjmallett * Notes: 1720215976Sjmallett * In XAUI mode, only the lsb (corresponding to port0) of INB_NXA, LOSTSTAT, OUT_OVR, are used. 
1721215976Sjmallett * 1722215976Sjmallett */ 1723215976Sjmallettunion cvmx_gmxx_bad_reg 1724215976Sjmallett{ 1725215976Sjmallett uint64_t u64; 1726215976Sjmallett struct cvmx_gmxx_bad_reg_s 1727215976Sjmallett { 1728215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1729215976Sjmallett uint64_t reserved_31_63 : 33; 1730215976Sjmallett uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */ 1731215976Sjmallett uint64_t statovr : 1; /**< TX Statistics overflow 1732215976Sjmallett The common FIFO to SGMII and XAUI had an overflow 1733215976Sjmallett TX Stats are corrupted */ 1734215976Sjmallett uint64_t loststat : 4; /**< TX Statistics data was over-written 1735215976Sjmallett In SGMII, one bit per port 1736215976Sjmallett In XAUI, only port0 is used 1737215976Sjmallett TX Stats are corrupted */ 1738215976Sjmallett uint64_t reserved_18_21 : 4; 1739215976Sjmallett uint64_t out_ovr : 16; /**< Outbound data FIFO overflow (per port) */ 1740215976Sjmallett uint64_t ncb_ovr : 1; /**< Outbound NCB FIFO Overflow */ 1741215976Sjmallett uint64_t out_col : 1; /**< Outbound collision occured between PKO and NCB */ 1742215976Sjmallett#else 1743215976Sjmallett uint64_t out_col : 1; 1744215976Sjmallett uint64_t ncb_ovr : 1; 1745215976Sjmallett uint64_t out_ovr : 16; 1746215976Sjmallett uint64_t reserved_18_21 : 4; 1747215976Sjmallett uint64_t loststat : 4; 1748215976Sjmallett uint64_t statovr : 1; 1749215976Sjmallett uint64_t inb_nxa : 4; 1750215976Sjmallett uint64_t reserved_31_63 : 33; 1751215976Sjmallett#endif 1752215976Sjmallett } s; 1753215976Sjmallett struct cvmx_gmxx_bad_reg_cn30xx 1754215976Sjmallett { 1755215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1756215976Sjmallett uint64_t reserved_31_63 : 33; 1757215976Sjmallett uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */ 1758215976Sjmallett uint64_t statovr : 1; /**< TX Statistics overflow */ 1759215976Sjmallett uint64_t reserved_25_25 : 1; 1760215976Sjmallett uint64_t loststat : 3; /**< TX Statistics data was 
over-written (per RGM port) 1761215976Sjmallett TX Stats are corrupted */ 1762215976Sjmallett uint64_t reserved_5_21 : 17; 1763215976Sjmallett uint64_t out_ovr : 3; /**< Outbound data FIFO overflow (per port) */ 1764215976Sjmallett uint64_t reserved_0_1 : 2; 1765215976Sjmallett#else 1766215976Sjmallett uint64_t reserved_0_1 : 2; 1767215976Sjmallett uint64_t out_ovr : 3; 1768215976Sjmallett uint64_t reserved_5_21 : 17; 1769215976Sjmallett uint64_t loststat : 3; 1770215976Sjmallett uint64_t reserved_25_25 : 1; 1771215976Sjmallett uint64_t statovr : 1; 1772215976Sjmallett uint64_t inb_nxa : 4; 1773215976Sjmallett uint64_t reserved_31_63 : 33; 1774215976Sjmallett#endif 1775215976Sjmallett } cn30xx; 1776215976Sjmallett struct cvmx_gmxx_bad_reg_cn30xx cn31xx; 1777215976Sjmallett struct cvmx_gmxx_bad_reg_s cn38xx; 1778215976Sjmallett struct cvmx_gmxx_bad_reg_s cn38xxp2; 1779215976Sjmallett struct cvmx_gmxx_bad_reg_cn30xx cn50xx; 1780215976Sjmallett struct cvmx_gmxx_bad_reg_cn52xx 1781215976Sjmallett { 1782215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1783215976Sjmallett uint64_t reserved_31_63 : 33; 1784215976Sjmallett uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */ 1785215976Sjmallett uint64_t statovr : 1; /**< TX Statistics overflow 1786215976Sjmallett The common FIFO to SGMII and XAUI had an overflow 1787215976Sjmallett TX Stats are corrupted */ 1788215976Sjmallett uint64_t loststat : 4; /**< TX Statistics data was over-written 1789215976Sjmallett In SGMII, one bit per port 1790215976Sjmallett In XAUI, only port0 is used 1791215976Sjmallett TX Stats are corrupted */ 1792215976Sjmallett uint64_t reserved_6_21 : 16; 1793215976Sjmallett uint64_t out_ovr : 4; /**< Outbound data FIFO overflow (per port) */ 1794215976Sjmallett uint64_t reserved_0_1 : 2; 1795215976Sjmallett#else 1796215976Sjmallett uint64_t reserved_0_1 : 2; 1797215976Sjmallett uint64_t out_ovr : 4; 1798215976Sjmallett uint64_t reserved_6_21 : 16; 1799215976Sjmallett uint64_t loststat : 4; 
1800215976Sjmallett uint64_t statovr : 1; 1801215976Sjmallett uint64_t inb_nxa : 4; 1802215976Sjmallett uint64_t reserved_31_63 : 33; 1803215976Sjmallett#endif 1804215976Sjmallett } cn52xx; 1805215976Sjmallett struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1; 1806215976Sjmallett struct cvmx_gmxx_bad_reg_cn52xx cn56xx; 1807215976Sjmallett struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1; 1808215976Sjmallett struct cvmx_gmxx_bad_reg_s cn58xx; 1809215976Sjmallett struct cvmx_gmxx_bad_reg_s cn58xxp1; 1810215976Sjmallett struct cvmx_gmxx_bad_reg_cn52xx cn63xx; 1811215976Sjmallett struct cvmx_gmxx_bad_reg_cn52xx cn63xxp1; 1812215976Sjmallett}; 1813215976Sjmalletttypedef union cvmx_gmxx_bad_reg cvmx_gmxx_bad_reg_t; 1814215976Sjmallett 1815215976Sjmallett/** 1816215976Sjmallett * cvmx_gmx#_bist 1817215976Sjmallett * 1818215976Sjmallett * GMX_BIST = GMX BIST Results 1819215976Sjmallett * 1820215976Sjmallett */ 1821215976Sjmallettunion cvmx_gmxx_bist 1822215976Sjmallett{ 1823215976Sjmallett uint64_t u64; 1824215976Sjmallett struct cvmx_gmxx_bist_s 1825215976Sjmallett { 1826215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1827215976Sjmallett uint64_t reserved_25_63 : 39; 1828215976Sjmallett uint64_t status : 25; /**< BIST Results. 
1829215976Sjmallett HW sets a bit in BIST for for memory that fails 1830215976Sjmallett - 0: gmx#.inb.fif_bnk0 1831215976Sjmallett - 1: gmx#.inb.fif_bnk1 1832215976Sjmallett - 2: gmx#.inb.fif_bnk2 1833215976Sjmallett - 3: gmx#.inb.fif_bnk3 1834215976Sjmallett - 4: gmx#.inb.fif_bnk_ext0 1835215976Sjmallett - 5: gmx#.inb.fif_bnk_ext1 1836215976Sjmallett - 6: gmx#.inb.fif_bnk_ext2 1837215976Sjmallett - 7: gmx#.inb.fif_bnk_ext3 1838215976Sjmallett - 8: gmx#.outb.fif.fif_bnk0 1839215976Sjmallett - 9: gmx#.outb.fif.fif_bnk1 1840215976Sjmallett - 10: gmx#.outb.fif.fif_bnk2 1841215976Sjmallett - 11: gmx#.outb.fif.fif_bnk3 1842215976Sjmallett - 12: gmx#.outb.fif.fif_bnk_ext0 1843215976Sjmallett - 13: gmx#.outb.fif.fif_bnk_ext1 1844215976Sjmallett - 14: gmx#.outb.fif.fif_bnk_ext2 1845215976Sjmallett - 15: gmx#.outb.fif.fif_bnk_ext3 1846215976Sjmallett - 16: gmx#.csr.gmi0.srf8x64m1_bist 1847215976Sjmallett - 17: gmx#.csr.gmi1.srf8x64m1_bist 1848215976Sjmallett - 18: gmx#.csr.gmi2.srf8x64m1_bist 1849215976Sjmallett - 19: gmx#.csr.gmi3.srf8x64m1_bist 1850215976Sjmallett - 20: gmx#.csr.drf20x32m2_bist 1851215976Sjmallett - 21: gmx#.csr.drf20x48m2_bist 1852215976Sjmallett - 22: gmx#.outb.stat.drf16x27m1_bist 1853215976Sjmallett - 23: gmx#.outb.stat.drf40x64m1_bist 1854215976Sjmallett - 24: xgmii.tx.drf16x38m1_async_bist */ 1855215976Sjmallett#else 1856215976Sjmallett uint64_t status : 25; 1857215976Sjmallett uint64_t reserved_25_63 : 39; 1858215976Sjmallett#endif 1859215976Sjmallett } s; 1860215976Sjmallett struct cvmx_gmxx_bist_cn30xx 1861215976Sjmallett { 1862215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1863215976Sjmallett uint64_t reserved_10_63 : 54; 1864215976Sjmallett uint64_t status : 10; /**< BIST Results. 
1865215976Sjmallett HW sets a bit in BIST for for memory that fails 1866215976Sjmallett - 0: gmx#.inb.dpr512x78m4_bist 1867215976Sjmallett - 1: gmx#.outb.fif.dpr512x71m4_bist 1868215976Sjmallett - 2: gmx#.csr.gmi0.srf8x64m1_bist 1869215976Sjmallett - 3: gmx#.csr.gmi1.srf8x64m1_bist 1870215976Sjmallett - 4: gmx#.csr.gmi2.srf8x64m1_bist 1871215976Sjmallett - 5: 0 1872215976Sjmallett - 6: gmx#.csr.drf20x80m1_bist 1873215976Sjmallett - 7: gmx#.outb.stat.drf16x27m1_bist 1874215976Sjmallett - 8: gmx#.outb.stat.drf40x64m1_bist 1875215976Sjmallett - 9: 0 */ 1876215976Sjmallett#else 1877215976Sjmallett uint64_t status : 10; 1878215976Sjmallett uint64_t reserved_10_63 : 54; 1879215976Sjmallett#endif 1880215976Sjmallett } cn30xx; 1881215976Sjmallett struct cvmx_gmxx_bist_cn30xx cn31xx; 1882215976Sjmallett struct cvmx_gmxx_bist_cn30xx cn38xx; 1883215976Sjmallett struct cvmx_gmxx_bist_cn30xx cn38xxp2; 1884215976Sjmallett struct cvmx_gmxx_bist_cn50xx 1885215976Sjmallett { 1886215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1887215976Sjmallett uint64_t reserved_12_63 : 52; 1888215976Sjmallett uint64_t status : 12; /**< BIST Results. 1889215976Sjmallett HW sets a bit in BIST for for memory that fails */ 1890215976Sjmallett#else 1891215976Sjmallett uint64_t status : 12; 1892215976Sjmallett uint64_t reserved_12_63 : 52; 1893215976Sjmallett#endif 1894215976Sjmallett } cn50xx; 1895215976Sjmallett struct cvmx_gmxx_bist_cn52xx 1896215976Sjmallett { 1897215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1898215976Sjmallett uint64_t reserved_16_63 : 48; 1899215976Sjmallett uint64_t status : 16; /**< BIST Results. 
1900215976Sjmallett HW sets a bit in BIST for for memory that fails 1901215976Sjmallett - 0: gmx#.inb.fif_bnk0 1902215976Sjmallett - 1: gmx#.inb.fif_bnk1 1903215976Sjmallett - 2: gmx#.inb.fif_bnk2 1904215976Sjmallett - 3: gmx#.inb.fif_bnk3 1905215976Sjmallett - 4: gmx#.outb.fif.fif_bnk0 1906215976Sjmallett - 5: gmx#.outb.fif.fif_bnk1 1907215976Sjmallett - 6: gmx#.outb.fif.fif_bnk2 1908215976Sjmallett - 7: gmx#.outb.fif.fif_bnk3 1909215976Sjmallett - 8: gmx#.csr.gmi0.srf8x64m1_bist 1910215976Sjmallett - 9: gmx#.csr.gmi1.srf8x64m1_bist 1911215976Sjmallett - 10: gmx#.csr.gmi2.srf8x64m1_bist 1912215976Sjmallett - 11: gmx#.csr.gmi3.srf8x64m1_bist 1913215976Sjmallett - 12: gmx#.csr.drf20x80m1_bist 1914215976Sjmallett - 13: gmx#.outb.stat.drf16x27m1_bist 1915215976Sjmallett - 14: gmx#.outb.stat.drf40x64m1_bist 1916215976Sjmallett - 15: xgmii.tx.drf16x38m1_async_bist */ 1917215976Sjmallett#else 1918215976Sjmallett uint64_t status : 16; 1919215976Sjmallett uint64_t reserved_16_63 : 48; 1920215976Sjmallett#endif 1921215976Sjmallett } cn52xx; 1922215976Sjmallett struct cvmx_gmxx_bist_cn52xx cn52xxp1; 1923215976Sjmallett struct cvmx_gmxx_bist_cn52xx cn56xx; 1924215976Sjmallett struct cvmx_gmxx_bist_cn52xx cn56xxp1; 1925215976Sjmallett struct cvmx_gmxx_bist_cn58xx 1926215976Sjmallett { 1927215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1928215976Sjmallett uint64_t reserved_17_63 : 47; 1929215976Sjmallett uint64_t status : 17; /**< BIST Results. 
1930215976Sjmallett HW sets a bit in BIST for for memory that fails 1931215976Sjmallett - 0: gmx#.inb.fif_bnk0 1932215976Sjmallett - 1: gmx#.inb.fif_bnk1 1933215976Sjmallett - 2: gmx#.inb.fif_bnk2 1934215976Sjmallett - 3: gmx#.inb.fif_bnk3 1935215976Sjmallett - 4: gmx#.outb.fif.fif_bnk0 1936215976Sjmallett - 5: gmx#.outb.fif.fif_bnk1 1937215976Sjmallett - 6: gmx#.outb.fif.fif_bnk2 1938215976Sjmallett - 7: gmx#.outb.fif.fif_bnk3 1939215976Sjmallett - 8: gmx#.csr.gmi0.srf8x64m1_bist 1940215976Sjmallett - 9: gmx#.csr.gmi1.srf8x64m1_bist 1941215976Sjmallett - 10: gmx#.csr.gmi2.srf8x64m1_bist 1942215976Sjmallett - 11: gmx#.csr.gmi3.srf8x64m1_bist 1943215976Sjmallett - 12: gmx#.csr.drf20x80m1_bist 1944215976Sjmallett - 13: gmx#.outb.stat.drf16x27m1_bist 1945215976Sjmallett - 14: gmx#.outb.stat.drf40x64m1_bist 1946215976Sjmallett - 15: gmx#.outb.ncb.drf16x76m1_bist 1947215976Sjmallett - 16: gmx#.outb.fif.srf32x16m2_bist */ 1948215976Sjmallett#else 1949215976Sjmallett uint64_t status : 17; 1950215976Sjmallett uint64_t reserved_17_63 : 47; 1951215976Sjmallett#endif 1952215976Sjmallett } cn58xx; 1953215976Sjmallett struct cvmx_gmxx_bist_cn58xx cn58xxp1; 1954215976Sjmallett struct cvmx_gmxx_bist_s cn63xx; 1955215976Sjmallett struct cvmx_gmxx_bist_s cn63xxp1; 1956215976Sjmallett}; 1957215976Sjmalletttypedef union cvmx_gmxx_bist cvmx_gmxx_bist_t; 1958215976Sjmallett 1959215976Sjmallett/** 1960215976Sjmallett * cvmx_gmx#_clk_en 1961215976Sjmallett * 1962215976Sjmallett * DO NOT DOCUMENT THIS REGISTER - IT IS NOT OFFICIAL 1963215976Sjmallett * 1964215976Sjmallett */ 1965215976Sjmallettunion cvmx_gmxx_clk_en 1966215976Sjmallett{ 1967215976Sjmallett uint64_t u64; 1968215976Sjmallett struct cvmx_gmxx_clk_en_s 1969215976Sjmallett { 1970215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 1971215976Sjmallett uint64_t reserved_1_63 : 63; 1972215976Sjmallett uint64_t clk_en : 1; /**< Force the clock enables on */ 1973215976Sjmallett#else 1974215976Sjmallett uint64_t clk_en : 1; 
1975215976Sjmallett uint64_t reserved_1_63 : 63; 1976215976Sjmallett#endif 1977215976Sjmallett } s; 1978215976Sjmallett struct cvmx_gmxx_clk_en_s cn52xx; 1979215976Sjmallett struct cvmx_gmxx_clk_en_s cn52xxp1; 1980215976Sjmallett struct cvmx_gmxx_clk_en_s cn56xx; 1981215976Sjmallett struct cvmx_gmxx_clk_en_s cn56xxp1; 1982215976Sjmallett struct cvmx_gmxx_clk_en_s cn63xx; 1983215976Sjmallett struct cvmx_gmxx_clk_en_s cn63xxp1; 1984215976Sjmallett}; 1985215976Sjmalletttypedef union cvmx_gmxx_clk_en cvmx_gmxx_clk_en_t; 1986215976Sjmallett 1987215976Sjmallett/** 1988215976Sjmallett * cvmx_gmx#_hg2_control 1989215976Sjmallett * 1990215976Sjmallett * Notes: 1991215976Sjmallett * The HiGig2 TX and RX enable would normally be both set together for HiGig2 messaging. However 1992215976Sjmallett * setting just the TX or RX bit will result in only the HG2 message transmit or the receive 1993215976Sjmallett * capability. 1994215976Sjmallett * PHYS_EN and LOGL_EN bits when 1, allow link pause or back pressure to PKO as per received 1995215976Sjmallett * HiGig2 message. When 0, link pause and back pressure to PKO in response to received messages 1996215976Sjmallett * are disabled. 1997215976Sjmallett * 1998215976Sjmallett * GMX*_TX_XAUI_CTL[HG_EN] must be set to one(to enable HiGig) whenever either HG2TX_EN or HG2RX_EN 1999215976Sjmallett * are set. 2000215976Sjmallett * 2001215976Sjmallett * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN 2002215976Sjmallett * are set. 2003215976Sjmallett * 2004215976Sjmallett * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero 2005215976Sjmallett * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when 2006215976Sjmallett * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1 2007215976Sjmallett * and GMX*_RX0_UDD_SKP[LEN]=16.) 
The HW can only auto-generate backpressure via HiGig2 messages 2008215976Sjmallett * (optionally, when HG2TX_EN=1) with the HiGig2 protocol. 2009215976Sjmallett */ 2010215976Sjmallettunion cvmx_gmxx_hg2_control 2011215976Sjmallett{ 2012215976Sjmallett uint64_t u64; 2013215976Sjmallett struct cvmx_gmxx_hg2_control_s 2014215976Sjmallett { 2015215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2016215976Sjmallett uint64_t reserved_19_63 : 45; 2017215976Sjmallett uint64_t hg2tx_en : 1; /**< Enable Transmission of HG2 phys and logl messages 2018215976Sjmallett When set, also disables HW auto-generated (802.3 2019215976Sjmallett and CBFC) pause frames. (OCTEON cannot generate 2020215976Sjmallett proper 802.3 or CBFC pause frames in HiGig2 mode.) */ 2021215976Sjmallett uint64_t hg2rx_en : 1; /**< Enable extraction and processing of HG2 message 2022215976Sjmallett packet from RX flow. Physical logical pause info 2023215976Sjmallett is used to pause physical link, back pressure PKO 2024215976Sjmallett HG2RX_EN must be set when HiGig2 messages are 2025215976Sjmallett present in the receive stream. 
*/ 2026215976Sjmallett uint64_t phys_en : 1; /**< 1 bit physical link pause enable for recevied 2027215976Sjmallett HiGig2 physical pause message */ 2028215976Sjmallett uint64_t logl_en : 16; /**< 16 bit xof enables for recevied HiGig2 messages 2029215976Sjmallett or CBFC packets */ 2030215976Sjmallett#else 2031215976Sjmallett uint64_t logl_en : 16; 2032215976Sjmallett uint64_t phys_en : 1; 2033215976Sjmallett uint64_t hg2rx_en : 1; 2034215976Sjmallett uint64_t hg2tx_en : 1; 2035215976Sjmallett uint64_t reserved_19_63 : 45; 2036215976Sjmallett#endif 2037215976Sjmallett } s; 2038215976Sjmallett struct cvmx_gmxx_hg2_control_s cn52xx; 2039215976Sjmallett struct cvmx_gmxx_hg2_control_s cn52xxp1; 2040215976Sjmallett struct cvmx_gmxx_hg2_control_s cn56xx; 2041215976Sjmallett struct cvmx_gmxx_hg2_control_s cn63xx; 2042215976Sjmallett struct cvmx_gmxx_hg2_control_s cn63xxp1; 2043215976Sjmallett}; 2044215976Sjmalletttypedef union cvmx_gmxx_hg2_control cvmx_gmxx_hg2_control_t; 2045215976Sjmallett 2046215976Sjmallett/** 2047215976Sjmallett * cvmx_gmx#_inf_mode 2048215976Sjmallett * 2049215976Sjmallett * GMX_INF_MODE = Interface Mode 2050215976Sjmallett * 2051215976Sjmallett */ 2052215976Sjmallettunion cvmx_gmxx_inf_mode 2053215976Sjmallett{ 2054215976Sjmallett uint64_t u64; 2055215976Sjmallett struct cvmx_gmxx_inf_mode_s 2056215976Sjmallett { 2057215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2058215976Sjmallett uint64_t reserved_12_63 : 52; 2059215976Sjmallett uint64_t speed : 4; /**< Interface Speed */ 2060215976Sjmallett uint64_t reserved_6_7 : 2; 2061215976Sjmallett uint64_t mode : 2; /**< Interface Electrical Operating Mode 2062215976Sjmallett - 0: SGMII (v1.8) 2063215976Sjmallett - 1: XAUI (IEEE 802.3-2005) */ 2064215976Sjmallett uint64_t reserved_3_3 : 1; 2065215976Sjmallett uint64_t p0mii : 1; /**< Port 0 Interface Mode 2066215976Sjmallett - 0: Port 0 is RGMII 2067215976Sjmallett - 1: Port 0 is MII */ 2068215976Sjmallett uint64_t en : 1; /**< Interface Enable 
2069215976Sjmallett Must be set to enable the packet interface. 2070215976Sjmallett Should be enabled before any other requests to 2071215976Sjmallett GMX including enabling port back pressure with 2072215976Sjmallett IPD_CTL_STATUS[PBP_EN] */ 2073215976Sjmallett uint64_t type : 1; /**< Interface Protocol Type 2074215976Sjmallett - 0: SGMII/1000Base-X 2075215976Sjmallett - 1: XAUI */ 2076215976Sjmallett#else 2077215976Sjmallett uint64_t type : 1; 2078215976Sjmallett uint64_t en : 1; 2079215976Sjmallett uint64_t p0mii : 1; 2080215976Sjmallett uint64_t reserved_3_3 : 1; 2081215976Sjmallett uint64_t mode : 2; 2082215976Sjmallett uint64_t reserved_6_7 : 2; 2083215976Sjmallett uint64_t speed : 4; 2084215976Sjmallett uint64_t reserved_12_63 : 52; 2085215976Sjmallett#endif 2086215976Sjmallett } s; 2087215976Sjmallett struct cvmx_gmxx_inf_mode_cn30xx 2088215976Sjmallett { 2089215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2090215976Sjmallett uint64_t reserved_3_63 : 61; 2091215976Sjmallett uint64_t p0mii : 1; /**< Port 0 Interface Mode 2092215976Sjmallett - 0: Port 0 is RGMII 2093215976Sjmallett - 1: Port 0 is MII */ 2094215976Sjmallett uint64_t en : 1; /**< Interface Enable 2095215976Sjmallett Must be set to enable the packet interface. 
2096215976Sjmallett Should be enabled before any other requests to 2097215976Sjmallett GMX including enabling port back pressure with 2098215976Sjmallett IPD_CTL_STATUS[PBP_EN] */ 2099215976Sjmallett uint64_t type : 1; /**< Port 1/2 Interface Mode 2100215976Sjmallett - 0: Ports 1 and 2 are RGMII 2101215976Sjmallett - 1: Port 1 is GMII/MII, Port 2 is unused 2102215976Sjmallett GMII/MII is selected by GMX_PRT1_CFG[SPEED] */ 2103215976Sjmallett#else 2104215976Sjmallett uint64_t type : 1; 2105215976Sjmallett uint64_t en : 1; 2106215976Sjmallett uint64_t p0mii : 1; 2107215976Sjmallett uint64_t reserved_3_63 : 61; 2108215976Sjmallett#endif 2109215976Sjmallett } cn30xx; 2110215976Sjmallett struct cvmx_gmxx_inf_mode_cn31xx 2111215976Sjmallett { 2112215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2113215976Sjmallett uint64_t reserved_2_63 : 62; 2114215976Sjmallett uint64_t en : 1; /**< Interface Enable 2115215976Sjmallett Must be set to enable the packet interface. 2116215976Sjmallett Should be enabled before any other requests to 2117215976Sjmallett GMX including enabling port back pressure with 2118215976Sjmallett IPD_CTL_STATUS[PBP_EN] */ 2119215976Sjmallett uint64_t type : 1; /**< Interface Mode 2120215976Sjmallett - 0: All three ports are RGMII ports 2121215976Sjmallett - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */ 2122215976Sjmallett#else 2123215976Sjmallett uint64_t type : 1; 2124215976Sjmallett uint64_t en : 1; 2125215976Sjmallett uint64_t reserved_2_63 : 62; 2126215976Sjmallett#endif 2127215976Sjmallett } cn31xx; 2128215976Sjmallett struct cvmx_gmxx_inf_mode_cn31xx cn38xx; 2129215976Sjmallett struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2; 2130215976Sjmallett struct cvmx_gmxx_inf_mode_cn30xx cn50xx; 2131215976Sjmallett struct cvmx_gmxx_inf_mode_cn52xx 2132215976Sjmallett { 2133215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2134215976Sjmallett uint64_t reserved_10_63 : 54; 2135215976Sjmallett uint64_t speed : 2; /**< Interface Speed 2136215976Sjmallett - 0: 
1.250GHz 2137215976Sjmallett - 1: 2.500GHz 2138215976Sjmallett - 2: 3.125GHz 2139215976Sjmallett - 3: 3.750GHz */ 2140215976Sjmallett uint64_t reserved_6_7 : 2; 2141215976Sjmallett uint64_t mode : 2; /**< Interface Electrical Operating Mode 2142215976Sjmallett - 0: Disabled (PCIe) 2143215976Sjmallett - 1: XAUI (IEEE 802.3-2005) 2144215976Sjmallett - 2: SGMII (v1.8) 2145215976Sjmallett - 3: PICMG3.1 */ 2146215976Sjmallett uint64_t reserved_2_3 : 2; 2147215976Sjmallett uint64_t en : 1; /**< Interface Enable 2148215976Sjmallett Must be set to enable the packet interface. 2149215976Sjmallett Should be enabled before any other requests to 2150215976Sjmallett GMX including enabling port back pressure with 2151215976Sjmallett IPD_CTL_STATUS[PBP_EN] */ 2152215976Sjmallett uint64_t type : 1; /**< Interface Protocol Type 2153215976Sjmallett - 0: SGMII/1000Base-X 2154215976Sjmallett - 1: XAUI */ 2155215976Sjmallett#else 2156215976Sjmallett uint64_t type : 1; 2157215976Sjmallett uint64_t en : 1; 2158215976Sjmallett uint64_t reserved_2_3 : 2; 2159215976Sjmallett uint64_t mode : 2; 2160215976Sjmallett uint64_t reserved_6_7 : 2; 2161215976Sjmallett uint64_t speed : 2; 2162215976Sjmallett uint64_t reserved_10_63 : 54; 2163215976Sjmallett#endif 2164215976Sjmallett } cn52xx; 2165215976Sjmallett struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1; 2166215976Sjmallett struct cvmx_gmxx_inf_mode_cn52xx cn56xx; 2167215976Sjmallett struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1; 2168215976Sjmallett struct cvmx_gmxx_inf_mode_cn31xx cn58xx; 2169215976Sjmallett struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1; 2170215976Sjmallett struct cvmx_gmxx_inf_mode_cn63xx 2171215976Sjmallett { 2172215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2173215976Sjmallett uint64_t reserved_12_63 : 52; 2174215976Sjmallett uint64_t speed : 4; /**< Interface Speed */ 2175215976Sjmallett uint64_t reserved_5_7 : 3; 2176215976Sjmallett uint64_t mode : 1; /**< Interface Electrical Operating Mode 2177215976Sjmallett - 0: SGMII (v1.8) 
2178215976Sjmallett - 1: XAUI (IEEE 802.3-2005) */ 2179215976Sjmallett uint64_t reserved_2_3 : 2; 2180215976Sjmallett uint64_t en : 1; /**< Interface Enable 2181215976Sjmallett Must be set to enable the packet interface. 2182215976Sjmallett Should be enabled before any other requests to 2183215976Sjmallett GMX including enabling port back pressure with 2184215976Sjmallett IPD_CTL_STATUS[PBP_EN] */ 2185215976Sjmallett uint64_t type : 1; /**< Interface Protocol Type 2186215976Sjmallett - 0: SGMII/1000Base-X 2187215976Sjmallett - 1: XAUI */ 2188215976Sjmallett#else 2189215976Sjmallett uint64_t type : 1; 2190215976Sjmallett uint64_t en : 1; 2191215976Sjmallett uint64_t reserved_2_3 : 2; 2192215976Sjmallett uint64_t mode : 1; 2193215976Sjmallett uint64_t reserved_5_7 : 3; 2194215976Sjmallett uint64_t speed : 4; 2195215976Sjmallett uint64_t reserved_12_63 : 52; 2196215976Sjmallett#endif 2197215976Sjmallett } cn63xx; 2198215976Sjmallett struct cvmx_gmxx_inf_mode_cn63xx cn63xxp1; 2199215976Sjmallett}; 2200215976Sjmalletttypedef union cvmx_gmxx_inf_mode cvmx_gmxx_inf_mode_t; 2201215976Sjmallett 2202215976Sjmallett/** 2203215976Sjmallett * cvmx_gmx#_nxa_adr 2204215976Sjmallett * 2205215976Sjmallett * GMX_NXA_ADR = NXA Port Address 2206215976Sjmallett * 2207215976Sjmallett */ 2208215976Sjmallettunion cvmx_gmxx_nxa_adr 2209215976Sjmallett{ 2210215976Sjmallett uint64_t u64; 2211215976Sjmallett struct cvmx_gmxx_nxa_adr_s 2212215976Sjmallett { 2213215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2214215976Sjmallett uint64_t reserved_6_63 : 58; 2215215976Sjmallett uint64_t prt : 6; /**< Logged address for NXA exceptions 2216215976Sjmallett The logged address will be from the first 2217215976Sjmallett exception that caused the problem. NCB has 2218215976Sjmallett higher priority than PKO and will win. 
2219215976Sjmallett (only PRT[3:0]) */ 2220215976Sjmallett#else 2221215976Sjmallett uint64_t prt : 6; 2222215976Sjmallett uint64_t reserved_6_63 : 58; 2223215976Sjmallett#endif 2224215976Sjmallett } s; 2225215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn30xx; 2226215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn31xx; 2227215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn38xx; 2228215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn38xxp2; 2229215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn50xx; 2230215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn52xx; 2231215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn52xxp1; 2232215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn56xx; 2233215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn56xxp1; 2234215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn58xx; 2235215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn58xxp1; 2236215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn63xx; 2237215976Sjmallett struct cvmx_gmxx_nxa_adr_s cn63xxp1; 2238215976Sjmallett}; 2239215976Sjmalletttypedef union cvmx_gmxx_nxa_adr cvmx_gmxx_nxa_adr_t; 2240215976Sjmallett 2241215976Sjmallett/** 2242215976Sjmallett * cvmx_gmx#_prt#_cbfc_ctl 2243215976Sjmallett * 2244215976Sjmallett * ** HG2 message CSRs end 2245215976Sjmallett * 2246215976Sjmallett * 2247215976Sjmallett * Notes: 2248215976Sjmallett * XOFF for a specific port is XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>) 2249215976Sjmallett * 2250215976Sjmallett */ 2251215976Sjmallettunion cvmx_gmxx_prtx_cbfc_ctl 2252215976Sjmallett{ 2253215976Sjmallett uint64_t u64; 2254215976Sjmallett struct cvmx_gmxx_prtx_cbfc_ctl_s 2255215976Sjmallett { 2256215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2257215976Sjmallett uint64_t phys_en : 16; /**< Determines which ports will have physical 2258215976Sjmallett backpressure pause packets. 
2259215976Sjmallett The value pplaced in the Class Enable Vector 2260215976Sjmallett field of the CBFC pause packet will be 2261215976Sjmallett PHYS_EN | LOGL_EN */ 2262215976Sjmallett uint64_t logl_en : 16; /**< Determines which ports will have logical 2263215976Sjmallett backpressure pause packets. 2264215976Sjmallett The value pplaced in the Class Enable Vector 2265215976Sjmallett field of the CBFC pause packet will be 2266215976Sjmallett PHYS_EN | LOGL_EN */ 2267215976Sjmallett uint64_t phys_bp : 16; /**< When RX_EN is set and the HW is backpressuring any 2268215976Sjmallett ports (from either CBFC pause packets or the 2269215976Sjmallett GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports 2270215976Sjmallett indiciated by PHYS_BP are backpressured, simulate 2271215976Sjmallett physical backpressure by defering all packets on 2272215976Sjmallett the transmitter. */ 2273215976Sjmallett uint64_t reserved_4_15 : 12; 2274215976Sjmallett uint64_t bck_en : 1; /**< Forward CBFC Pause information to BP block */ 2275215976Sjmallett uint64_t drp_en : 1; /**< Drop Control CBFC Pause Frames */ 2276215976Sjmallett uint64_t tx_en : 1; /**< When set, allow for CBFC Pause Packets 2277215976Sjmallett Must be clear in HiGig2 mode i.e. when 2278215976Sjmallett GMX_TX_XAUI_CTL[HG_EN]=1 and 2279215976Sjmallett GMX_RX_UDD_SKP[SKIP]=16. */ 2280215976Sjmallett uint64_t rx_en : 1; /**< When set, allow for CBFC Pause Packets 2281215976Sjmallett Must be clear in HiGig2 mode i.e. when 2282215976Sjmallett GMX_TX_XAUI_CTL[HG_EN]=1 and 2283215976Sjmallett GMX_RX_UDD_SKP[SKIP]=16. 
*/ 2284215976Sjmallett#else 2285215976Sjmallett uint64_t rx_en : 1; 2286215976Sjmallett uint64_t tx_en : 1; 2287215976Sjmallett uint64_t drp_en : 1; 2288215976Sjmallett uint64_t bck_en : 1; 2289215976Sjmallett uint64_t reserved_4_15 : 12; 2290215976Sjmallett uint64_t phys_bp : 16; 2291215976Sjmallett uint64_t logl_en : 16; 2292215976Sjmallett uint64_t phys_en : 16; 2293215976Sjmallett#endif 2294215976Sjmallett } s; 2295215976Sjmallett struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx; 2296215976Sjmallett struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx; 2297215976Sjmallett struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xx; 2298215976Sjmallett struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xxp1; 2299215976Sjmallett}; 2300215976Sjmalletttypedef union cvmx_gmxx_prtx_cbfc_ctl cvmx_gmxx_prtx_cbfc_ctl_t; 2301215976Sjmallett 2302215976Sjmallett/** 2303215976Sjmallett * cvmx_gmx#_prt#_cfg 2304215976Sjmallett * 2305215976Sjmallett * GMX_PRT_CFG = Port description 2306215976Sjmallett * 2307215976Sjmallett */ 2308215976Sjmallettunion cvmx_gmxx_prtx_cfg 2309215976Sjmallett{ 2310215976Sjmallett uint64_t u64; 2311215976Sjmallett struct cvmx_gmxx_prtx_cfg_s 2312215976Sjmallett { 2313215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2314215976Sjmallett uint64_t reserved_14_63 : 50; 2315215976Sjmallett uint64_t tx_idle : 1; /**< TX Machine is idle */ 2316215976Sjmallett uint64_t rx_idle : 1; /**< RX Machine is idle */ 2317215976Sjmallett uint64_t reserved_9_11 : 3; 2318215976Sjmallett uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED] 2319215976Sjmallett 10 = 10Mbs operation 2320215976Sjmallett 00 = 100Mbs operation 2321215976Sjmallett 01 = 1000Mbs operation 2322215976Sjmallett 11 = Reserved 2323215976Sjmallett (SGMII/1000Base-X only) */ 2324215976Sjmallett uint64_t reserved_4_7 : 4; 2325215976Sjmallett uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation 2326215976Sjmallett 0 = 512 bitimes (10/100Mbs operation) 2327215976Sjmallett 1 = 4096 bitimes (1000Mbs operation) 2328215976Sjmallett 
(SGMII/1000Base-X only) */ 2329215976Sjmallett uint64_t duplex : 1; /**< Duplex 2330215976Sjmallett 0 = Half Duplex (collisions/extentions/bursts) 2331215976Sjmallett 1 = Full Duplex 2332215976Sjmallett (SGMII/1000Base-X only) */ 2333215976Sjmallett uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED] 2334215976Sjmallett 10 = 10Mbs operation 2335215976Sjmallett 00 = 100Mbs operation 2336215976Sjmallett 01 = 1000Mbs operation 2337215976Sjmallett 11 = Reserved 2338215976Sjmallett (SGMII/1000Base-X only) */ 2339215976Sjmallett uint64_t en : 1; /**< Link Enable 2340215976Sjmallett When EN is clear, packets will not be received 2341215976Sjmallett or transmitted (including PAUSE and JAM packets). 2342215976Sjmallett If EN is cleared while a packet is currently 2343215976Sjmallett being received or transmitted, the packet will 2344215976Sjmallett be allowed to complete before the bus is idled. 2345215976Sjmallett On the RX side, subsequent packets in a burst 2346215976Sjmallett will be ignored. 
*/ 2347215976Sjmallett#else 2348215976Sjmallett uint64_t en : 1; 2349215976Sjmallett uint64_t speed : 1; 2350215976Sjmallett uint64_t duplex : 1; 2351215976Sjmallett uint64_t slottime : 1; 2352215976Sjmallett uint64_t reserved_4_7 : 4; 2353215976Sjmallett uint64_t speed_msb : 1; 2354215976Sjmallett uint64_t reserved_9_11 : 3; 2355215976Sjmallett uint64_t rx_idle : 1; 2356215976Sjmallett uint64_t tx_idle : 1; 2357215976Sjmallett uint64_t reserved_14_63 : 50; 2358215976Sjmallett#endif 2359215976Sjmallett } s; 2360215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx 2361215976Sjmallett { 2362215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2363215976Sjmallett uint64_t reserved_4_63 : 60; 2364215976Sjmallett uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation 2365215976Sjmallett 0 = 512 bitimes (10/100Mbs operation) 2366215976Sjmallett 1 = 4096 bitimes (1000Mbs operation) */ 2367215976Sjmallett uint64_t duplex : 1; /**< Duplex 2368215976Sjmallett 0 = Half Duplex (collisions/extentions/bursts) 2369215976Sjmallett 1 = Full Duplex */ 2370215976Sjmallett uint64_t speed : 1; /**< Link Speed 2371215976Sjmallett 0 = 10/100Mbs operation 2372215976Sjmallett (in RGMII mode, GMX_TX_CLK[CLK_CNT] > 1) 2373215976Sjmallett (in MII mode, GMX_TX_CLK[CLK_CNT] == 1) 2374215976Sjmallett 1 = 1000Mbs operation */ 2375215976Sjmallett uint64_t en : 1; /**< Link Enable 2376215976Sjmallett When EN is clear, packets will not be received 2377215976Sjmallett or transmitted (including PAUSE and JAM packets). 2378215976Sjmallett If EN is cleared while a packet is currently 2379215976Sjmallett being received or transmitted, the packet will 2380215976Sjmallett be allowed to complete before the bus is idled. 2381215976Sjmallett On the RX side, subsequent packets in a burst 2382215976Sjmallett will be ignored. 
*/ 2383215976Sjmallett#else 2384215976Sjmallett uint64_t en : 1; 2385215976Sjmallett uint64_t speed : 1; 2386215976Sjmallett uint64_t duplex : 1; 2387215976Sjmallett uint64_t slottime : 1; 2388215976Sjmallett uint64_t reserved_4_63 : 60; 2389215976Sjmallett#endif 2390215976Sjmallett } cn30xx; 2391215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx; 2392215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx; 2393215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2; 2394215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx; 2395215976Sjmallett struct cvmx_gmxx_prtx_cfg_s cn52xx; 2396215976Sjmallett struct cvmx_gmxx_prtx_cfg_s cn52xxp1; 2397215976Sjmallett struct cvmx_gmxx_prtx_cfg_s cn56xx; 2398215976Sjmallett struct cvmx_gmxx_prtx_cfg_s cn56xxp1; 2399215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx; 2400215976Sjmallett struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1; 2401215976Sjmallett struct cvmx_gmxx_prtx_cfg_s cn63xx; 2402215976Sjmallett struct cvmx_gmxx_prtx_cfg_s cn63xxp1; 2403215976Sjmallett}; 2404215976Sjmalletttypedef union cvmx_gmxx_prtx_cfg cvmx_gmxx_prtx_cfg_t; 2405215976Sjmallett 2406215976Sjmallett/** 2407215976Sjmallett * cvmx_gmx#_rx#_adr_cam0 2408215976Sjmallett * 2409215976Sjmallett * GMX_RX_ADR_CAM = Address Filtering Control 2410215976Sjmallett * 2411215976Sjmallett */ 2412215976Sjmallettunion cvmx_gmxx_rxx_adr_cam0 2413215976Sjmallett{ 2414215976Sjmallett uint64_t u64; 2415215976Sjmallett struct cvmx_gmxx_rxx_adr_cam0_s 2416215976Sjmallett { 2417215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2418215976Sjmallett uint64_t adr : 64; /**< The DMAC address to match on 2419215976Sjmallett Each entry contributes 8bits to one of 8 matchers 2420215976Sjmallett Write transactions to GMX_RX_ADR_CAM will not 2421215976Sjmallett change the CSR when GMX_PRT_CFG[EN] is enabled 2422215976Sjmallett The CAM matches against unicst or multicst DMAC 2423215976Sjmallett addresses. 
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam0 cvmx_gmxx_rxx_adr_cam0_t;

/**
 * cvmx_gmx#_rx#_adr_cam1
 *
 * GMX_RX_ADR_CAM = Address Filtering Control
 *
 */
union cvmx_gmxx_rxx_adr_cam1
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam1_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam1 cvmx_gmxx_rxx_adr_cam1_t;

/**
 * cvmx_gmx#_rx#_adr_cam2
 *
 * GMX_RX_ADR_CAM = Address Filtering Control
 *
 */
union cvmx_gmxx_rxx_adr_cam2
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam2_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam2 cvmx_gmxx_rxx_adr_cam2_t;

/**
 * cvmx_gmx#_rx#_adr_cam3
 *
 * GMX_RX_ADR_CAM = Address Filtering Control
 *
 */
union cvmx_gmxx_rxx_adr_cam3
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam3_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam3 cvmx_gmxx_rxx_adr_cam3_t;

/**
 * cvmx_gmx#_rx#_adr_cam4
 *
 * GMX_RX_ADR_CAM = Address Filtering Control
 *
 */
union cvmx_gmxx_rxx_adr_cam4
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam4_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam4 cvmx_gmxx_rxx_adr_cam4_t;

/**
 * cvmx_gmx#_rx#_adr_cam5
 *
 * GMX_RX_ADR_CAM = Address Filtering Control
 *
 */
union cvmx_gmxx_rxx_adr_cam5
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam5_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam5 cvmx_gmxx_rxx_adr_cam5_t;

/**
 * cvmx_gmx#_rx#_adr_cam_en
 *
 * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
 *
 */
union cvmx_gmxx_rxx_adr_cam_en
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam_en_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t en                           : 8;  /**< CAM Entry Enables */
#else
	uint64_t en                           : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn30xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn31xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn38xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn50xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn52xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn56xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn58xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn63xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam_en cvmx_gmxx_rxx_adr_cam_en_t;

/**
 * cvmx_gmx#_rx#_adr_ctl
 *
 * GMX_RX_ADR_CTL = Address Filtering Control
 *
 *
 * Notes:
 * * ALGORITHM
 *   Here is some pseudo code that represents the address filter behavior.
 *
 * @verbatim
 * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
 *   ASSERT(prt >= 0 && prt <= 3);
 *   if (is_bcst(dmac))                               // broadcast accept
 *     return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
 *   if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 1)  // multicast reject
 *     return REJECT;
 *   if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 2)  // multicast accept
 *     return ACCEPT;
 *
 *   cam_hit = 0;
 *
 *   for (i=0; i<8; i++) [
 *     if (GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
 *       continue;
 *     uint48 unswizzled_mac_adr = 0x0;
 *     for (j=5; j>=0; j--) [
 *       unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
 *     ]
 *     if (unswizzled_mac_adr == dmac) [
 *       cam_hit = 1;
 *       break;
 *     ]
 *   ]
 *
 *   if (cam_hit)
 *     return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
 *   else
 *     return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
 * ]
 * @endverbatim
 */
union cvmx_gmxx_rxx_adr_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t cam_mode                     : 1;  /**< Allow or deny DMAC address filter
                                                         0 = reject the packet on DMAC address match
                                                         1 = accept the packet on DMAC address match */
	uint64_t mcst                         : 2;  /**< Multicast Mode
                                                         0 = Use the Address Filter CAM
                                                         1 = Force reject all multicast packets
                                                         2 = Force accept all multicast packets
                                                         3 = Reserved */
	uint64_t bcst                         : 1;  /**< Accept All Broadcast Packets */
#else
	uint64_t bcst                         : 1;
	uint64_t mcst                         : 2;
	uint64_t cam_mode                     : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn30xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn31xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn38xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn38xxp2;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn50xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn52xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn52xxp1;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn56xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn56xxp1;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn58xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn58xxp1;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn63xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_ctl cvmx_gmxx_rxx_adr_ctl_t;

/**
 * cvmx_gmx#_rx#_decision
 *
 * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
 *
 *
 * Notes:
 * As each byte in a packet is received by GMX, the L2 byte count is compared
 * against the GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
 * from the beginning of the L2 header (DMAC). In normal operation, the L2
 * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any
 * optional UDD skip data (GMX_RX_UDD_SKP[LEN]).
 *
 * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
 * packet and would require UDD skip length to account for them.
 *
 * L2 Size
 * Port Mode       <GMX_RX_DECISION bytes (default=24)       >=GMX_RX_DECISION bytes (default=24)
 *
 * Full Duplex     accept packet                             apply filters
 *                 no filtering is applied                   accept packet based on DMAC and PAUSE packet filters
 *
 * Half Duplex     drop packet                               apply filters
 *                 packet is unconditionally dropped         accept packet based on DMAC
 *
 * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8)
 */
union cvmx_gmxx_rxx_decision
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_decision_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t cnt                          : 5;  /**< The byte count to decide when to accept or filter
                                                         a packet. */
#else
	uint64_t cnt                          : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_gmxx_rxx_decision_s       cn30xx;
	struct cvmx_gmxx_rxx_decision_s       cn31xx;
	struct cvmx_gmxx_rxx_decision_s       cn38xx;
	struct cvmx_gmxx_rxx_decision_s       cn38xxp2;
	struct cvmx_gmxx_rxx_decision_s       cn50xx;
	struct cvmx_gmxx_rxx_decision_s       cn52xx;
	struct cvmx_gmxx_rxx_decision_s       cn52xxp1;
	struct cvmx_gmxx_rxx_decision_s       cn56xx;
	struct cvmx_gmxx_rxx_decision_s       cn56xxp1;
	struct cvmx_gmxx_rxx_decision_s       cn58xx;
	struct cvmx_gmxx_rxx_decision_s       cn58xxp1;
	struct cvmx_gmxx_rxx_decision_s       cn63xx;
	struct cvmx_gmxx_rxx_decision_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_decision cvmx_gmxx_rxx_decision_t;

/**
 * cvmx_gmx#_rx#_frm_chk
 *
 * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
 *
 *
 * Notes:
 * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
 *
 * In XAUI mode prt0 is used for checking.
 */
union cvmx_gmxx_rxx_frm_chk
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_frm_chk_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_10_63               : 54;
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
#else
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t maxerr                       : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t lenerr                       : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t reserved_10_63               : 54;
#endif
	} s;
	struct cvmx_gmxx_rxx_frm_chk_s        cn30xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn31xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn38xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn38xxp2;
	struct cvmx_gmxx_rxx_frm_chk_cn50xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_10_63               : 54;
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
	uint64_t reserved_6_6                 : 1;
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
	uint64_t reserved_0_0                 : 1;
#else
	uint64_t reserved_0_0                 : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t reserved_6_6                 : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t reserved_10_63               : 54;
#endif
	} cn50xx;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_0_0                 : 1;
#else
	uint64_t reserved_0_0                 : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} cn52xx;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn52xxp1;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn56xx;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn56xxp1;
	struct cvmx_gmxx_rxx_frm_chk_s        cn58xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn58xxp1;
	struct cvmx_gmxx_rxx_frm_chk_cn63xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
#else
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} cn63xx;
	struct cvmx_gmxx_rxx_frm_chk_cn63xx   cn63xxp1;
};
typedef union cvmx_gmxx_rxx_frm_chk cvmx_gmxx_rxx_frm_chk_t;

/**
 * cvmx_gmx#_rx#_frm_ctl
 *
 * GMX_RX_FRM_CTL = Frame Control
 *
 *
 * Notes:
 * * PRE_STRP
 *   When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
 *   determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
 *   core as part of the packet.
 *
 *   In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
 *   size when checking against the MIN and MAX bounds. Furthermore, the bytes
 *   are skipped when locating the start of the L2 header for DMAC and Control
 *   frame recognition.
 *
 * * CTL_BCK/CTL_DRP
 *   These bits control how the HW handles incoming PAUSE packets. Here are
 *   the most common modes of operation:
 *     CTL_BCK=1,CTL_DRP=1 - HW does it all
 *     CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
 *     CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
 *
 *   These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
2978215976Sjmallett * Since PAUSE packets only apply to fulldup operation, any PAUSE packet 2979215976Sjmallett * would constitute an exception which should be handled by the processing 2980215976Sjmallett * cores. PAUSE packets should not be forwarded. 2981215976Sjmallett */ 2982215976Sjmallettunion cvmx_gmxx_rxx_frm_ctl 2983215976Sjmallett{ 2984215976Sjmallett uint64_t u64; 2985215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_s 2986215976Sjmallett { 2987215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 2988215976Sjmallett uint64_t reserved_13_63 : 51; 2989215976Sjmallett uint64_t ptp_mode : 1; /**< Timestamp mode 2990215976Sjmallett When PTP_MODE is set, a 64-bit timestamp will be 2991215976Sjmallett prepended to every incoming packet. The timestamp 2992215976Sjmallett bytes are added to the packet in such a way as to 2993215976Sjmallett not modify the packet's receive byte count. This 2994215976Sjmallett implies that the GMX_RX_JABBER, MINERR, 2995215976Sjmallett GMX_RX_DECISION, GMX_RX_UDD_SKP, and the 2996215976Sjmallett GMX_RX_STATS_* do not require any adjustment as 2997215976Sjmallett they operate on the received packet size. 2998215976Sjmallett When the packet reaches PKI, its size will 2999215976Sjmallett reflect the additional bytes and is subject to 3000215976Sjmallett the restrictions below. 3001215976Sjmallett If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. 3002215976Sjmallett If PTP_MODE=1, 3003215976Sjmallett PIP_PRT_CFGx[SKIP] should be increased by 8. 3004215976Sjmallett PIP_PRT_CFGx[HIGIG_EN] should be 0. 3005215976Sjmallett PIP_FRM_CHKx[MAXLEN] should be increased by 8. 3006215976Sjmallett PIP_FRM_CHKx[MINLEN] should be increased by 8. 3007215976Sjmallett PIP_TAG_INCx[EN] should be adjusted. 
*/ 3008215976Sjmallett uint64_t reserved_11_11 : 1; 3009215976Sjmallett uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks 3010215976Sjmallett due to PARITAL packets */ 3011215976Sjmallett uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 3012215976Sjmallett regardless of the number of previous PREAMBLE 3013215976Sjmallett nibbles. In this mode, PRE_STRP should be set to 3014215976Sjmallett account for the variable nature of the PREAMBLE. 3015215976Sjmallett PRE_CHK must be set to enable this and all 3016215976Sjmallett PREAMBLE features. 3017215976Sjmallett (SGMII at 10/100Mbs only) */ 3018215976Sjmallett uint64_t pad_len : 1; /**< When set, disables the length check for non-min 3019215976Sjmallett sized pkts with padding in the client data 3020215976Sjmallett (PASS3 Only) */ 3021215976Sjmallett uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ 3022215976Sjmallett uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 3023215976Sjmallett GMX will begin the frame at the first SFD. 3024215976Sjmallett PRE_CHK must be set to enable this and all 3025215976Sjmallett PREAMBLE features. 3026215976Sjmallett (SGMII/1000Base-X only) */ 3027215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3028215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3029215976Sjmallett Multicast address */ 3030215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3031215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3032215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3033215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3034215976Sjmallett 1=PREAMBLE+SFD is dropped 3035215976Sjmallett PRE_CHK must be set to enable this and all 3036215976Sjmallett PREAMBLE features. 
3037215976Sjmallett If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */ 3038215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3 3039215976Sjmallett PREAMBLE to begin every frame. GMX checks that a 3040215976Sjmallett valid PREAMBLE is received (based on PRE_FREE). 3041215976Sjmallett When a problem does occur within the PREAMBLE 3042215976Sjmallett seqeunce, the frame is marked as bad and not sent 3043215976Sjmallett into the core. The GMX_GMX_RX_INT_REG[PCTERR] 3044215976Sjmallett interrupt is also raised. 3045215976Sjmallett When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK 3046215976Sjmallett must be zero. 3047215976Sjmallett If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */ 3048215976Sjmallett#else 3049215976Sjmallett uint64_t pre_chk : 1; 3050215976Sjmallett uint64_t pre_strp : 1; 3051215976Sjmallett uint64_t ctl_drp : 1; 3052215976Sjmallett uint64_t ctl_bck : 1; 3053215976Sjmallett uint64_t ctl_mcst : 1; 3054215976Sjmallett uint64_t ctl_smac : 1; 3055215976Sjmallett uint64_t pre_free : 1; 3056215976Sjmallett uint64_t vlan_len : 1; 3057215976Sjmallett uint64_t pad_len : 1; 3058215976Sjmallett uint64_t pre_align : 1; 3059215976Sjmallett uint64_t null_dis : 1; 3060215976Sjmallett uint64_t reserved_11_11 : 1; 3061215976Sjmallett uint64_t ptp_mode : 1; 3062215976Sjmallett uint64_t reserved_13_63 : 51; 3063215976Sjmallett#endif 3064215976Sjmallett } s; 3065215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn30xx 3066215976Sjmallett { 3067215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3068215976Sjmallett uint64_t reserved_9_63 : 55; 3069215976Sjmallett uint64_t pad_len : 1; /**< When set, disables the length check for non-min 3070215976Sjmallett sized pkts with padding in the client data */ 3071215976Sjmallett uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ 3072215976Sjmallett uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking. 
3073215976Sjmallett 0-7 cycles of PREAMBLE followed by SFD (pass 1.0) 3074215976Sjmallett 0-254 cycles of PREAMBLE followed by SFD (else) */ 3075215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3076215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3077215976Sjmallett Multicast address */ 3078215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3079215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3080215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3081215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3082215976Sjmallett 1=PREAMBLE+SFD is dropped */ 3083215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD 3084215976Sjmallett to begin every frame. GMX checks that the 3085215976Sjmallett PREAMBLE is sent correctly */ 3086215976Sjmallett#else 3087215976Sjmallett uint64_t pre_chk : 1; 3088215976Sjmallett uint64_t pre_strp : 1; 3089215976Sjmallett uint64_t ctl_drp : 1; 3090215976Sjmallett uint64_t ctl_bck : 1; 3091215976Sjmallett uint64_t ctl_mcst : 1; 3092215976Sjmallett uint64_t ctl_smac : 1; 3093215976Sjmallett uint64_t pre_free : 1; 3094215976Sjmallett uint64_t vlan_len : 1; 3095215976Sjmallett uint64_t pad_len : 1; 3096215976Sjmallett uint64_t reserved_9_63 : 55; 3097215976Sjmallett#endif 3098215976Sjmallett } cn30xx; 3099215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn31xx 3100215976Sjmallett { 3101215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3102215976Sjmallett uint64_t reserved_8_63 : 56; 3103215976Sjmallett uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ 3104215976Sjmallett uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking. 
3105215976Sjmallett 0 - 7 cycles of PREAMBLE followed by SFD (pass1.0) 3106215976Sjmallett 0 - 254 cycles of PREAMBLE followed by SFD (else) */ 3107215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3108215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3109215976Sjmallett Multicast address */ 3110215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3111215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3112215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3113215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3114215976Sjmallett 1=PREAMBLE+SFD is dropped */ 3115215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD 3116215976Sjmallett to begin every frame. GMX checks that the 3117215976Sjmallett PREAMBLE is sent correctly */ 3118215976Sjmallett#else 3119215976Sjmallett uint64_t pre_chk : 1; 3120215976Sjmallett uint64_t pre_strp : 1; 3121215976Sjmallett uint64_t ctl_drp : 1; 3122215976Sjmallett uint64_t ctl_bck : 1; 3123215976Sjmallett uint64_t ctl_mcst : 1; 3124215976Sjmallett uint64_t ctl_smac : 1; 3125215976Sjmallett uint64_t pre_free : 1; 3126215976Sjmallett uint64_t vlan_len : 1; 3127215976Sjmallett uint64_t reserved_8_63 : 56; 3128215976Sjmallett#endif 3129215976Sjmallett } cn31xx; 3130215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx; 3131215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2; 3132215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn50xx 3133215976Sjmallett { 3134215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3135215976Sjmallett uint64_t reserved_11_63 : 53; 3136215976Sjmallett uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks 3137215976Sjmallett due to PARITAL packets */ 3138215976Sjmallett uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 3139215976Sjmallett 
regardless of the number of previous PREAMBLE 3140215976Sjmallett nibbles. In this mode, PREAMBLE can be consumed 3141215976Sjmallett by the HW so when PRE_ALIGN is set, PRE_FREE, 3142215976Sjmallett PRE_STRP must be set for correct operation. 3143215976Sjmallett PRE_CHK must be set to enable this and all 3144215976Sjmallett PREAMBLE features. */ 3145215976Sjmallett uint64_t reserved_7_8 : 2; 3146215976Sjmallett uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking. 3147215976Sjmallett 0-254 cycles of PREAMBLE followed by SFD */ 3148215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3149215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3150215976Sjmallett Multicast address */ 3151215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3152215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3153215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3154215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3155215976Sjmallett 1=PREAMBLE+SFD is dropped */ 3156215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD 3157215976Sjmallett to begin every frame. 
GMX checks that the 3158215976Sjmallett PREAMBLE is sent correctly */ 3159215976Sjmallett#else 3160215976Sjmallett uint64_t pre_chk : 1; 3161215976Sjmallett uint64_t pre_strp : 1; 3162215976Sjmallett uint64_t ctl_drp : 1; 3163215976Sjmallett uint64_t ctl_bck : 1; 3164215976Sjmallett uint64_t ctl_mcst : 1; 3165215976Sjmallett uint64_t ctl_smac : 1; 3166215976Sjmallett uint64_t pre_free : 1; 3167215976Sjmallett uint64_t reserved_7_8 : 2; 3168215976Sjmallett uint64_t pre_align : 1; 3169215976Sjmallett uint64_t null_dis : 1; 3170215976Sjmallett uint64_t reserved_11_63 : 53; 3171215976Sjmallett#endif 3172215976Sjmallett } cn50xx; 3173215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx; 3174215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1; 3175215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx; 3176215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 3177215976Sjmallett { 3178215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3179215976Sjmallett uint64_t reserved_10_63 : 54; 3180215976Sjmallett uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 3181215976Sjmallett regardless of the number of previous PREAMBLE 3182215976Sjmallett nibbles. In this mode, PRE_STRP should be set to 3183215976Sjmallett account for the variable nature of the PREAMBLE. 3184215976Sjmallett PRE_CHK must be set to enable this and all 3185215976Sjmallett PREAMBLE features. 3186215976Sjmallett (SGMII at 10/100Mbs only) */ 3187215976Sjmallett uint64_t reserved_7_8 : 2; 3188215976Sjmallett uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 3189215976Sjmallett 0 - 254 cycles of PREAMBLE followed by SFD 3190215976Sjmallett PRE_CHK must be set to enable this and all 3191215976Sjmallett PREAMBLE features. 
3192215976Sjmallett (SGMII/1000Base-X only) */ 3193215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3194215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3195215976Sjmallett Multicast address */ 3196215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3197215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3198215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3199215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3200215976Sjmallett 1=PREAMBLE+SFD is dropped 3201215976Sjmallett PRE_CHK must be set to enable this and all 3202215976Sjmallett PREAMBLE features. */ 3203215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD 3204215976Sjmallett to begin every frame. GMX checks that the 3205215976Sjmallett PREAMBLE is sent correctly. 3206215976Sjmallett When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK 3207215976Sjmallett must be zero. */ 3208215976Sjmallett#else 3209215976Sjmallett uint64_t pre_chk : 1; 3210215976Sjmallett uint64_t pre_strp : 1; 3211215976Sjmallett uint64_t ctl_drp : 1; 3212215976Sjmallett uint64_t ctl_bck : 1; 3213215976Sjmallett uint64_t ctl_mcst : 1; 3214215976Sjmallett uint64_t ctl_smac : 1; 3215215976Sjmallett uint64_t pre_free : 1; 3216215976Sjmallett uint64_t reserved_7_8 : 2; 3217215976Sjmallett uint64_t pre_align : 1; 3218215976Sjmallett uint64_t reserved_10_63 : 54; 3219215976Sjmallett#endif 3220215976Sjmallett } cn56xxp1; 3221215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn58xx 3222215976Sjmallett { 3223215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3224215976Sjmallett uint64_t reserved_11_63 : 53; 3225215976Sjmallett uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks 3226215976Sjmallett due to PARITAL packets 3227215976Sjmallett In spi4 mode, all ports use prt0 for checking. 
*/ 3228215976Sjmallett uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 3229215976Sjmallett regardless of the number of previous PREAMBLE 3230215976Sjmallett nibbles. In this mode, PREAMBLE can be consumed 3231215976Sjmallett by the HW so when PRE_ALIGN is set, PRE_FREE, 3232215976Sjmallett PRE_STRP must be set for correct operation. 3233215976Sjmallett PRE_CHK must be set to enable this and all 3234215976Sjmallett PREAMBLE features. */ 3235215976Sjmallett uint64_t pad_len : 1; /**< When set, disables the length check for non-min 3236215976Sjmallett sized pkts with padding in the client data 3237215976Sjmallett (PASS3 Only) */ 3238215976Sjmallett uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ 3239215976Sjmallett uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 3240215976Sjmallett 0 - 254 cycles of PREAMBLE followed by SFD */ 3241215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3242215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3243215976Sjmallett Multicast address */ 3244215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3245215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3246215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3247215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3248215976Sjmallett 1=PREAMBLE+SFD is dropped */ 3249215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD 3250215976Sjmallett to begin every frame. 
GMX checks that the 3251215976Sjmallett PREAMBLE is sent correctly */ 3252215976Sjmallett#else 3253215976Sjmallett uint64_t pre_chk : 1; 3254215976Sjmallett uint64_t pre_strp : 1; 3255215976Sjmallett uint64_t ctl_drp : 1; 3256215976Sjmallett uint64_t ctl_bck : 1; 3257215976Sjmallett uint64_t ctl_mcst : 1; 3258215976Sjmallett uint64_t ctl_smac : 1; 3259215976Sjmallett uint64_t pre_free : 1; 3260215976Sjmallett uint64_t vlan_len : 1; 3261215976Sjmallett uint64_t pad_len : 1; 3262215976Sjmallett uint64_t pre_align : 1; 3263215976Sjmallett uint64_t null_dis : 1; 3264215976Sjmallett uint64_t reserved_11_63 : 53; 3265215976Sjmallett#endif 3266215976Sjmallett } cn58xx; 3267215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1; 3268215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn63xx 3269215976Sjmallett { 3270215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3271215976Sjmallett uint64_t reserved_13_63 : 51; 3272215976Sjmallett uint64_t ptp_mode : 1; /**< Timestamp mode 3273215976Sjmallett When PTP_MODE is set, a 64-bit timestamp will be 3274215976Sjmallett prepended to every incoming packet. The timestamp 3275215976Sjmallett bytes are added to the packet in such a way as to 3276215976Sjmallett not modify the packet's receive byte count. This 3277215976Sjmallett implies that the GMX_RX_JABBER, MINERR, 3278215976Sjmallett GMX_RX_DECISION, GMX_RX_UDD_SKP, and the 3279215976Sjmallett GMX_RX_STATS_* do not require any adjustment as 3280215976Sjmallett they operate on the received packet size. 3281215976Sjmallett When the packet reaches PKI, its size will 3282215976Sjmallett reflect the additional bytes and is subject to 3283215976Sjmallett the restrictions below. 3284215976Sjmallett If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. 3285215976Sjmallett If PTP_MODE=1, 3286215976Sjmallett PIP_PRT_CFGx[SKIP] should be increased by 8. 3287215976Sjmallett PIP_PRT_CFGx[HIGIG_EN] should be 0. 3288215976Sjmallett PIP_FRM_CHKx[MAXLEN] should be increased by 8. 
3289215976Sjmallett PIP_FRM_CHKx[MINLEN] should be increased by 8. 3290215976Sjmallett PIP_TAG_INCx[EN] should be adjusted. */ 3291215976Sjmallett uint64_t reserved_11_11 : 1; 3292215976Sjmallett uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks 3293215976Sjmallett due to PARITAL packets */ 3294215976Sjmallett uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte 3295215976Sjmallett regardless of the number of previous PREAMBLE 3296215976Sjmallett nibbles. In this mode, PRE_STRP should be set to 3297215976Sjmallett account for the variable nature of the PREAMBLE. 3298215976Sjmallett PRE_CHK must be set to enable this and all 3299215976Sjmallett PREAMBLE features. 3300215976Sjmallett (SGMII at 10/100Mbs only) */ 3301215976Sjmallett uint64_t reserved_7_8 : 2; 3302215976Sjmallett uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 3303215976Sjmallett GMX will begin the frame at the first SFD. 3304215976Sjmallett PRE_CHK must be set to enable this and all 3305215976Sjmallett PREAMBLE features. 3306215976Sjmallett (SGMII/1000Base-X only) */ 3307215976Sjmallett uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ 3308215976Sjmallett uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign 3309215976Sjmallett Multicast address */ 3310215976Sjmallett uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ 3311215976Sjmallett uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ 3312215976Sjmallett uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 3313215976Sjmallett 0=PREAMBLE+SFD is sent to core as part of frame 3314215976Sjmallett 1=PREAMBLE+SFD is dropped 3315215976Sjmallett PRE_CHK must be set to enable this and all 3316215976Sjmallett PREAMBLE features. 3317215976Sjmallett If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. 
*/ 3318215976Sjmallett uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3 3319215976Sjmallett PREAMBLE to begin every frame. GMX checks that a 3320215976Sjmallett valid PREAMBLE is received (based on PRE_FREE). 3321215976Sjmallett When a problem does occur within the PREAMBLE 3322215976Sjmallett seqeunce, the frame is marked as bad and not sent 3323215976Sjmallett into the core. The GMX_GMX_RX_INT_REG[PCTERR] 3324215976Sjmallett interrupt is also raised. 3325215976Sjmallett When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK 3326215976Sjmallett must be zero. 3327215976Sjmallett If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */ 3328215976Sjmallett#else 3329215976Sjmallett uint64_t pre_chk : 1; 3330215976Sjmallett uint64_t pre_strp : 1; 3331215976Sjmallett uint64_t ctl_drp : 1; 3332215976Sjmallett uint64_t ctl_bck : 1; 3333215976Sjmallett uint64_t ctl_mcst : 1; 3334215976Sjmallett uint64_t ctl_smac : 1; 3335215976Sjmallett uint64_t pre_free : 1; 3336215976Sjmallett uint64_t reserved_7_8 : 2; 3337215976Sjmallett uint64_t pre_align : 1; 3338215976Sjmallett uint64_t null_dis : 1; 3339215976Sjmallett uint64_t reserved_11_11 : 1; 3340215976Sjmallett uint64_t ptp_mode : 1; 3341215976Sjmallett uint64_t reserved_13_63 : 51; 3342215976Sjmallett#endif 3343215976Sjmallett } cn63xx; 3344215976Sjmallett struct cvmx_gmxx_rxx_frm_ctl_cn63xx cn63xxp1; 3345215976Sjmallett}; 3346215976Sjmalletttypedef union cvmx_gmxx_rxx_frm_ctl cvmx_gmxx_rxx_frm_ctl_t; 3347215976Sjmallett 3348215976Sjmallett/** 3349215976Sjmallett * cvmx_gmx#_rx#_frm_max 3350215976Sjmallett * 3351215976Sjmallett * GMX_RX_FRM_MAX = Frame Max length 3352215976Sjmallett * 3353215976Sjmallett * 3354215976Sjmallett * Notes: 3355215976Sjmallett * In spi4 mode, all spi4 ports use prt0 for checking. 3356215976Sjmallett * 3357215976Sjmallett * When changing the LEN field, be sure that LEN does not exceed 3358215976Sjmallett * GMX_RX_JABBER[CNT]. 
Failure to meet this constraint will cause packets that 3359215976Sjmallett * are within the maximum length parameter to be rejected because they exceed 3360215976Sjmallett * the GMX_RX_JABBER[CNT] limit. 3361215976Sjmallett */ 3362215976Sjmallettunion cvmx_gmxx_rxx_frm_max 3363215976Sjmallett{ 3364215976Sjmallett uint64_t u64; 3365215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s 3366215976Sjmallett { 3367215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3368215976Sjmallett uint64_t reserved_16_63 : 48; 3369215976Sjmallett uint64_t len : 16; /**< Byte count for Max-sized frame check 3370215976Sjmallett GMX_RXn_FRM_CHK[MAXERR] enables the check for 3371215976Sjmallett port n. 3372215976Sjmallett If enabled, failing packets set the MAXERR 3373215976Sjmallett interrupt and work-queue entry WORD2[opcode] is 3374215976Sjmallett set to OVER_FCS (0x3, if packet has bad FCS) or 3375215976Sjmallett OVER_ERR (0x4, if packet has good FCS). 3376215976Sjmallett LEN =< GMX_RX_JABBER[CNT] */ 3377215976Sjmallett#else 3378215976Sjmallett uint64_t len : 16; 3379215976Sjmallett uint64_t reserved_16_63 : 48; 3380215976Sjmallett#endif 3381215976Sjmallett } s; 3382215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s cn30xx; 3383215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s cn31xx; 3384215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s cn38xx; 3385215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s cn38xxp2; 3386215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s cn58xx; 3387215976Sjmallett struct cvmx_gmxx_rxx_frm_max_s cn58xxp1; 3388215976Sjmallett}; 3389215976Sjmalletttypedef union cvmx_gmxx_rxx_frm_max cvmx_gmxx_rxx_frm_max_t; 3390215976Sjmallett 3391215976Sjmallett/** 3392215976Sjmallett * cvmx_gmx#_rx#_frm_min 3393215976Sjmallett * 3394215976Sjmallett * GMX_RX_FRM_MIN = Frame Min length 3395215976Sjmallett * 3396215976Sjmallett * 3397215976Sjmallett * Notes: 3398215976Sjmallett * In spi4 mode, all spi4 ports use prt0 for checking. 
3399215976Sjmallett * 3400215976Sjmallett */ 3401215976Sjmallettunion cvmx_gmxx_rxx_frm_min 3402215976Sjmallett{ 3403215976Sjmallett uint64_t u64; 3404215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s 3405215976Sjmallett { 3406215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3407215976Sjmallett uint64_t reserved_16_63 : 48; 3408215976Sjmallett uint64_t len : 16; /**< Byte count for Min-sized frame check 3409215976Sjmallett GMX_RXn_FRM_CHK[MINERR] enables the check for 3410215976Sjmallett port n. 3411215976Sjmallett If enabled, failing packets set the MINERR 3412215976Sjmallett interrupt and work-queue entry WORD2[opcode] is 3413215976Sjmallett set to UNDER_FCS (0x6, if packet has bad FCS) or 3414215976Sjmallett UNDER_ERR (0x8, if packet has good FCS). */ 3415215976Sjmallett#else 3416215976Sjmallett uint64_t len : 16; 3417215976Sjmallett uint64_t reserved_16_63 : 48; 3418215976Sjmallett#endif 3419215976Sjmallett } s; 3420215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s cn30xx; 3421215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s cn31xx; 3422215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s cn38xx; 3423215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s cn38xxp2; 3424215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s cn58xx; 3425215976Sjmallett struct cvmx_gmxx_rxx_frm_min_s cn58xxp1; 3426215976Sjmallett}; 3427215976Sjmalletttypedef union cvmx_gmxx_rxx_frm_min cvmx_gmxx_rxx_frm_min_t; 3428215976Sjmallett 3429215976Sjmallett/** 3430215976Sjmallett * cvmx_gmx#_rx#_ifg 3431215976Sjmallett * 3432215976Sjmallett * GMX_RX_IFG = RX Min IFG 3433215976Sjmallett * 3434215976Sjmallett */ 3435215976Sjmallettunion cvmx_gmxx_rxx_ifg 3436215976Sjmallett{ 3437215976Sjmallett uint64_t u64; 3438215976Sjmallett struct cvmx_gmxx_rxx_ifg_s 3439215976Sjmallett { 3440215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3441215976Sjmallett uint64_t reserved_4_63 : 60; 3442215976Sjmallett uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to 3443215976Sjmallett determine IFGERR. 
Normally IFG is 96 bits. 3444215976Sjmallett Note in some operating modes, IFG cycles can be 3445215976Sjmallett inserted or removed in order to achieve clock rate 3446215976Sjmallett adaptation. For these reasons, the default value 3447215976Sjmallett is slightly conservative and does not check upto 3448215976Sjmallett the full 96 bits of IFG. 3449215976Sjmallett (SGMII/1000Base-X only) */ 3450215976Sjmallett#else 3451215976Sjmallett uint64_t ifg : 4; 3452215976Sjmallett uint64_t reserved_4_63 : 60; 3453215976Sjmallett#endif 3454215976Sjmallett } s; 3455215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn30xx; 3456215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn31xx; 3457215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn38xx; 3458215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn38xxp2; 3459215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn50xx; 3460215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn52xx; 3461215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn52xxp1; 3462215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn56xx; 3463215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn56xxp1; 3464215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn58xx; 3465215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn58xxp1; 3466215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn63xx; 3467215976Sjmallett struct cvmx_gmxx_rxx_ifg_s cn63xxp1; 3468215976Sjmallett}; 3469215976Sjmalletttypedef union cvmx_gmxx_rxx_ifg cvmx_gmxx_rxx_ifg_t; 3470215976Sjmallett 3471215976Sjmallett/** 3472215976Sjmallett * cvmx_gmx#_rx#_int_en 3473215976Sjmallett * 3474215976Sjmallett * GMX_RX_INT_EN = Interrupt Enable 3475215976Sjmallett * 3476215976Sjmallett * 3477215976Sjmallett * Notes: 3478215976Sjmallett * In XAUI mode prt0 is used for checking. 
3479215976Sjmallett * 3480215976Sjmallett */ 3481215976Sjmallettunion cvmx_gmxx_rxx_int_en 3482215976Sjmallett{ 3483215976Sjmallett uint64_t u64; 3484215976Sjmallett struct cvmx_gmxx_rxx_int_en_s 3485215976Sjmallett { 3486215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3487215976Sjmallett uint64_t reserved_29_63 : 35; 3488215976Sjmallett uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */ 3489215976Sjmallett uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */ 3490215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 3491215976Sjmallett (XAUI Mode only) */ 3492215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 3493215976Sjmallett (XAUI Mode only) */ 3494215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 3495215976Sjmallett (XAUI Mode only) */ 3496215976Sjmallett uint64_t bad_term : 1; /**< Frame is terminated by control character other 3497215976Sjmallett than /T/. The error propagation control 3498215976Sjmallett character /E/ will be included as part of the 3499215976Sjmallett frame and does not cause a frame termination. 
3500215976Sjmallett (XAUI Mode only) */ 3501215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 3502215976Sjmallett (XAUI Mode only) */ 3503215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 3504215976Sjmallett (XAUI Mode only) */ 3505215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 3506215976Sjmallett (XAUI Mode only) */ 3507215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 3508215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 3509215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 3510215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 3511215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 3512215976Sjmallett (SGMII/1000Base-X only) */ 3513215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 3514215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 3515215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 3516215976Sjmallett (SGMII/1000Base-X only) */ 3517215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 3518215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3519215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 3520215976Sjmallett (SGMII/1000Base-X only) */ 3521215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 3522215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3523215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 3524215976Sjmallett uint64_t lenerr : 1; /**< Frame was received with length error */ 3525215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 3526215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 3527215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length 
> sys_length */ 3528215976Sjmallett uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 3529215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 3530215976Sjmallett (SGMII/1000Base-X only) */ 3531215976Sjmallett uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */ 3532215976Sjmallett#else 3533215976Sjmallett uint64_t minerr : 1; 3534215976Sjmallett uint64_t carext : 1; 3535215976Sjmallett uint64_t maxerr : 1; 3536215976Sjmallett uint64_t jabber : 1; 3537215976Sjmallett uint64_t fcserr : 1; 3538215976Sjmallett uint64_t alnerr : 1; 3539215976Sjmallett uint64_t lenerr : 1; 3540215976Sjmallett uint64_t rcverr : 1; 3541215976Sjmallett uint64_t skperr : 1; 3542215976Sjmallett uint64_t niberr : 1; 3543215976Sjmallett uint64_t ovrerr : 1; 3544215976Sjmallett uint64_t pcterr : 1; 3545215976Sjmallett uint64_t rsverr : 1; 3546215976Sjmallett uint64_t falerr : 1; 3547215976Sjmallett uint64_t coldet : 1; 3548215976Sjmallett uint64_t ifgerr : 1; 3549215976Sjmallett uint64_t phy_link : 1; 3550215976Sjmallett uint64_t phy_spd : 1; 3551215976Sjmallett uint64_t phy_dupx : 1; 3552215976Sjmallett uint64_t pause_drp : 1; 3553215976Sjmallett uint64_t loc_fault : 1; 3554215976Sjmallett uint64_t rem_fault : 1; 3555215976Sjmallett uint64_t bad_seq : 1; 3556215976Sjmallett uint64_t bad_term : 1; 3557215976Sjmallett uint64_t unsop : 1; 3558215976Sjmallett uint64_t uneop : 1; 3559215976Sjmallett uint64_t undat : 1; 3560215976Sjmallett uint64_t hg2fld : 1; 3561215976Sjmallett uint64_t hg2cc : 1; 3562215976Sjmallett uint64_t reserved_29_63 : 35; 3563215976Sjmallett#endif 3564215976Sjmallett } s; 3565215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn30xx 3566215976Sjmallett { 3567215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3568215976Sjmallett uint64_t reserved_19_63 : 45; 3569215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 3570215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound 
LinkSpeed */ 3571215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 3572215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation */ 3573215976Sjmallett uint64_t coldet : 1; /**< Collision Detection */ 3574215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ 3575215976Sjmallett uint64_t rsverr : 1; /**< RGMII reserved opcodes */ 3576215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3577215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ 3578215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 3579215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3580215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */ 3581215976Sjmallett uint64_t lenerr : 1; /**< Frame was received with length error */ 3582215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 3583215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 3584215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 3585215976Sjmallett uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 3586215976Sjmallett uint64_t carext : 1; /**< RGMII carrier extend error */ 3587215976Sjmallett uint64_t minerr : 1; /**< Frame was received with length < min_length */ 3588215976Sjmallett#else 3589215976Sjmallett uint64_t minerr : 1; 3590215976Sjmallett uint64_t carext : 1; 3591215976Sjmallett uint64_t maxerr : 1; 3592215976Sjmallett uint64_t jabber : 1; 3593215976Sjmallett uint64_t fcserr : 1; 3594215976Sjmallett uint64_t alnerr : 1; 3595215976Sjmallett uint64_t lenerr : 1; 3596215976Sjmallett uint64_t rcverr : 1; 3597215976Sjmallett uint64_t skperr : 1; 3598215976Sjmallett uint64_t niberr : 1; 3599215976Sjmallett uint64_t ovrerr : 1; 3600215976Sjmallett uint64_t pcterr : 1; 3601215976Sjmallett uint64_t rsverr : 
1; 3602215976Sjmallett uint64_t falerr : 1; 3603215976Sjmallett uint64_t coldet : 1; 3604215976Sjmallett uint64_t ifgerr : 1; 3605215976Sjmallett uint64_t phy_link : 1; 3606215976Sjmallett uint64_t phy_spd : 1; 3607215976Sjmallett uint64_t phy_dupx : 1; 3608215976Sjmallett uint64_t reserved_19_63 : 45; 3609215976Sjmallett#endif 3610215976Sjmallett } cn30xx; 3611215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx; 3612215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx; 3613215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2; 3614215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn50xx 3615215976Sjmallett { 3616215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3617215976Sjmallett uint64_t reserved_20_63 : 44; 3618215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 3619215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 3620215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 3621215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 3622215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation */ 3623215976Sjmallett uint64_t coldet : 1; /**< Collision Detection */ 3624215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ 3625215976Sjmallett uint64_t rsverr : 1; /**< RGMII reserved opcodes */ 3626215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3627215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ 3628215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 3629215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3630215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */ 3631215976Sjmallett uint64_t reserved_6_6 : 1; 3632215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 3633215976Sjmallett uint64_t fcserr : 
1; /**< Frame was received with FCS/CRC error */ 3634215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 3635215976Sjmallett uint64_t reserved_2_2 : 1; 3636215976Sjmallett uint64_t carext : 1; /**< RGMII carrier extend error */ 3637215976Sjmallett uint64_t reserved_0_0 : 1; 3638215976Sjmallett#else 3639215976Sjmallett uint64_t reserved_0_0 : 1; 3640215976Sjmallett uint64_t carext : 1; 3641215976Sjmallett uint64_t reserved_2_2 : 1; 3642215976Sjmallett uint64_t jabber : 1; 3643215976Sjmallett uint64_t fcserr : 1; 3644215976Sjmallett uint64_t alnerr : 1; 3645215976Sjmallett uint64_t reserved_6_6 : 1; 3646215976Sjmallett uint64_t rcverr : 1; 3647215976Sjmallett uint64_t skperr : 1; 3648215976Sjmallett uint64_t niberr : 1; 3649215976Sjmallett uint64_t ovrerr : 1; 3650215976Sjmallett uint64_t pcterr : 1; 3651215976Sjmallett uint64_t rsverr : 1; 3652215976Sjmallett uint64_t falerr : 1; 3653215976Sjmallett uint64_t coldet : 1; 3654215976Sjmallett uint64_t ifgerr : 1; 3655215976Sjmallett uint64_t phy_link : 1; 3656215976Sjmallett uint64_t phy_spd : 1; 3657215976Sjmallett uint64_t phy_dupx : 1; 3658215976Sjmallett uint64_t pause_drp : 1; 3659215976Sjmallett uint64_t reserved_20_63 : 44; 3660215976Sjmallett#endif 3661215976Sjmallett } cn50xx; 3662215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn52xx 3663215976Sjmallett { 3664215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3665215976Sjmallett uint64_t reserved_29_63 : 35; 3666215976Sjmallett uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */ 3667215976Sjmallett uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */ 3668215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 3669215976Sjmallett (XAUI Mode only) */ 3670215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 3671215976Sjmallett (XAUI Mode only) */ 3672215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 3673215976Sjmallett (XAUI Mode only) */ 3674215976Sjmallett uint64_t bad_term : 1; 
/**< Frame is terminated by control character other 3675215976Sjmallett than /T/. The error propagation control 3676215976Sjmallett character /E/ will be included as part of the 3677215976Sjmallett frame and does not cause a frame termination. 3678215976Sjmallett (XAUI Mode only) */ 3679215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 3680215976Sjmallett (XAUI Mode only) */ 3681215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 3682215976Sjmallett (XAUI Mode only) */ 3683215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 3684215976Sjmallett (XAUI Mode only) */ 3685215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 3686215976Sjmallett uint64_t reserved_16_18 : 3; 3687215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 3688215976Sjmallett (SGMII/1000Base-X only) */ 3689215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 3690215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 3691215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 3692215976Sjmallett (SGMII/1000Base-X only) */ 3693215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 3694215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3695215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 3696215976Sjmallett (SGMII/1000Base-X only) */ 3697215976Sjmallett uint64_t reserved_9_9 : 1; 3698215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3699215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 3700215976Sjmallett uint64_t reserved_5_6 : 2; 3701215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 3702215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 3703215976Sjmallett uint64_t reserved_2_2 : 1; 3704215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 
3705215976Sjmallett (SGMII/1000Base-X only) */ 3706215976Sjmallett uint64_t reserved_0_0 : 1; 3707215976Sjmallett#else 3708215976Sjmallett uint64_t reserved_0_0 : 1; 3709215976Sjmallett uint64_t carext : 1; 3710215976Sjmallett uint64_t reserved_2_2 : 1; 3711215976Sjmallett uint64_t jabber : 1; 3712215976Sjmallett uint64_t fcserr : 1; 3713215976Sjmallett uint64_t reserved_5_6 : 2; 3714215976Sjmallett uint64_t rcverr : 1; 3715215976Sjmallett uint64_t skperr : 1; 3716215976Sjmallett uint64_t reserved_9_9 : 1; 3717215976Sjmallett uint64_t ovrerr : 1; 3718215976Sjmallett uint64_t pcterr : 1; 3719215976Sjmallett uint64_t rsverr : 1; 3720215976Sjmallett uint64_t falerr : 1; 3721215976Sjmallett uint64_t coldet : 1; 3722215976Sjmallett uint64_t ifgerr : 1; 3723215976Sjmallett uint64_t reserved_16_18 : 3; 3724215976Sjmallett uint64_t pause_drp : 1; 3725215976Sjmallett uint64_t loc_fault : 1; 3726215976Sjmallett uint64_t rem_fault : 1; 3727215976Sjmallett uint64_t bad_seq : 1; 3728215976Sjmallett uint64_t bad_term : 1; 3729215976Sjmallett uint64_t unsop : 1; 3730215976Sjmallett uint64_t uneop : 1; 3731215976Sjmallett uint64_t undat : 1; 3732215976Sjmallett uint64_t hg2fld : 1; 3733215976Sjmallett uint64_t hg2cc : 1; 3734215976Sjmallett uint64_t reserved_29_63 : 35; 3735215976Sjmallett#endif 3736215976Sjmallett } cn52xx; 3737215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1; 3738215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx; 3739215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn56xxp1 3740215976Sjmallett { 3741215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3742215976Sjmallett uint64_t reserved_27_63 : 37; 3743215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 3744215976Sjmallett (XAUI Mode only) */ 3745215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 3746215976Sjmallett (XAUI Mode only) */ 3747215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 3748215976Sjmallett (XAUI Mode only) */ 3749215976Sjmallett uint64_t bad_term : 1; /**< Frame is 
terminated by control character other 3750215976Sjmallett than /T/. The error propagation control 3751215976Sjmallett character /E/ will be included as part of the 3752215976Sjmallett frame and does not cause a frame termination. 3753215976Sjmallett (XAUI Mode only) */ 3754215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 3755215976Sjmallett (XAUI Mode only) */ 3756215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 3757215976Sjmallett (XAUI Mode only) */ 3758215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 3759215976Sjmallett (XAUI Mode only) */ 3760215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 3761215976Sjmallett uint64_t reserved_16_18 : 3; 3762215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 3763215976Sjmallett (SGMII/1000Base-X only) */ 3764215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 3765215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 3766215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 3767215976Sjmallett (SGMII/1000Base-X only) */ 3768215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 3769215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3770215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 3771215976Sjmallett (SGMII/1000Base-X only) */ 3772215976Sjmallett uint64_t reserved_9_9 : 1; 3773215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3774215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 3775215976Sjmallett uint64_t reserved_5_6 : 2; 3776215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 3777215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 3778215976Sjmallett uint64_t reserved_2_2 : 1; 3779215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 3780215976Sjmallett 
(SGMII/1000Base-X only) */ 3781215976Sjmallett uint64_t reserved_0_0 : 1; 3782215976Sjmallett#else 3783215976Sjmallett uint64_t reserved_0_0 : 1; 3784215976Sjmallett uint64_t carext : 1; 3785215976Sjmallett uint64_t reserved_2_2 : 1; 3786215976Sjmallett uint64_t jabber : 1; 3787215976Sjmallett uint64_t fcserr : 1; 3788215976Sjmallett uint64_t reserved_5_6 : 2; 3789215976Sjmallett uint64_t rcverr : 1; 3790215976Sjmallett uint64_t skperr : 1; 3791215976Sjmallett uint64_t reserved_9_9 : 1; 3792215976Sjmallett uint64_t ovrerr : 1; 3793215976Sjmallett uint64_t pcterr : 1; 3794215976Sjmallett uint64_t rsverr : 1; 3795215976Sjmallett uint64_t falerr : 1; 3796215976Sjmallett uint64_t coldet : 1; 3797215976Sjmallett uint64_t ifgerr : 1; 3798215976Sjmallett uint64_t reserved_16_18 : 3; 3799215976Sjmallett uint64_t pause_drp : 1; 3800215976Sjmallett uint64_t loc_fault : 1; 3801215976Sjmallett uint64_t rem_fault : 1; 3802215976Sjmallett uint64_t bad_seq : 1; 3803215976Sjmallett uint64_t bad_term : 1; 3804215976Sjmallett uint64_t unsop : 1; 3805215976Sjmallett uint64_t uneop : 1; 3806215976Sjmallett uint64_t undat : 1; 3807215976Sjmallett uint64_t reserved_27_63 : 37; 3808215976Sjmallett#endif 3809215976Sjmallett } cn56xxp1; 3810215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn58xx 3811215976Sjmallett { 3812215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3813215976Sjmallett uint64_t reserved_20_63 : 44; 3814215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 3815215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 3816215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 3817215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 3818215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation */ 3819215976Sjmallett uint64_t coldet : 1; /**< Collision Detection */ 3820215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend 
error after slottime */ 3821215976Sjmallett uint64_t rsverr : 1; /**< RGMII reserved opcodes */ 3822215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3823215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ 3824215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 3825215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3826215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */ 3827215976Sjmallett uint64_t lenerr : 1; /**< Frame was received with length error */ 3828215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 3829215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 3830215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 3831215976Sjmallett uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 3832215976Sjmallett uint64_t carext : 1; /**< RGMII carrier extend error */ 3833215976Sjmallett uint64_t minerr : 1; /**< Frame was received with length < min_length */ 3834215976Sjmallett#else 3835215976Sjmallett uint64_t minerr : 1; 3836215976Sjmallett uint64_t carext : 1; 3837215976Sjmallett uint64_t maxerr : 1; 3838215976Sjmallett uint64_t jabber : 1; 3839215976Sjmallett uint64_t fcserr : 1; 3840215976Sjmallett uint64_t alnerr : 1; 3841215976Sjmallett uint64_t lenerr : 1; 3842215976Sjmallett uint64_t rcverr : 1; 3843215976Sjmallett uint64_t skperr : 1; 3844215976Sjmallett uint64_t niberr : 1; 3845215976Sjmallett uint64_t ovrerr : 1; 3846215976Sjmallett uint64_t pcterr : 1; 3847215976Sjmallett uint64_t rsverr : 1; 3848215976Sjmallett uint64_t falerr : 1; 3849215976Sjmallett uint64_t coldet : 1; 3850215976Sjmallett uint64_t ifgerr : 1; 3851215976Sjmallett uint64_t phy_link : 1; 3852215976Sjmallett uint64_t phy_spd : 1; 3853215976Sjmallett uint64_t phy_dupx : 1; 3854215976Sjmallett uint64_t pause_drp : 1; 3855215976Sjmallett 
uint64_t reserved_20_63 : 44; 3856215976Sjmallett#endif 3857215976Sjmallett } cn58xx; 3858215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1; 3859215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn63xx 3860215976Sjmallett { 3861215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 3862215976Sjmallett uint64_t reserved_29_63 : 35; 3863215976Sjmallett uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */ 3864215976Sjmallett uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */ 3865215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 3866215976Sjmallett (XAUI Mode only) */ 3867215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 3868215976Sjmallett (XAUI Mode only) */ 3869215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 3870215976Sjmallett (XAUI Mode only) */ 3871215976Sjmallett uint64_t bad_term : 1; /**< Frame is terminated by control character other 3872215976Sjmallett than /T/. The error propagation control 3873215976Sjmallett character /E/ will be included as part of the 3874215976Sjmallett frame and does not cause a frame termination. 
3875215976Sjmallett (XAUI Mode only) */ 3876215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 3877215976Sjmallett (XAUI Mode only) */ 3878215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 3879215976Sjmallett (XAUI Mode only) */ 3880215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 3881215976Sjmallett (XAUI Mode only) */ 3882215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 3883215976Sjmallett uint64_t reserved_16_18 : 3; 3884215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 3885215976Sjmallett (SGMII/1000Base-X only) */ 3886215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 3887215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 3888215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 3889215976Sjmallett (SGMII/1000Base-X only) */ 3890215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 3891215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 3892215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 3893215976Sjmallett (SGMII/1000Base-X only) */ 3894215976Sjmallett uint64_t reserved_9_9 : 1; 3895215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 3896215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 3897215976Sjmallett uint64_t reserved_5_6 : 2; 3898215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 3899215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 3900215976Sjmallett uint64_t reserved_2_2 : 1; 3901215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 3902215976Sjmallett (SGMII/1000Base-X only) */ 3903215976Sjmallett uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */ 3904215976Sjmallett#else 3905215976Sjmallett uint64_t minerr : 1; 3906215976Sjmallett uint64_t carext : 1; 
3907215976Sjmallett uint64_t reserved_2_2 : 1; 3908215976Sjmallett uint64_t jabber : 1; 3909215976Sjmallett uint64_t fcserr : 1; 3910215976Sjmallett uint64_t reserved_5_6 : 2; 3911215976Sjmallett uint64_t rcverr : 1; 3912215976Sjmallett uint64_t skperr : 1; 3913215976Sjmallett uint64_t reserved_9_9 : 1; 3914215976Sjmallett uint64_t ovrerr : 1; 3915215976Sjmallett uint64_t pcterr : 1; 3916215976Sjmallett uint64_t rsverr : 1; 3917215976Sjmallett uint64_t falerr : 1; 3918215976Sjmallett uint64_t coldet : 1; 3919215976Sjmallett uint64_t ifgerr : 1; 3920215976Sjmallett uint64_t reserved_16_18 : 3; 3921215976Sjmallett uint64_t pause_drp : 1; 3922215976Sjmallett uint64_t loc_fault : 1; 3923215976Sjmallett uint64_t rem_fault : 1; 3924215976Sjmallett uint64_t bad_seq : 1; 3925215976Sjmallett uint64_t bad_term : 1; 3926215976Sjmallett uint64_t unsop : 1; 3927215976Sjmallett uint64_t uneop : 1; 3928215976Sjmallett uint64_t undat : 1; 3929215976Sjmallett uint64_t hg2fld : 1; 3930215976Sjmallett uint64_t hg2cc : 1; 3931215976Sjmallett uint64_t reserved_29_63 : 35; 3932215976Sjmallett#endif 3933215976Sjmallett } cn63xx; 3934215976Sjmallett struct cvmx_gmxx_rxx_int_en_cn63xx cn63xxp1; 3935215976Sjmallett}; 3936215976Sjmalletttypedef union cvmx_gmxx_rxx_int_en cvmx_gmxx_rxx_int_en_t; 3937215976Sjmallett 3938215976Sjmallett/** 3939215976Sjmallett * cvmx_gmx#_rx#_int_reg 3940215976Sjmallett * 3941215976Sjmallett * GMX_RX_INT_REG = Interrupt Register 3942215976Sjmallett * 3943215976Sjmallett * 3944215976Sjmallett * Notes: 3945215976Sjmallett * (1) exceptions will only be raised to the control processor if the 3946215976Sjmallett * corresponding bit in the GMX_RX_INT_EN register is set. 3947215976Sjmallett * 3948215976Sjmallett * (2) exception conditions 10:0 can also set the rcv/opcode in the received 3949215976Sjmallett * packet's workQ entry. The GMX_RX_FRM_CHK register provides a bit mask 3950215976Sjmallett * for configuring which conditions set the error. 
3951215976Sjmallett * 3952215976Sjmallett * (3) in half duplex operation, the expectation is that collisions will appear 3953215976Sjmallett * as either MINERR or CAREXT errors. 3954215976Sjmallett * 3955215976Sjmallett * (4) JABBER - An RX Jabber error indicates that a packet was received which 3956215976Sjmallett * is longer than the maximum allowed packet as defined by the 3957215976Sjmallett * system. GMX will truncate the packet at the JABBER count. 3958215976Sjmallett * Failure to do so could lead to system instability. 3959215976Sjmallett * 3960215976Sjmallett * (5) NIBERR - This error is illegal at 1000Mbs speeds 3961215976Sjmallett * (GMX_RX_PRT_CFG[SPEED]==0) and will never assert. 3962215976Sjmallett * 3963215976Sjmallett * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS > 3964215976Sjmallett * GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS 3965215976Sjmallett * > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED. 3966215976Sjmallett * 3967215976Sjmallett * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < 64 3968215976Sjmallett * 3969215976Sjmallett * (8) ALNERR - Indicates that the packet received was not an integer number of 3970215976Sjmallett * bytes. If FCS checking is enabled, ALNERR will only assert if 3971215976Sjmallett * the FCS is bad. If FCS checking is disabled, ALNERR will 3972215976Sjmallett * assert in all non-integer frame cases. 3973215976Sjmallett * 3974215976Sjmallett * (9) Collisions - Collisions can only occur in half-duplex mode. A collision 3975215976Sjmallett * is assumed by the receiver when the slottime 3976215976Sjmallett * (GMX_PRT_CFG[SLOTTIME]) is not satisfied. In 10/100 mode, 3977215976Sjmallett * this will result in a frame < SLOTTIME. In 1000 mode, it 3978215976Sjmallett * could result either in frame < SLOTTIME or a carrier extend 3979215976Sjmallett * error with the SLOTTIME. These conditions are visible by... 3980215976Sjmallett * 3981215976Sjmallett * . 
transfer ended before slottime - COLDET 3982215976Sjmallett * . carrier extend error - CAREXT 3983215976Sjmallett * 3984215976Sjmallett * (A) LENERR - Length errors occur when the received packet does not match the 3985215976Sjmallett * length field. LENERR is only checked for packets between 64 3986215976Sjmallett * and 1500 bytes. For untagged frames, the length must exactly 3987215976Sjmallett * match. For tagged frames the length or length+4 must match. 3988215976Sjmallett * 3989215976Sjmallett * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence. 3990215976Sjmallett * Does not check the number of PREAMBLE cycles. 3991215976Sjmallett * 3992215976Sjmallett * (C) OVRERR - Not to be included in the HRM 3993215976Sjmallett * 3994215976Sjmallett * OVRERR is an architectural assertion check internal to GMX to 3995215976Sjmallett * make sure no assumption was violated. In a correctly operating 3996215976Sjmallett * system, this interrupt can never fire. 3997215976Sjmallett * 3998215976Sjmallett * GMX has an internal arbiter which selects which of 4 ports to 3999215976Sjmallett * buffer in the main RX FIFO. If we normally buffer 8 bytes, 4000215976Sjmallett * then each port will typically push a tick every 8 cycles - if 4001215976Sjmallett * the packet interface is going as fast as possible. If there 4002215976Sjmallett * are four ports, they push every two cycles. So that's the 4003215976Sjmallett * assumption. That the inbound module will always be able to 4004215976Sjmallett * consume the tick before another is produced. If that doesn't 4005215976Sjmallett * happen - that's when OVRERR will assert. 4006215976Sjmallett * 4007215976Sjmallett * (D) In XAUI mode prt0 is used for interrupt logging. 
4008215976Sjmallett */ 4009215976Sjmallettunion cvmx_gmxx_rxx_int_reg 4010215976Sjmallett{ 4011215976Sjmallett uint64_t u64; 4012215976Sjmallett struct cvmx_gmxx_rxx_int_reg_s 4013215976Sjmallett { 4014215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4015215976Sjmallett uint64_t reserved_29_63 : 35; 4016215976Sjmallett uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error 4017215976Sjmallett Set when either CRC8 error detected or when 4018215976Sjmallett a Control Character is found in the message 4019215976Sjmallett bytes after the K.SOM 4020215976Sjmallett NOTE: HG2CC has higher priority than HG2FLD 4021215976Sjmallett i.e. a HiGig2 message that results in HG2CC 4022215976Sjmallett getting set, will never set HG2FLD. */ 4023215976Sjmallett uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below 4024215976Sjmallett 1) MSG_TYPE field not 6'b00_0000 4025215976Sjmallett i.e. it is not a FLOW CONTROL message, which 4026215976Sjmallett is the only defined type for HiGig2 4027215976Sjmallett 2) FWD_TYPE field not 2'b00 i.e. Link Level msg 4028215976Sjmallett which is the only defined type for HiGig2 4029215976Sjmallett 3) FC_OBJECT field is neither 4'b0000 for 4030215976Sjmallett Physical Link nor 4'b0010 for Logical Link. 4031215976Sjmallett Those are the only two defined types in HiGig2 */ 4032215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 4033215976Sjmallett (XAUI Mode only) */ 4034215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 4035215976Sjmallett (XAUI Mode only) */ 4036215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 4037215976Sjmallett (XAUI Mode only) */ 4038215976Sjmallett uint64_t bad_term : 1; /**< Frame is terminated by control character other 4039215976Sjmallett than /T/. The error propagation control 4040215976Sjmallett character /E/ will be included as part of the 4041215976Sjmallett frame and does not cause a frame termination. 
4042215976Sjmallett (XAUI Mode only) */ 4043215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 4044215976Sjmallett (XAUI Mode only) */ 4045215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 4046215976Sjmallett (XAUI Mode only) */ 4047215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 4048215976Sjmallett (XAUI Mode only) */ 4049215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 4050215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 4051215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 4052215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 4053215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4054215976Sjmallett Does not necessarily indicate a failure 4055215976Sjmallett (SGMII/1000Base-X only) */ 4056215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 4057215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 4058215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 4059215976Sjmallett (SGMII/1000Base-X only) */ 4060215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 4061215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol 4062215976Sjmallett In XAUI mode, the column of data that was bad 4063215976Sjmallett will be logged in GMX_RX_XAUI_BAD_COL */ 4064215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 4065215976Sjmallett This interrupt should never assert 4066215976Sjmallett (SGMII/1000Base-X only) */ 4067215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 4068215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4069215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 4070215976Sjmallett uint64_t lenerr : 1; /**< Frame was received with length error */ 
4071215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 4072215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 4073215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4074215976Sjmallett uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 4075215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 4076215976Sjmallett (SGMII/1000Base-X only) */ 4077215976Sjmallett uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize 4078215976Sjmallett Frame length checks are typically handled in PIP 4079215976Sjmallett (PIP_INT_REG[MINERR]), but pause frames are 4080215976Sjmallett normally discarded before being inspected by PIP. */ 4081215976Sjmallett#else 4082215976Sjmallett uint64_t minerr : 1; 4083215976Sjmallett uint64_t carext : 1; 4084215976Sjmallett uint64_t maxerr : 1; 4085215976Sjmallett uint64_t jabber : 1; 4086215976Sjmallett uint64_t fcserr : 1; 4087215976Sjmallett uint64_t alnerr : 1; 4088215976Sjmallett uint64_t lenerr : 1; 4089215976Sjmallett uint64_t rcverr : 1; 4090215976Sjmallett uint64_t skperr : 1; 4091215976Sjmallett uint64_t niberr : 1; 4092215976Sjmallett uint64_t ovrerr : 1; 4093215976Sjmallett uint64_t pcterr : 1; 4094215976Sjmallett uint64_t rsverr : 1; 4095215976Sjmallett uint64_t falerr : 1; 4096215976Sjmallett uint64_t coldet : 1; 4097215976Sjmallett uint64_t ifgerr : 1; 4098215976Sjmallett uint64_t phy_link : 1; 4099215976Sjmallett uint64_t phy_spd : 1; 4100215976Sjmallett uint64_t phy_dupx : 1; 4101215976Sjmallett uint64_t pause_drp : 1; 4102215976Sjmallett uint64_t loc_fault : 1; 4103215976Sjmallett uint64_t rem_fault : 1; 4104215976Sjmallett uint64_t bad_seq : 1; 4105215976Sjmallett uint64_t bad_term : 1; 4106215976Sjmallett uint64_t unsop : 1; 4107215976Sjmallett uint64_t uneop : 1; 4108215976Sjmallett uint64_t undat : 1; 4109215976Sjmallett uint64_t hg2fld : 1; 4110215976Sjmallett uint64_t 
hg2cc : 1; 4111215976Sjmallett uint64_t reserved_29_63 : 35; 4112215976Sjmallett#endif 4113215976Sjmallett } s; 4114215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn30xx 4115215976Sjmallett { 4116215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4117215976Sjmallett uint64_t reserved_19_63 : 45; 4118215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 4119215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 4120215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 4121215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4122215976Sjmallett Does not necessarily indicate a failure */ 4123215976Sjmallett uint64_t coldet : 1; /**< Collision Detection */ 4124215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ 4125215976Sjmallett uint64_t rsverr : 1; /**< RGMII reserved opcodes */ 4126215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 4127215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 4128215976Sjmallett This interrupt should never assert */ 4129215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 4130215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4131215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */ 4132215976Sjmallett uint64_t lenerr : 1; /**< Frame was received with length error */ 4133215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 4134215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 4135215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4136215976Sjmallett uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 4137215976Sjmallett uint64_t carext : 1; /**< RGMII carrier extend error */ 4138215976Sjmallett uint64_t minerr : 1; /**< Frame was received with 
length < min_length */ 4139215976Sjmallett#else 4140215976Sjmallett uint64_t minerr : 1; 4141215976Sjmallett uint64_t carext : 1; 4142215976Sjmallett uint64_t maxerr : 1; 4143215976Sjmallett uint64_t jabber : 1; 4144215976Sjmallett uint64_t fcserr : 1; 4145215976Sjmallett uint64_t alnerr : 1; 4146215976Sjmallett uint64_t lenerr : 1; 4147215976Sjmallett uint64_t rcverr : 1; 4148215976Sjmallett uint64_t skperr : 1; 4149215976Sjmallett uint64_t niberr : 1; 4150215976Sjmallett uint64_t ovrerr : 1; 4151215976Sjmallett uint64_t pcterr : 1; 4152215976Sjmallett uint64_t rsverr : 1; 4153215976Sjmallett uint64_t falerr : 1; 4154215976Sjmallett uint64_t coldet : 1; 4155215976Sjmallett uint64_t ifgerr : 1; 4156215976Sjmallett uint64_t phy_link : 1; 4157215976Sjmallett uint64_t phy_spd : 1; 4158215976Sjmallett uint64_t phy_dupx : 1; 4159215976Sjmallett uint64_t reserved_19_63 : 45; 4160215976Sjmallett#endif 4161215976Sjmallett } cn30xx; 4162215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx; 4163215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx; 4164215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2; 4165215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn50xx 4166215976Sjmallett { 4167215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4168215976Sjmallett uint64_t reserved_20_63 : 44; 4169215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 4170215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 4171215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 4172215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */ 4173215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4174215976Sjmallett Does not necessarily indicate a failure */ 4175215976Sjmallett uint64_t coldet : 1; /**< Collision Detection */ 4176215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ 
4177215976Sjmallett uint64_t rsverr : 1; /**< RGMII reserved opcodes */ 4178215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 4179215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 4180215976Sjmallett This interrupt should never assert */ 4181215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 4182215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4183215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */ 4184215976Sjmallett uint64_t reserved_6_6 : 1; 4185215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 4186215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 4187215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4188215976Sjmallett uint64_t reserved_2_2 : 1; 4189215976Sjmallett uint64_t carext : 1; /**< RGMII carrier extend error */ 4190215976Sjmallett uint64_t reserved_0_0 : 1; 4191215976Sjmallett#else 4192215976Sjmallett uint64_t reserved_0_0 : 1; 4193215976Sjmallett uint64_t carext : 1; 4194215976Sjmallett uint64_t reserved_2_2 : 1; 4195215976Sjmallett uint64_t jabber : 1; 4196215976Sjmallett uint64_t fcserr : 1; 4197215976Sjmallett uint64_t alnerr : 1; 4198215976Sjmallett uint64_t reserved_6_6 : 1; 4199215976Sjmallett uint64_t rcverr : 1; 4200215976Sjmallett uint64_t skperr : 1; 4201215976Sjmallett uint64_t niberr : 1; 4202215976Sjmallett uint64_t ovrerr : 1; 4203215976Sjmallett uint64_t pcterr : 1; 4204215976Sjmallett uint64_t rsverr : 1; 4205215976Sjmallett uint64_t falerr : 1; 4206215976Sjmallett uint64_t coldet : 1; 4207215976Sjmallett uint64_t ifgerr : 1; 4208215976Sjmallett uint64_t phy_link : 1; 4209215976Sjmallett uint64_t phy_spd : 1; 4210215976Sjmallett uint64_t phy_dupx : 1; 4211215976Sjmallett uint64_t pause_drp : 1; 4212215976Sjmallett uint64_t reserved_20_63 : 44; 4213215976Sjmallett#endif 4214215976Sjmallett } cn50xx; 
4215215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn52xx 4216215976Sjmallett { 4217215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4218215976Sjmallett uint64_t reserved_29_63 : 35; 4219215976Sjmallett uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error 4220215976Sjmallett Set when either CRC8 error detected or when 4221215976Sjmallett a Control Character is found in the message 4222215976Sjmallett bytes after the K.SOM 4223215976Sjmallett NOTE: HG2CC has higher priority than HG2FLD 4224215976Sjmallett i.e. a HiGig2 message that results in HG2CC 4225215976Sjmallett getting set, will never set HG2FLD. */ 4226215976Sjmallett uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below 4227215976Sjmallett 1) MSG_TYPE field not 6'b00_0000 4228215976Sjmallett i.e. it is not a FLOW CONTROL message, which 4229215976Sjmallett is the only defined type for HiGig2 4230215976Sjmallett 2) FWD_TYPE field not 2'b00 i.e. Link Level msg 4231215976Sjmallett which is the only defined type for HiGig2 4232215976Sjmallett 3) FC_OBJECT field is neither 4'b0000 for 4233215976Sjmallett Physical Link nor 4'b0010 for Logical Link. 4234215976Sjmallett Those are the only two defined types in HiGig2 */ 4235215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 4236215976Sjmallett (XAUI Mode only) */ 4237215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 4238215976Sjmallett (XAUI Mode only) */ 4239215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 4240215976Sjmallett (XAUI Mode only) */ 4241215976Sjmallett uint64_t bad_term : 1; /**< Frame is terminated by control character other 4242215976Sjmallett than /T/. The error propagation control 4243215976Sjmallett character /E/ will be included as part of the 4244215976Sjmallett frame and does not cause a frame termination. 
4245215976Sjmallett (XAUI Mode only) */ 4246215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 4247215976Sjmallett (XAUI Mode only) */ 4248215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 4249215976Sjmallett (XAUI Mode only) */ 4250215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 4251215976Sjmallett (XAUI Mode only) */ 4252215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 4253215976Sjmallett uint64_t reserved_16_18 : 3; 4254215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4255215976Sjmallett Does not necessarily indicate a failure 4256215976Sjmallett (SGMII/1000Base-X only) */ 4257215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 4258215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 4259215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 4260215976Sjmallett (SGMII/1000Base-X only) */ 4261215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 4262215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol 4263215976Sjmallett In XAUI mode, the column of data that was bad 4264215976Sjmallett will be logged in GMX_RX_XAUI_BAD_COL */ 4265215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 4266215976Sjmallett This interrupt should never assert 4267215976Sjmallett (SGMII/1000Base-X only) */ 4268215976Sjmallett uint64_t reserved_9_9 : 1; 4269215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4270215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 4271215976Sjmallett uint64_t reserved_5_6 : 2; 4272215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 4273215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4274215976Sjmallett uint64_t reserved_2_2 : 1; 4275215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 4276215976Sjmallett 
(SGMII/1000Base-X only) */ 4277215976Sjmallett uint64_t reserved_0_0 : 1; 4278215976Sjmallett#else 4279215976Sjmallett uint64_t reserved_0_0 : 1; 4280215976Sjmallett uint64_t carext : 1; 4281215976Sjmallett uint64_t reserved_2_2 : 1; 4282215976Sjmallett uint64_t jabber : 1; 4283215976Sjmallett uint64_t fcserr : 1; 4284215976Sjmallett uint64_t reserved_5_6 : 2; 4285215976Sjmallett uint64_t rcverr : 1; 4286215976Sjmallett uint64_t skperr : 1; 4287215976Sjmallett uint64_t reserved_9_9 : 1; 4288215976Sjmallett uint64_t ovrerr : 1; 4289215976Sjmallett uint64_t pcterr : 1; 4290215976Sjmallett uint64_t rsverr : 1; 4291215976Sjmallett uint64_t falerr : 1; 4292215976Sjmallett uint64_t coldet : 1; 4293215976Sjmallett uint64_t ifgerr : 1; 4294215976Sjmallett uint64_t reserved_16_18 : 3; 4295215976Sjmallett uint64_t pause_drp : 1; 4296215976Sjmallett uint64_t loc_fault : 1; 4297215976Sjmallett uint64_t rem_fault : 1; 4298215976Sjmallett uint64_t bad_seq : 1; 4299215976Sjmallett uint64_t bad_term : 1; 4300215976Sjmallett uint64_t unsop : 1; 4301215976Sjmallett uint64_t uneop : 1; 4302215976Sjmallett uint64_t undat : 1; 4303215976Sjmallett uint64_t hg2fld : 1; 4304215976Sjmallett uint64_t hg2cc : 1; 4305215976Sjmallett uint64_t reserved_29_63 : 35; 4306215976Sjmallett#endif 4307215976Sjmallett } cn52xx; 4308215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1; 4309215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx; 4310215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn56xxp1 4311215976Sjmallett { 4312215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4313215976Sjmallett uint64_t reserved_27_63 : 37; 4314215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 4315215976Sjmallett (XAUI Mode only) */ 4316215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 4317215976Sjmallett (XAUI Mode only) */ 4318215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 4319215976Sjmallett (XAUI Mode only) */ 4320215976Sjmallett uint64_t bad_term : 1; /**< Frame is terminated by 
control character other 4321215976Sjmallett than /T/. The error propagation control 4322215976Sjmallett character /E/ will be included as part of the 4323215976Sjmallett frame and does not cause a frame termination. 4324215976Sjmallett (XAUI Mode only) */ 4325215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 4326215976Sjmallett (XAUI Mode only) */ 4327215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 4328215976Sjmallett (XAUI Mode only) */ 4329215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 4330215976Sjmallett (XAUI Mode only) */ 4331215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 4332215976Sjmallett uint64_t reserved_16_18 : 3; 4333215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4334215976Sjmallett Does not necessarily indicate a failure 4335215976Sjmallett (SGMII/1000Base-X only) */ 4336215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 4337215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 4338215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 4339215976Sjmallett (SGMII/1000Base-X only) */ 4340215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 4341215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol 4342215976Sjmallett In XAUI mode, the column of data that was bad 4343215976Sjmallett will be logged in GMX_RX_XAUI_BAD_COL */ 4344215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 4345215976Sjmallett This interrupt should never assert 4346215976Sjmallett (SGMII/1000Base-X only) */ 4347215976Sjmallett uint64_t reserved_9_9 : 1; 4348215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4349215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 4350215976Sjmallett uint64_t reserved_5_6 : 2; 4351215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 
4352215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4353215976Sjmallett uint64_t reserved_2_2 : 1; 4354215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 4355215976Sjmallett (SGMII/1000Base-X only) */ 4356215976Sjmallett uint64_t reserved_0_0 : 1; 4357215976Sjmallett#else 4358215976Sjmallett uint64_t reserved_0_0 : 1; 4359215976Sjmallett uint64_t carext : 1; 4360215976Sjmallett uint64_t reserved_2_2 : 1; 4361215976Sjmallett uint64_t jabber : 1; 4362215976Sjmallett uint64_t fcserr : 1; 4363215976Sjmallett uint64_t reserved_5_6 : 2; 4364215976Sjmallett uint64_t rcverr : 1; 4365215976Sjmallett uint64_t skperr : 1; 4366215976Sjmallett uint64_t reserved_9_9 : 1; 4367215976Sjmallett uint64_t ovrerr : 1; 4368215976Sjmallett uint64_t pcterr : 1; 4369215976Sjmallett uint64_t rsverr : 1; 4370215976Sjmallett uint64_t falerr : 1; 4371215976Sjmallett uint64_t coldet : 1; 4372215976Sjmallett uint64_t ifgerr : 1; 4373215976Sjmallett uint64_t reserved_16_18 : 3; 4374215976Sjmallett uint64_t pause_drp : 1; 4375215976Sjmallett uint64_t loc_fault : 1; 4376215976Sjmallett uint64_t rem_fault : 1; 4377215976Sjmallett uint64_t bad_seq : 1; 4378215976Sjmallett uint64_t bad_term : 1; 4379215976Sjmallett uint64_t unsop : 1; 4380215976Sjmallett uint64_t uneop : 1; 4381215976Sjmallett uint64_t undat : 1; 4382215976Sjmallett uint64_t reserved_27_63 : 37; 4383215976Sjmallett#endif 4384215976Sjmallett } cn56xxp1; 4385215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn58xx 4386215976Sjmallett { 4387215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4388215976Sjmallett uint64_t reserved_20_63 : 44; 4389215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 4390215976Sjmallett uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */ 4391215976Sjmallett uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */ 4392215976Sjmallett uint64_t phy_link : 1; /**< Change in the RMGII inbound 
LinkStatus */ 4393215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4394215976Sjmallett Does not necessarily indicate a failure */ 4395215976Sjmallett uint64_t coldet : 1; /**< Collision Detection */ 4396215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ 4397215976Sjmallett uint64_t rsverr : 1; /**< RGMII reserved opcodes */ 4398215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ 4399215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 4400215976Sjmallett This interrupt should never assert */ 4401215976Sjmallett uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ 4402215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4403215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */ 4404215976Sjmallett uint64_t lenerr : 1; /**< Frame was received with length error */ 4405215976Sjmallett uint64_t alnerr : 1; /**< Frame was received with an alignment error */ 4406215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 4407215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4408215976Sjmallett uint64_t maxerr : 1; /**< Frame was received with length > max_length */ 4409215976Sjmallett uint64_t carext : 1; /**< RGMII carrier extend error */ 4410215976Sjmallett uint64_t minerr : 1; /**< Frame was received with length < min_length */ 4411215976Sjmallett#else 4412215976Sjmallett uint64_t minerr : 1; 4413215976Sjmallett uint64_t carext : 1; 4414215976Sjmallett uint64_t maxerr : 1; 4415215976Sjmallett uint64_t jabber : 1; 4416215976Sjmallett uint64_t fcserr : 1; 4417215976Sjmallett uint64_t alnerr : 1; 4418215976Sjmallett uint64_t lenerr : 1; 4419215976Sjmallett uint64_t rcverr : 1; 4420215976Sjmallett uint64_t skperr : 1; 4421215976Sjmallett uint64_t niberr : 1; 4422215976Sjmallett uint64_t ovrerr : 1; 4423215976Sjmallett uint64_t pcterr : 1; 
4424215976Sjmallett uint64_t rsverr : 1; 4425215976Sjmallett uint64_t falerr : 1; 4426215976Sjmallett uint64_t coldet : 1; 4427215976Sjmallett uint64_t ifgerr : 1; 4428215976Sjmallett uint64_t phy_link : 1; 4429215976Sjmallett uint64_t phy_spd : 1; 4430215976Sjmallett uint64_t phy_dupx : 1; 4431215976Sjmallett uint64_t pause_drp : 1; 4432215976Sjmallett uint64_t reserved_20_63 : 44; 4433215976Sjmallett#endif 4434215976Sjmallett } cn58xx; 4435215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1; 4436215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn63xx 4437215976Sjmallett { 4438215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 4439215976Sjmallett uint64_t reserved_29_63 : 35; 4440215976Sjmallett uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error 4441215976Sjmallett Set when either CRC8 error detected or when 4442215976Sjmallett a Control Character is found in the message 4443215976Sjmallett bytes after the K.SOM 4444215976Sjmallett NOTE: HG2CC has higher priority than HG2FLD 4445215976Sjmallett i.e. a HiGig2 message that results in HG2CC 4446215976Sjmallett getting set, will never set HG2FLD. */ 4447215976Sjmallett uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below 4448215976Sjmallett 1) MSG_TYPE field not 6'b00_0000 4449215976Sjmallett i.e. it is not a FLOW CONTROL message, which 4450215976Sjmallett is the only defined type for HiGig2 4451215976Sjmallett 2) FWD_TYPE field not 2'b00 i.e. Link Level msg 4452215976Sjmallett which is the only defined type for HiGig2 4453215976Sjmallett 3) FC_OBJECT field is neither 4'b0000 for 4454215976Sjmallett Physical Link nor 4'b0010 for Logical Link. 
4455215976Sjmallett Those are the only two defined types in HiGig2 */ 4456215976Sjmallett uint64_t undat : 1; /**< Unexpected Data 4457215976Sjmallett (XAUI Mode only) */ 4458215976Sjmallett uint64_t uneop : 1; /**< Unexpected EOP 4459215976Sjmallett (XAUI Mode only) */ 4460215976Sjmallett uint64_t unsop : 1; /**< Unexpected SOP 4461215976Sjmallett (XAUI Mode only) */ 4462215976Sjmallett uint64_t bad_term : 1; /**< Frame is terminated by control character other 4463215976Sjmallett than /T/. The error propagation control 4464215976Sjmallett character /E/ will be included as part of the 4465215976Sjmallett frame and does not cause a frame termination. 4466215976Sjmallett (XAUI Mode only) */ 4467215976Sjmallett uint64_t bad_seq : 1; /**< Reserved Sequence Deteted 4468215976Sjmallett (XAUI Mode only) */ 4469215976Sjmallett uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted 4470215976Sjmallett (XAUI Mode only) */ 4471215976Sjmallett uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted 4472215976Sjmallett (XAUI Mode only) */ 4473215976Sjmallett uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ 4474215976Sjmallett uint64_t reserved_16_18 : 3; 4475215976Sjmallett uint64_t ifgerr : 1; /**< Interframe Gap Violation 4476215976Sjmallett Does not necessarily indicate a failure 4477215976Sjmallett (SGMII/1000Base-X only) */ 4478215976Sjmallett uint64_t coldet : 1; /**< Collision Detection 4479215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 4480215976Sjmallett uint64_t falerr : 1; /**< False carrier error or extend error after slottime 4481215976Sjmallett (SGMII/1000Base-X only) */ 4482215976Sjmallett uint64_t rsverr : 1; /**< Reserved opcodes */ 4483215976Sjmallett uint64_t pcterr : 1; /**< Bad Preamble / Protocol 4484215976Sjmallett In XAUI mode, the column of data that was bad 4485215976Sjmallett will be logged in GMX_RX_XAUI_BAD_COL */ 4486215976Sjmallett uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow 
4487215976Sjmallett This interrupt should never assert 4488215976Sjmallett (SGMII/1000Base-X only) */ 4489215976Sjmallett uint64_t reserved_9_9 : 1; 4490215976Sjmallett uint64_t skperr : 1; /**< Skipper error */ 4491215976Sjmallett uint64_t rcverr : 1; /**< Frame was received with Data reception error */ 4492215976Sjmallett uint64_t reserved_5_6 : 2; 4493215976Sjmallett uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ 4494215976Sjmallett uint64_t jabber : 1; /**< Frame was received with length > sys_length */ 4495215976Sjmallett uint64_t reserved_2_2 : 1; 4496215976Sjmallett uint64_t carext : 1; /**< Carrier extend error 4497215976Sjmallett (SGMII/1000Base-X only) */ 4498215976Sjmallett uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize 4499215976Sjmallett Frame length checks are typically handled in PIP 4500215976Sjmallett (PIP_INT_REG[MINERR]), but pause frames are 4501215976Sjmallett normally discarded before being inspected by PIP. */ 4502215976Sjmallett#else 4503215976Sjmallett uint64_t minerr : 1; 4504215976Sjmallett uint64_t carext : 1; 4505215976Sjmallett uint64_t reserved_2_2 : 1; 4506215976Sjmallett uint64_t jabber : 1; 4507215976Sjmallett uint64_t fcserr : 1; 4508215976Sjmallett uint64_t reserved_5_6 : 2; 4509215976Sjmallett uint64_t rcverr : 1; 4510215976Sjmallett uint64_t skperr : 1; 4511215976Sjmallett uint64_t reserved_9_9 : 1; 4512215976Sjmallett uint64_t ovrerr : 1; 4513215976Sjmallett uint64_t pcterr : 1; 4514215976Sjmallett uint64_t rsverr : 1; 4515215976Sjmallett uint64_t falerr : 1; 4516215976Sjmallett uint64_t coldet : 1; 4517215976Sjmallett uint64_t ifgerr : 1; 4518215976Sjmallett uint64_t reserved_16_18 : 3; 4519215976Sjmallett uint64_t pause_drp : 1; 4520215976Sjmallett uint64_t loc_fault : 1; 4521215976Sjmallett uint64_t rem_fault : 1; 4522215976Sjmallett uint64_t bad_seq : 1; 4523215976Sjmallett uint64_t bad_term : 1; 4524215976Sjmallett uint64_t unsop : 1; 4525215976Sjmallett uint64_t uneop : 1; 
4526215976Sjmallett uint64_t undat : 1; 4527215976Sjmallett uint64_t hg2fld : 1; 4528215976Sjmallett uint64_t hg2cc : 1; 4529215976Sjmallett uint64_t reserved_29_63 : 35; 4530215976Sjmallett#endif 4531215976Sjmallett } cn63xx; 4532215976Sjmallett struct cvmx_gmxx_rxx_int_reg_cn63xx cn63xxp1; 4533215976Sjmallett}; 4534215976Sjmalletttypedef union cvmx_gmxx_rxx_int_reg cvmx_gmxx_rxx_int_reg_t; 4535215976Sjmallett 4536215976Sjmallett/** 4537215976Sjmallett * cvmx_gmx#_rx#_jabber 4538215976Sjmallett * 4539215976Sjmallett * GMX_RX_JABBER = The max size packet after which GMX will truncate 4540215976Sjmallett * 4541215976Sjmallett * 4542215976Sjmallett * Notes: 4543215976Sjmallett * CNT must be 8-byte aligned such that CNT[2:0] == 0 4544215976Sjmallett * 4545215976Sjmallett * The packet that will be sent to the packet input logic will have an 4546215976Sjmallett * additionl 8 bytes if GMX_RX_FRM_CTL[PRE_CHK] is set and 4547215976Sjmallett * GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is 4548215976Sjmallett * defined as... 4549215976Sjmallett * 4550215976Sjmallett * max_sized_packet = GMX_RX_JABBER[CNT]+((GMX_RX_FRM_CTL[PRE_CHK] & !GMX_RX_FRM_CTL[PRE_STRP])*8) 4551215976Sjmallett * 4552215976Sjmallett * In XAUI mode prt0 is used for checking. 
 */
union cvmx_gmxx_rxx_jabber
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_jabber_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t cnt : 16; /**< Byte count for jabber check
                                Failing packets set the JABBER interrupt and are
                                optionally sent with opcode==JABBER
                                GMX will truncate the packet to CNT bytes */
#else
        uint64_t cnt : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    /* Per-chip views: all supported chips share the common layout above. */
    struct cvmx_gmxx_rxx_jabber_s cn30xx;
    struct cvmx_gmxx_rxx_jabber_s cn31xx;
    struct cvmx_gmxx_rxx_jabber_s cn38xx;
    struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
    struct cvmx_gmxx_rxx_jabber_s cn50xx;
    struct cvmx_gmxx_rxx_jabber_s cn52xx;
    struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
    struct cvmx_gmxx_rxx_jabber_s cn56xx;
    struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
    struct cvmx_gmxx_rxx_jabber_s cn58xx;
    struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
    struct cvmx_gmxx_rxx_jabber_s cn63xx;
    struct cvmx_gmxx_rxx_jabber_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_jabber cvmx_gmxx_rxx_jabber_t;

/**
 * cvmx_gmx#_rx#_pause_drop_time
 *
 * GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped
 * due to a GMX RX FIFO full condition
 *
 */
union cvmx_gmxx_rxx_pause_drop_time
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_pause_drop_time_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
#else
        uint64_t status : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    /* Register first appears on CN50XX; earlier chips have no view here. */
    struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx;
    struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_pause_drop_time cvmx_gmxx_rxx_pause_drop_time_t;

/**
 * cvmx_gmx#_rx#_rx_inbnd
 *
 * GMX_RX_INBND = RGMII InBand Link Status
 *
 *
 * Notes:
 * These fields are only valid if the attached PHY is operating in RGMII mode
 * and supports the optional in-band status (see section 3.4.1 of the RGMII
 * specification, version 1.3 for more information).
 */
union cvmx_gmxx_rxx_rx_inbnd
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_rx_inbnd_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_4_63 : 60;
        uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex
                                  0=half-duplex
                                  1=full-duplex */
        uint64_t speed : 2;  /**< RGMII Inbound LinkSpeed
                                  00=2.5MHz
                                  01=25MHz
                                  10=125MHz
                                  11=Reserved */
        uint64_t status : 1; /**< RGMII Inbound LinkStatus
                                  0=down
                                  1=up */
#else
        uint64_t status : 1;
        uint64_t speed : 2;
        uint64_t duplex : 1;
        uint64_t reserved_4_63 : 60;
#endif
    } s;
    /* Only RGMII-capable chips provide this register. */
    struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
    struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
    struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
    struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
    struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
    struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
    struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
};
typedef union cvmx_gmxx_rxx_rx_inbnd cvmx_gmxx_rxx_rx_inbnd_t;

/**
 * cvmx_gmx#_rx#_stats_ctl
 *
 * GMX_RX_STATS_CTL = RX Stats Control register
 *
 */
union cvmx_gmxx_rxx_stats_ctl
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_1_63 : 63;
        uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
#else
        uint64_t rd_clr : 1;
        uint64_t reserved_1_63 : 63;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_ctl_s cn63xx;
    struct cvmx_gmxx_rxx_stats_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_ctl cvmx_gmxx_rxx_stats_ctl_t;

/**
 * cvmx_gmx#_rx#_stats_octs
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_octs
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_octs_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt : 48; /**< Octet count of received good packets */
#else
        uint64_t cnt : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_octs_s cn63xx;
    struct cvmx_gmxx_rxx_stats_octs_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs cvmx_gmxx_rxx_stats_octs_t;

/**
 * cvmx_gmx#_rx#_stats_octs_ctl
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_octs_ctl
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt : 48; /**< Octet count of received pause packets */
#else
        uint64_t cnt : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx;
    struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs_ctl cvmx_gmxx_rxx_stats_octs_ctl_t;

/**
 * cvmx_gmx#_rx#_stats_octs_dmac
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_octs_dmac
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
#else
        uint64_t cnt : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx;
    struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs_dmac cvmx_gmxx_rxx_stats_octs_dmac_t;

/**
 * cvmx_gmx#_rx#_stats_octs_drp
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_octs_drp
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_octs_drp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt : 48; /**< Octet count of dropped packets */
#else
        uint64_t cnt : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx;
    struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs_drp cvmx_gmxx_rxx_stats_octs_drp_t;

/**
 * cvmx_gmx#_rx#_stats_pkts
 *
 * GMX_RX_STATS_PKTS
 *
 * Count of good received packets - packets that are not recognized as PAUSE
 * packets, dropped due to the DMAC filter, dropped due to FIFO full status, or
 * have any other OPCODE (FCS, Length, etc).
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_pkts
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_pkts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt : 32; /**< Count of received good packets */
#else
        uint64_t cnt : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_s cn63xx;
    struct cvmx_gmxx_rxx_stats_pkts_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts cvmx_gmxx_rxx_stats_pkts_t;

/**
 * cvmx_gmx#_rx#_stats_pkts_bad
 *
 * GMX_RX_STATS_PKTS_BAD
 *
 * Count of all packets received with some error that were not dropped
 * either due to the dmac filter or lack of room in the receive FIFO.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_pkts_bad
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt : 32; /**< Count of bad packets */
#else
        uint64_t cnt : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx;
    struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_bad cvmx_gmxx_rxx_stats_pkts_bad_t;

/**
 * cvmx_gmx#_rx#_stats_pkts_ctl
 *
 * GMX_RX_STATS_PKTS_CTL
 *
 * Count of all packets received that were recognized as Flow Control or
 * PAUSE packets. PAUSE packets with any kind of error are counted in
 * GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
 * forwarded based on the GMX_RX_FRM_CTL[CTL_DRP] bit. This count
 * increments regardless of whether the packet is dropped. Pause packets
 * will never be counted in GMX_RX_STATS_PKTS. Packets dropped due to the dmac
 * filter will be counted in GMX_RX_STATS_PKTS_DMAC and not here.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_pkts_ctl
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt : 32; /**< Count of received pause packets */
#else
        uint64_t cnt : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx;
    struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_ctl cvmx_gmxx_rxx_stats_pkts_ctl_t;

/**
 * cvmx_gmx#_rx#_stats_pkts_dmac
 *
 * GMX_RX_STATS_PKTS_DMAC
 *
 * Count of all packets received that were dropped by the dmac filter.
 * Packets that match the DMAC will be dropped and counted here regardless
 * of if they were bad packets. These packets will never be counted in
 * GMX_RX_STATS_PKTS.
 *
 * Some packets that were not able to satisfy the DECISION_CNT may not
 * actually be dropped by Octeon, but they will be counted here as if they
 * were dropped.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_rxx_stats_pkts_dmac
{
    uint64_t u64;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt : 32; /**< Count of filtered dmac packets */
#else
        uint64_t cnt : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx;
    struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_dmac cvmx_gmxx_rxx_stats_pkts_dmac_t;

/**
 * cvmx_gmx#_rx#_stats_pkts_drp
 *
 * GMX_RX_STATS_PKTS_DRP
 *
 * Count of all packets received
that were dropped due to a full receive 5024215976Sjmallett * FIFO. This counts good and bad packets received - all packets dropped by 5025215976Sjmallett * the FIFO. It does not count packets dropped by the dmac or pause packet 5026215976Sjmallett * filters. 5027215976Sjmallett * 5028215976Sjmallett * Notes: 5029215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set 5030215976Sjmallett * - Counters will wrap 5031215976Sjmallett */ 5032215976Sjmallettunion cvmx_gmxx_rxx_stats_pkts_drp 5033215976Sjmallett{ 5034215976Sjmallett uint64_t u64; 5035215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s 5036215976Sjmallett { 5037215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5038215976Sjmallett uint64_t reserved_32_63 : 32; 5039215976Sjmallett uint64_t cnt : 32; /**< Count of dropped packets */ 5040215976Sjmallett#else 5041215976Sjmallett uint64_t cnt : 32; 5042215976Sjmallett uint64_t reserved_32_63 : 32; 5043215976Sjmallett#endif 5044215976Sjmallett } s; 5045215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx; 5046215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx; 5047215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx; 5048215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2; 5049215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx; 5050215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx; 5051215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1; 5052215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx; 5053215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1; 5054215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx; 5055215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1; 5056215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx; 5057215976Sjmallett struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1; 5058215976Sjmallett}; 5059215976Sjmalletttypedef union cvmx_gmxx_rxx_stats_pkts_drp 
cvmx_gmxx_rxx_stats_pkts_drp_t; 5060215976Sjmallett 5061215976Sjmallett/** 5062215976Sjmallett * cvmx_gmx#_rx#_udd_skp 5063215976Sjmallett * 5064215976Sjmallett * GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data 5065215976Sjmallett * 5066215976Sjmallett * 5067215976Sjmallett * Notes: 5068215976Sjmallett * (1) The skip bytes are part of the packet and will be sent down the NCB 5069215976Sjmallett * packet interface and will be handled by PKI. 5070215976Sjmallett * 5071215976Sjmallett * (2) The system can determine if the UDD bytes are included in the FCS check 5072215976Sjmallett * by using the FCSSEL field - if the FCS check is enabled. 5073215976Sjmallett * 5074215976Sjmallett * (3) Assume that the preamble/sfd is always at the start of the frame - even 5075215976Sjmallett * before UDD bytes. In most cases, there will be no preamble in these 5076215976Sjmallett * cases since it will be packet interface in direct communication to 5077215976Sjmallett * another packet interface (MAC to MAC) without a PHY involved. 5078215976Sjmallett * 5079215976Sjmallett * (4) We can still do address filtering and control packet filtering is the 5080215976Sjmallett * user desires. 5081215976Sjmallett * 5082215976Sjmallett * (5) UDD_SKP must be 0 in half-duplex operation unless 5083215976Sjmallett * GMX_RX_FRM_CTL[PRE_CHK] is clear. If GMX_RX_FRM_CTL[PRE_CHK] is clear, 5084215976Sjmallett * then UDD_SKP will normally be 8. 5085215976Sjmallett * 5086215976Sjmallett * (6) In all cases, the UDD bytes will be sent down the packet interface as 5087215976Sjmallett * part of the packet. The UDD bytes are never stripped from the actual 5088215976Sjmallett * packet. 
5089215976Sjmallett * 5090215976Sjmallett * (7) If LEN != 0, then GMX_RX_FRM_CHK[LENERR] will be disabled and GMX_RX_INT_REG[LENERR] will be zero 5091215976Sjmallett */ 5092215976Sjmallettunion cvmx_gmxx_rxx_udd_skp 5093215976Sjmallett{ 5094215976Sjmallett uint64_t u64; 5095215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s 5096215976Sjmallett { 5097215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5098215976Sjmallett uint64_t reserved_9_63 : 55; 5099215976Sjmallett uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation 5100215976Sjmallett 0 = all skip bytes are included in FCS 5101215976Sjmallett 1 = the skip bytes are not included in FCS 5102215976Sjmallett When GMX_TX_XAUI_CTL[HG_EN] is set, FCSSEL must 5103215976Sjmallett be zero. */ 5104215976Sjmallett uint64_t reserved_7_7 : 1; 5105215976Sjmallett uint64_t len : 7; /**< Amount of User-defined data before the start of 5106215976Sjmallett the L2 data. Zero means L2 comes first. 5107215976Sjmallett Max value is 64. 5108215976Sjmallett When GMX_TX_XAUI_CTL[HG_EN] is set, LEN must be 5109215976Sjmallett set to 12 or 16 (depending on HiGig header size) 5110215976Sjmallett to account for the HiGig header. LEN=12 selects 5111215976Sjmallett HiGig/HiGig+, and LEN=16 selects HiGig2. 
*/ 5112215976Sjmallett#else 5113215976Sjmallett uint64_t len : 7; 5114215976Sjmallett uint64_t reserved_7_7 : 1; 5115215976Sjmallett uint64_t fcssel : 1; 5116215976Sjmallett uint64_t reserved_9_63 : 55; 5117215976Sjmallett#endif 5118215976Sjmallett } s; 5119215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn30xx; 5120215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn31xx; 5121215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn38xx; 5122215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2; 5123215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn50xx; 5124215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn52xx; 5125215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1; 5126215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn56xx; 5127215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1; 5128215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn58xx; 5129215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1; 5130215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn63xx; 5131215976Sjmallett struct cvmx_gmxx_rxx_udd_skp_s cn63xxp1; 5132215976Sjmallett}; 5133215976Sjmalletttypedef union cvmx_gmxx_rxx_udd_skp cvmx_gmxx_rxx_udd_skp_t; 5134215976Sjmallett 5135215976Sjmallett/** 5136215976Sjmallett * cvmx_gmx#_rx_bp_drop# 5137215976Sjmallett * 5138215976Sjmallett * GMX_RX_BP_DROP = FIFO mark for packet drop 5139215976Sjmallett * 5140215976Sjmallett * 5141215976Sjmallett * Notes: 5142215976Sjmallett * The actual watermark is dynamic with respect to the GMX_RX_PRTS 5143215976Sjmallett * register. The GMX_RX_PRTS controls the depth of the port's 5144215976Sjmallett * FIFO so as ports are added or removed, the drop point may change. 5145215976Sjmallett * 5146215976Sjmallett * In XAUI mode prt0 is used for checking. 
5147215976Sjmallett */ 5148215976Sjmallettunion cvmx_gmxx_rx_bp_dropx 5149215976Sjmallett{ 5150215976Sjmallett uint64_t u64; 5151215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s 5152215976Sjmallett { 5153215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5154215976Sjmallett uint64_t reserved_6_63 : 58; 5155215976Sjmallett uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO. 5156215976Sjmallett When the FIFO exceeds this count, packets will 5157215976Sjmallett be dropped and not buffered. 5158215976Sjmallett MARK should typically be programmed to ports+1. 5159215976Sjmallett Failure to program correctly can lead to system 5160215976Sjmallett instability. */ 5161215976Sjmallett#else 5162215976Sjmallett uint64_t mark : 6; 5163215976Sjmallett uint64_t reserved_6_63 : 58; 5164215976Sjmallett#endif 5165215976Sjmallett } s; 5166215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn30xx; 5167215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn31xx; 5168215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn38xx; 5169215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2; 5170215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn50xx; 5171215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn52xx; 5172215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1; 5173215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn56xx; 5174215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1; 5175215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn58xx; 5176215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1; 5177215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn63xx; 5178215976Sjmallett struct cvmx_gmxx_rx_bp_dropx_s cn63xxp1; 5179215976Sjmallett}; 5180215976Sjmalletttypedef union cvmx_gmxx_rx_bp_dropx cvmx_gmxx_rx_bp_dropx_t; 5181215976Sjmallett 5182215976Sjmallett/** 5183215976Sjmallett * cvmx_gmx#_rx_bp_off# 5184215976Sjmallett * 5185215976Sjmallett * GMX_RX_BP_OFF = Lowater mark for packet drop 5186215976Sjmallett * 5187215976Sjmallett * 5188215976Sjmallett * Notes: 5189215976Sjmallett * In XAUI 
mode, prt0 is used for checking. 5190215976Sjmallett * 5191215976Sjmallett */ 5192215976Sjmallettunion cvmx_gmxx_rx_bp_offx 5193215976Sjmallett{ 5194215976Sjmallett uint64_t u64; 5195215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s 5196215976Sjmallett { 5197215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5198215976Sjmallett uint64_t reserved_6_63 : 58; 5199215976Sjmallett uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */ 5200215976Sjmallett#else 5201215976Sjmallett uint64_t mark : 6; 5202215976Sjmallett uint64_t reserved_6_63 : 58; 5203215976Sjmallett#endif 5204215976Sjmallett } s; 5205215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn30xx; 5206215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn31xx; 5207215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn38xx; 5208215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn38xxp2; 5209215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn50xx; 5210215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn52xx; 5211215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn52xxp1; 5212215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn56xx; 5213215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn56xxp1; 5214215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn58xx; 5215215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn58xxp1; 5216215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn63xx; 5217215976Sjmallett struct cvmx_gmxx_rx_bp_offx_s cn63xxp1; 5218215976Sjmallett}; 5219215976Sjmalletttypedef union cvmx_gmxx_rx_bp_offx cvmx_gmxx_rx_bp_offx_t; 5220215976Sjmallett 5221215976Sjmallett/** 5222215976Sjmallett * cvmx_gmx#_rx_bp_on# 5223215976Sjmallett * 5224215976Sjmallett * GMX_RX_BP_ON = Hiwater mark for port/interface backpressure 5225215976Sjmallett * 5226215976Sjmallett * 5227215976Sjmallett * Notes: 5228215976Sjmallett * In XAUI mode, prt0 is used for checking. 
5229215976Sjmallett * 5230215976Sjmallett */ 5231215976Sjmallettunion cvmx_gmxx_rx_bp_onx 5232215976Sjmallett{ 5233215976Sjmallett uint64_t u64; 5234215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s 5235215976Sjmallett { 5236215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5237215976Sjmallett uint64_t reserved_9_63 : 55; 5238215976Sjmallett uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. 5239215976Sjmallett Each register is for an individual port. In XAUI 5240215976Sjmallett mode, prt0 is used for the unified RX FIFO 5241215976Sjmallett GMX_RX_BP_ON must satisfy 5242215976Sjmallett BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP) 5243215976Sjmallett A value of zero will immediately assert back 5244215976Sjmallett pressure. */ 5245215976Sjmallett#else 5246215976Sjmallett uint64_t mark : 9; 5247215976Sjmallett uint64_t reserved_9_63 : 55; 5248215976Sjmallett#endif 5249215976Sjmallett } s; 5250215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn30xx; 5251215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn31xx; 5252215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn38xx; 5253215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn38xxp2; 5254215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn50xx; 5255215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn52xx; 5256215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn52xxp1; 5257215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn56xx; 5258215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn56xxp1; 5259215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn58xx; 5260215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn58xxp1; 5261215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn63xx; 5262215976Sjmallett struct cvmx_gmxx_rx_bp_onx_s cn63xxp1; 5263215976Sjmallett}; 5264215976Sjmalletttypedef union cvmx_gmxx_rx_bp_onx cvmx_gmxx_rx_bp_onx_t; 5265215976Sjmallett 5266215976Sjmallett/** 5267215976Sjmallett * cvmx_gmx#_rx_hg2_status 5268215976Sjmallett * 5269215976Sjmallett * ** HG2 message CSRs 5270215976Sjmallett * 5271215976Sjmallett */ 5272215976Sjmallettunion cvmx_gmxx_rx_hg2_status 
5273215976Sjmallett{ 5274215976Sjmallett uint64_t u64; 5275215976Sjmallett struct cvmx_gmxx_rx_hg2_status_s 5276215976Sjmallett { 5277215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5278215976Sjmallett uint64_t reserved_48_63 : 16; 5279215976Sjmallett uint64_t phtim2go : 16; /**< Physical time to go for removal of physical link 5280215976Sjmallett pause. Initial value from received HiGig2 msg pkt 5281215976Sjmallett Non-zero only when physical back pressure active */ 5282215976Sjmallett uint64_t xof : 16; /**< 16 bit xof back pressure vector from HiGig2 msg pkt 5283215976Sjmallett or from CBFC packets. 5284215976Sjmallett Non-zero only when logical back pressure is active 5285215976Sjmallett All bits will be 0 when LGTIM2GO=0 */ 5286215976Sjmallett uint64_t lgtim2go : 16; /**< Logical packet flow back pressure time remaining 5287215976Sjmallett Initial value set from xof time field of HiGig2 5288215976Sjmallett message packet received or a function of the 5289215976Sjmallett enabled and current timers for CBFC packets. 
5290215976Sjmallett Non-zero only when logical back pressure is active */ 5291215976Sjmallett#else 5292215976Sjmallett uint64_t lgtim2go : 16; 5293215976Sjmallett uint64_t xof : 16; 5294215976Sjmallett uint64_t phtim2go : 16; 5295215976Sjmallett uint64_t reserved_48_63 : 16; 5296215976Sjmallett#endif 5297215976Sjmallett } s; 5298215976Sjmallett struct cvmx_gmxx_rx_hg2_status_s cn52xx; 5299215976Sjmallett struct cvmx_gmxx_rx_hg2_status_s cn52xxp1; 5300215976Sjmallett struct cvmx_gmxx_rx_hg2_status_s cn56xx; 5301215976Sjmallett struct cvmx_gmxx_rx_hg2_status_s cn63xx; 5302215976Sjmallett struct cvmx_gmxx_rx_hg2_status_s cn63xxp1; 5303215976Sjmallett}; 5304215976Sjmalletttypedef union cvmx_gmxx_rx_hg2_status cvmx_gmxx_rx_hg2_status_t; 5305215976Sjmallett 5306215976Sjmallett/** 5307215976Sjmallett * cvmx_gmx#_rx_pass_en 5308215976Sjmallett * 5309215976Sjmallett * GMX_RX_PASS_EN = Packet pass through mode enable 5310215976Sjmallett * 5311215976Sjmallett * When both Octane ports are running in Spi4 mode, packets can be directly 5312215976Sjmallett * passed from one SPX interface to the other without being processed by the 5313215976Sjmallett * core or PP's. The register has one bit for each port to enable the pass 5314215976Sjmallett * through feature. 5315215976Sjmallett * 5316215976Sjmallett * Notes: 5317215976Sjmallett * (1) Can only be used in dual Spi4 configs 5318215976Sjmallett * 5319215976Sjmallett * (2) The mapped pass through output port cannot be the destination port for 5320215976Sjmallett * any Octane core traffic. 
5321215976Sjmallett */ 5322215976Sjmallettunion cvmx_gmxx_rx_pass_en 5323215976Sjmallett{ 5324215976Sjmallett uint64_t u64; 5325215976Sjmallett struct cvmx_gmxx_rx_pass_en_s 5326215976Sjmallett { 5327215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5328215976Sjmallett uint64_t reserved_16_63 : 48; 5329215976Sjmallett uint64_t en : 16; /**< Which ports to configure in pass through mode */ 5330215976Sjmallett#else 5331215976Sjmallett uint64_t en : 16; 5332215976Sjmallett uint64_t reserved_16_63 : 48; 5333215976Sjmallett#endif 5334215976Sjmallett } s; 5335215976Sjmallett struct cvmx_gmxx_rx_pass_en_s cn38xx; 5336215976Sjmallett struct cvmx_gmxx_rx_pass_en_s cn38xxp2; 5337215976Sjmallett struct cvmx_gmxx_rx_pass_en_s cn58xx; 5338215976Sjmallett struct cvmx_gmxx_rx_pass_en_s cn58xxp1; 5339215976Sjmallett}; 5340215976Sjmalletttypedef union cvmx_gmxx_rx_pass_en cvmx_gmxx_rx_pass_en_t; 5341215976Sjmallett 5342215976Sjmallett/** 5343215976Sjmallett * cvmx_gmx#_rx_pass_map# 5344215976Sjmallett * 5345215976Sjmallett * GMX_RX_PASS_MAP = Packet pass through port map 5346215976Sjmallett * 5347215976Sjmallett */ 5348215976Sjmallettunion cvmx_gmxx_rx_pass_mapx 5349215976Sjmallett{ 5350215976Sjmallett uint64_t u64; 5351215976Sjmallett struct cvmx_gmxx_rx_pass_mapx_s 5352215976Sjmallett { 5353215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5354215976Sjmallett uint64_t reserved_4_63 : 60; 5355215976Sjmallett uint64_t dprt : 4; /**< Destination port to map Spi pass through traffic */ 5356215976Sjmallett#else 5357215976Sjmallett uint64_t dprt : 4; 5358215976Sjmallett uint64_t reserved_4_63 : 60; 5359215976Sjmallett#endif 5360215976Sjmallett } s; 5361215976Sjmallett struct cvmx_gmxx_rx_pass_mapx_s cn38xx; 5362215976Sjmallett struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2; 5363215976Sjmallett struct cvmx_gmxx_rx_pass_mapx_s cn58xx; 5364215976Sjmallett struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1; 5365215976Sjmallett}; 5366215976Sjmalletttypedef union cvmx_gmxx_rx_pass_mapx cvmx_gmxx_rx_pass_mapx_t; 

/**
 * cvmx_gmx#_rx_prt_info
 *
 * GMX_RX_PRT_INFO = Report the RX status for port
 *
 *
 * Notes:
 * In XAUI mode, only the lsb (corresponding to port0) of DROP and COMMIT are used.
 *
 */
union cvmx_gmxx_rx_prt_info
{
	uint64_t u64;
	struct cvmx_gmxx_rx_prt_info_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t drop                         : 16; /**< Per port indication that data was dropped */
	uint64_t commit                       : 16; /**< Per port indication that SOP was accepted */
#else
	uint64_t commit                       : 16;
	uint64_t drop                         : 16;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_gmxx_rx_prt_info_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_19_63               : 45;
	uint64_t drop                         : 3;  /**< Per port indication that data was dropped */
	uint64_t reserved_3_15                : 13;
	uint64_t commit                       : 3;  /**< Per port indication that SOP was accepted */
#else
	uint64_t commit                       : 3;
	uint64_t reserved_3_15                : 13;
	uint64_t drop                         : 3;
	uint64_t reserved_19_63               : 45;
#endif
	} cn30xx;
	struct cvmx_gmxx_rx_prt_info_cn30xx   cn31xx;
	struct cvmx_gmxx_rx_prt_info_s        cn38xx;
	struct cvmx_gmxx_rx_prt_info_cn30xx   cn50xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t drop                         : 4;  /**< Per port indication that data was dropped */
	uint64_t reserved_4_15                : 12;
	uint64_t commit                       : 4;  /**< Per port indication that SOP was accepted */
#else
	uint64_t commit                       : 4;
	uint64_t reserved_4_15                : 12;
	uint64_t drop                         : 4;
	uint64_t reserved_20_63               : 44;
#endif
	} cn52xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn52xxp1;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn56xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn56xxp1;
	struct cvmx_gmxx_rx_prt_info_s        cn58xx;
	struct cvmx_gmxx_rx_prt_info_s        cn58xxp1;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn63xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn63xxp1;
};
typedef union cvmx_gmxx_rx_prt_info cvmx_gmxx_rx_prt_info_t;

/**
 * cvmx_gmx#_rx_prts
 *
 * GMX_RX_PRTS = Number of FIFOs to carve the RX buffer into
 *
 *
 * Notes:
 * GMX_RX_PRTS[PRTS] must be set to '1' in XAUI mode.
 *
 */
union cvmx_gmxx_rx_prts
{
	uint64_t u64;
	struct cvmx_gmxx_rx_prts_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_3_63                : 61;
	uint64_t prts                         : 3;  /**< In SGMII/1000Base-X mode, the RX buffer can be
                                                         carved into several logical buffers depending on
                                                         the number of implemented ports.
                                                         0 or 1 port  = 512ticks / 4096bytes
                                                         2 ports      = 256ticks / 2048bytes
                                                         3 or 4 ports = 128ticks / 1024bytes */
#else
	uint64_t prts                         : 3;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_gmxx_rx_prts_s            cn30xx;
	struct cvmx_gmxx_rx_prts_s            cn31xx;
	struct cvmx_gmxx_rx_prts_s            cn38xx;
	struct cvmx_gmxx_rx_prts_s            cn38xxp2;
	struct cvmx_gmxx_rx_prts_s            cn50xx;
	struct cvmx_gmxx_rx_prts_s            cn52xx;
	struct cvmx_gmxx_rx_prts_s            cn52xxp1;
	struct cvmx_gmxx_rx_prts_s            cn56xx;
	struct cvmx_gmxx_rx_prts_s            cn56xxp1;
	struct cvmx_gmxx_rx_prts_s            cn58xx;
	struct cvmx_gmxx_rx_prts_s            cn58xxp1;
	struct cvmx_gmxx_rx_prts_s            cn63xx;
	struct cvmx_gmxx_rx_prts_s            cn63xxp1;
};
typedef union cvmx_gmxx_rx_prts cvmx_gmxx_rx_prts_t;

/**
 * cvmx_gmx#_rx_tx_status
 *
 * GMX_RX_TX_STATUS = GMX RX/TX Status
 *
 */
union cvmx_gmxx_rx_tx_status
{
	uint64_t u64;
	struct cvmx_gmxx_rx_tx_status_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_7_63                : 57;
	uint64_t tx                           : 3;  /**< Transmit data since last read */
	uint64_t reserved_3_3                 : 1;
	uint64_t rx                           : 3;  /**< Receive data since last read */
#else
	uint64_t rx                           : 3;
	uint64_t reserved_3_3                 : 1;
	uint64_t tx                           : 3;
	uint64_t reserved_7_63                : 57;
#endif
	} s;
	struct cvmx_gmxx_rx_tx_status_s       cn30xx;
	struct cvmx_gmxx_rx_tx_status_s       cn31xx;
	struct cvmx_gmxx_rx_tx_status_s       cn50xx;
};
typedef union cvmx_gmxx_rx_tx_status cvmx_gmxx_rx_tx_status_t;

/**
 * cvmx_gmx#_rx_xaui_bad_col
 */
union cvmx_gmxx_rx_xaui_bad_col
{
	uint64_t u64;
	struct cvmx_gmxx_rx_xaui_bad_col_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_40_63               : 24;
	uint64_t val                          : 1;  /**< Set when GMX_RX_INT_REG[PCTERR] is set.
                                                         (XAUI mode only) */
	uint64_t state                        : 3;  /**< When GMX_RX_INT_REG[PCTERR] is set, STATE will
                                                         contain the receive state at the time of the
                                                         error.
                                                         (XAUI mode only) */
	uint64_t lane_rxc                     : 4;  /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXC will
                                                         contain the XAUI column at the time of the error.
                                                         (XAUI mode only) */
	uint64_t lane_rxd                     : 32; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXD will
                                                         contain the XAUI column at the time of the error.
                                                         (XAUI mode only) */
#else
	uint64_t lane_rxd                     : 32;
	uint64_t lane_rxc                     : 4;
	uint64_t state                        : 3;
	uint64_t val                          : 1;
	uint64_t reserved_40_63               : 24;
#endif
	} s;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn52xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn52xxp1;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn56xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn56xxp1;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn63xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn63xxp1;
};
typedef union cvmx_gmxx_rx_xaui_bad_col cvmx_gmxx_rx_xaui_bad_col_t;

/**
 * cvmx_gmx#_rx_xaui_ctl
 */
union cvmx_gmxx_rx_xaui_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_rx_xaui_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t status                       : 2;  /**< Link Status
                                                         0=Link OK
                                                         1=Local Fault
                                                         2=Remote Fault
                                                         3=Reserved
                                                         (XAUI mode only) */
#else
	uint64_t status                       : 2;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn52xx;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn52xxp1;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn56xx;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn56xxp1;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn63xx;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn63xxp1;
};
typedef union cvmx_gmxx_rx_xaui_ctl cvmx_gmxx_rx_xaui_ctl_t;

/**
 * cvmx_gmx#_smac#
 *
 * GMX_SMAC = Packet SMAC
 *
 */
union cvmx_gmxx_smacx
{
	uint64_t u64;
	struct cvmx_gmxx_smacx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t smac                         : 48; /**< The SMAC field is used for generating and
                                                         accepting Control Pause packets */
#else
	uint64_t smac                         : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_gmxx_smacx_s              cn30xx;
	struct cvmx_gmxx_smacx_s              cn31xx;
	struct cvmx_gmxx_smacx_s              cn38xx;
	struct cvmx_gmxx_smacx_s              cn38xxp2;
	struct cvmx_gmxx_smacx_s              cn50xx;
	struct cvmx_gmxx_smacx_s              cn52xx;
	struct cvmx_gmxx_smacx_s              cn52xxp1;
	struct cvmx_gmxx_smacx_s              cn56xx;
	struct cvmx_gmxx_smacx_s              cn56xxp1;
	struct cvmx_gmxx_smacx_s              cn58xx;
	struct cvmx_gmxx_smacx_s              cn58xxp1;
	struct cvmx_gmxx_smacx_s              cn63xx;
	struct cvmx_gmxx_smacx_s              cn63xxp1;
};
typedef union cvmx_gmxx_smacx cvmx_gmxx_smacx_t;

/**
 * cvmx_gmx#_soft_bist
 *
 * GMX_SOFT_BIST = Software BIST Control
 *
 */
union cvmx_gmxx_soft_bist
{
	uint64_t u64;
	struct cvmx_gmxx_soft_bist_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t start_bist                   : 1;  /**< Run BIST on all memories in the XAUI CLK domain */
	uint64_t clear_bist                   : 1;  /**< Choose between full BIST and CLEAR bist
                                                         0=Run full BIST
                                                         1=Only run clear BIST */
#else
	uint64_t clear_bist                   : 1;
	uint64_t start_bist                   : 1;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	struct cvmx_gmxx_soft_bist_s          cn63xx;
	struct cvmx_gmxx_soft_bist_s          cn63xxp1;
};
typedef union cvmx_gmxx_soft_bist cvmx_gmxx_soft_bist_t;

/**
 * cvmx_gmx#_stat_bp
 *
 * GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
 *
 */
union cvmx_gmxx_stat_bp
{
	uint64_t u64;
	struct cvmx_gmxx_stat_bp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_17_63               : 47;
	uint64_t bp                           : 1;  /**< Current BP state */
	uint64_t cnt                          : 16; /**< Number of cycles that BP has been asserted
                                                         Saturating counter */
#else
	uint64_t cnt                          : 16;
	uint64_t bp                           : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} s;
	struct cvmx_gmxx_stat_bp_s            cn30xx;
	struct cvmx_gmxx_stat_bp_s            cn31xx;
	struct cvmx_gmxx_stat_bp_s            cn38xx;
	struct cvmx_gmxx_stat_bp_s            cn38xxp2;
	struct cvmx_gmxx_stat_bp_s            cn50xx;
	struct cvmx_gmxx_stat_bp_s            cn52xx;
	struct cvmx_gmxx_stat_bp_s            cn52xxp1;
	struct cvmx_gmxx_stat_bp_s            cn56xx;
	struct cvmx_gmxx_stat_bp_s            cn56xxp1;
	struct cvmx_gmxx_stat_bp_s            cn58xx;
	struct cvmx_gmxx_stat_bp_s            cn58xxp1;
	struct cvmx_gmxx_stat_bp_s            cn63xx;
	struct cvmx_gmxx_stat_bp_s            cn63xxp1;
};
typedef union cvmx_gmxx_stat_bp cvmx_gmxx_stat_bp_t;

/**
 * cvmx_gmx#_tx#_append
 *
 * GMX_TX_APPEND = Packet TX Append Control
 *
 */
union cvmx_gmxx_txx_append
{
	uint64_t u64;
	struct cvmx_gmxx_txx_append_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t force_fcs                    : 1;  /**< Append the Ethernet FCS on each pause packet
                                                         when FCS is clear.  Pause packets are normally
                                                         padded to 60 bytes.  If GMX_TX_MIN_PKT[MIN_SIZE]
                                                         exceeds 59, then FORCE_FCS will not be used. */
	uint64_t fcs                          : 1;  /**< Append the Ethernet FCS on each packet */
	uint64_t pad                          : 1;  /**< Append PAD bytes such that min sized */
	uint64_t preamble                     : 1;  /**< Prepend the Ethernet preamble on each transfer
                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, PREAMBLE
                                                         must be zero. */
#else
	uint64_t preamble                     : 1;
	uint64_t pad                          : 1;
	uint64_t fcs                          : 1;
	uint64_t force_fcs                    : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_gmxx_txx_append_s         cn30xx;
	struct cvmx_gmxx_txx_append_s         cn31xx;
	struct cvmx_gmxx_txx_append_s         cn38xx;
	struct cvmx_gmxx_txx_append_s         cn38xxp2;
	struct cvmx_gmxx_txx_append_s         cn50xx;
	struct cvmx_gmxx_txx_append_s         cn52xx;
	struct cvmx_gmxx_txx_append_s         cn52xxp1;
	struct cvmx_gmxx_txx_append_s         cn56xx;
	struct cvmx_gmxx_txx_append_s         cn56xxp1;
	struct cvmx_gmxx_txx_append_s         cn58xx;
	struct cvmx_gmxx_txx_append_s         cn58xxp1;
	struct cvmx_gmxx_txx_append_s         cn63xx;
	struct cvmx_gmxx_txx_append_s         cn63xxp1;
};
typedef union cvmx_gmxx_txx_append cvmx_gmxx_txx_append_t;

/**
 * cvmx_gmx#_tx#_burst
 *
 * GMX_TX_BURST = Packet TX Burst Counter
 *
 */
union cvmx_gmxx_txx_burst
{
	uint64_t u64;
	struct cvmx_gmxx_txx_burst_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t burst                        : 16; /**< Burst (refer to 802.3 to set correctly)
                                                         Only valid for 1000Mbs half-duplex operation
                                                         halfdup / 1000Mbs: 0x2000
                                                         all other modes: 0x0
                                                         (SGMII/1000Base-X only) */
#else
	uint64_t burst                        : 16;
5744215976Sjmallett uint64_t reserved_16_63 : 48; 5745215976Sjmallett#endif 5746215976Sjmallett } s; 5747215976Sjmallett struct cvmx_gmxx_txx_burst_s cn30xx; 5748215976Sjmallett struct cvmx_gmxx_txx_burst_s cn31xx; 5749215976Sjmallett struct cvmx_gmxx_txx_burst_s cn38xx; 5750215976Sjmallett struct cvmx_gmxx_txx_burst_s cn38xxp2; 5751215976Sjmallett struct cvmx_gmxx_txx_burst_s cn50xx; 5752215976Sjmallett struct cvmx_gmxx_txx_burst_s cn52xx; 5753215976Sjmallett struct cvmx_gmxx_txx_burst_s cn52xxp1; 5754215976Sjmallett struct cvmx_gmxx_txx_burst_s cn56xx; 5755215976Sjmallett struct cvmx_gmxx_txx_burst_s cn56xxp1; 5756215976Sjmallett struct cvmx_gmxx_txx_burst_s cn58xx; 5757215976Sjmallett struct cvmx_gmxx_txx_burst_s cn58xxp1; 5758215976Sjmallett struct cvmx_gmxx_txx_burst_s cn63xx; 5759215976Sjmallett struct cvmx_gmxx_txx_burst_s cn63xxp1; 5760215976Sjmallett}; 5761215976Sjmalletttypedef union cvmx_gmxx_txx_burst cvmx_gmxx_txx_burst_t; 5762215976Sjmallett 5763215976Sjmallett/** 5764215976Sjmallett * cvmx_gmx#_tx#_cbfc_xoff 5765215976Sjmallett */ 5766215976Sjmallettunion cvmx_gmxx_txx_cbfc_xoff 5767215976Sjmallett{ 5768215976Sjmallett uint64_t u64; 5769215976Sjmallett struct cvmx_gmxx_txx_cbfc_xoff_s 5770215976Sjmallett { 5771215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5772215976Sjmallett uint64_t reserved_16_63 : 48; 5773215976Sjmallett uint64_t xoff : 16; /**< Which ports to backpressure 5774215976Sjmallett Do not write in HiGig2 mode i.e. when 5775215976Sjmallett GMX_TX_XAUI_CTL[HG_EN]=1 and 5776215976Sjmallett GMX_RX_UDD_SKP[SKIP]=16. 
*/ 5777215976Sjmallett#else 5778215976Sjmallett uint64_t xoff : 16; 5779215976Sjmallett uint64_t reserved_16_63 : 48; 5780215976Sjmallett#endif 5781215976Sjmallett } s; 5782215976Sjmallett struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx; 5783215976Sjmallett struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx; 5784215976Sjmallett struct cvmx_gmxx_txx_cbfc_xoff_s cn63xx; 5785215976Sjmallett struct cvmx_gmxx_txx_cbfc_xoff_s cn63xxp1; 5786215976Sjmallett}; 5787215976Sjmalletttypedef union cvmx_gmxx_txx_cbfc_xoff cvmx_gmxx_txx_cbfc_xoff_t; 5788215976Sjmallett 5789215976Sjmallett/** 5790215976Sjmallett * cvmx_gmx#_tx#_cbfc_xon 5791215976Sjmallett */ 5792215976Sjmallettunion cvmx_gmxx_txx_cbfc_xon 5793215976Sjmallett{ 5794215976Sjmallett uint64_t u64; 5795215976Sjmallett struct cvmx_gmxx_txx_cbfc_xon_s 5796215976Sjmallett { 5797215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5798215976Sjmallett uint64_t reserved_16_63 : 48; 5799215976Sjmallett uint64_t xon : 16; /**< Which ports to stop backpressure 5800215976Sjmallett Do not write in HiGig2 mode i.e. when 5801215976Sjmallett GMX_TX_XAUI_CTL[HG_EN]=1 and 5802215976Sjmallett GMX_RX_UDD_SKP[SKIP]=16. 
*/ 5803215976Sjmallett#else 5804215976Sjmallett uint64_t xon : 16; 5805215976Sjmallett uint64_t reserved_16_63 : 48; 5806215976Sjmallett#endif 5807215976Sjmallett } s; 5808215976Sjmallett struct cvmx_gmxx_txx_cbfc_xon_s cn52xx; 5809215976Sjmallett struct cvmx_gmxx_txx_cbfc_xon_s cn56xx; 5810215976Sjmallett struct cvmx_gmxx_txx_cbfc_xon_s cn63xx; 5811215976Sjmallett struct cvmx_gmxx_txx_cbfc_xon_s cn63xxp1; 5812215976Sjmallett}; 5813215976Sjmalletttypedef union cvmx_gmxx_txx_cbfc_xon cvmx_gmxx_txx_cbfc_xon_t; 5814215976Sjmallett 5815215976Sjmallett/** 5816215976Sjmallett * cvmx_gmx#_tx#_clk 5817215976Sjmallett * 5818215976Sjmallett * Per Port 5819215976Sjmallett * 5820215976Sjmallett * 5821215976Sjmallett * GMX_TX_CLK = RGMII TX Clock Generation Register 5822215976Sjmallett * 5823215976Sjmallett * Notes: 5824215976Sjmallett * Programming Restrictions: 5825215976Sjmallett * (1) In RGMII mode, if GMX_PRT_CFG[SPEED]==0, then CLK_CNT must be > 1. 5826215976Sjmallett * (2) In MII mode, CLK_CNT == 1 5827215976Sjmallett * (3) In RGMII or GMII mode, if CLK_CNT==0, Octeon will not generate a tx clock. 5828215976Sjmallett * 5829215976Sjmallett * RGMII Example: 5830215976Sjmallett * Given a 125MHz PLL reference clock... 
5831215976Sjmallett * CLK_CNT == 1 ==> 125.0MHz TXC clock period (8ns* 1) 5832215976Sjmallett * CLK_CNT == 5 ==> 25.0MHz TXC clock period (8ns* 5) 5833215976Sjmallett * CLK_CNT == 50 ==> 2.5MHz TXC clock period (8ns*50) 5834215976Sjmallett */ 5835215976Sjmallettunion cvmx_gmxx_txx_clk 5836215976Sjmallett{ 5837215976Sjmallett uint64_t u64; 5838215976Sjmallett struct cvmx_gmxx_txx_clk_s 5839215976Sjmallett { 5840215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5841215976Sjmallett uint64_t reserved_6_63 : 58; 5842215976Sjmallett uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency 5843215976Sjmallett When PLL is used, TXC(phase) = 5844215976Sjmallett spi4_tx_pll_ref_clk(period)/2*CLK_CNT 5845215976Sjmallett When PLL bypass is used, TXC(phase) = 5846215976Sjmallett spi4_tx_pll_ref_clk(period)*2*CLK_CNT 5847215976Sjmallett NOTE: CLK_CNT==0 will not generate any clock 5848215976Sjmallett if CLK_CNT > 1 if GMX_PRT_CFG[SPEED]==0 */ 5849215976Sjmallett#else 5850215976Sjmallett uint64_t clk_cnt : 6; 5851215976Sjmallett uint64_t reserved_6_63 : 58; 5852215976Sjmallett#endif 5853215976Sjmallett } s; 5854215976Sjmallett struct cvmx_gmxx_txx_clk_s cn30xx; 5855215976Sjmallett struct cvmx_gmxx_txx_clk_s cn31xx; 5856215976Sjmallett struct cvmx_gmxx_txx_clk_s cn38xx; 5857215976Sjmallett struct cvmx_gmxx_txx_clk_s cn38xxp2; 5858215976Sjmallett struct cvmx_gmxx_txx_clk_s cn50xx; 5859215976Sjmallett struct cvmx_gmxx_txx_clk_s cn58xx; 5860215976Sjmallett struct cvmx_gmxx_txx_clk_s cn58xxp1; 5861215976Sjmallett}; 5862215976Sjmalletttypedef union cvmx_gmxx_txx_clk cvmx_gmxx_txx_clk_t; 5863215976Sjmallett 5864215976Sjmallett/** 5865215976Sjmallett * cvmx_gmx#_tx#_ctl 5866215976Sjmallett * 5867215976Sjmallett * GMX_TX_CTL = TX Control register 5868215976Sjmallett * 5869215976Sjmallett */ 5870215976Sjmallettunion cvmx_gmxx_txx_ctl 5871215976Sjmallett{ 5872215976Sjmallett uint64_t u64; 5873215976Sjmallett struct cvmx_gmxx_txx_ctl_s 5874215976Sjmallett { 5875215976Sjmallett#if 
__BYTE_ORDER == __BIG_ENDIAN 5876215976Sjmallett uint64_t reserved_2_63 : 62; 5877215976Sjmallett uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats 5878215976Sjmallett and interrupts 5879215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 5880215976Sjmallett uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats 5881215976Sjmallett and interrupts 5882215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 5883215976Sjmallett#else 5884215976Sjmallett uint64_t xscol_en : 1; 5885215976Sjmallett uint64_t xsdef_en : 1; 5886215976Sjmallett uint64_t reserved_2_63 : 62; 5887215976Sjmallett#endif 5888215976Sjmallett } s; 5889215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn30xx; 5890215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn31xx; 5891215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn38xx; 5892215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn38xxp2; 5893215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn50xx; 5894215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn52xx; 5895215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn52xxp1; 5896215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn56xx; 5897215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn56xxp1; 5898215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn58xx; 5899215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn58xxp1; 5900215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn63xx; 5901215976Sjmallett struct cvmx_gmxx_txx_ctl_s cn63xxp1; 5902215976Sjmallett}; 5903215976Sjmalletttypedef union cvmx_gmxx_txx_ctl cvmx_gmxx_txx_ctl_t; 5904215976Sjmallett 5905215976Sjmallett/** 5906215976Sjmallett * cvmx_gmx#_tx#_min_pkt 5907215976Sjmallett * 5908215976Sjmallett * GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD upto min size) 5909215976Sjmallett * 5910215976Sjmallett */ 5911215976Sjmallettunion cvmx_gmxx_txx_min_pkt 5912215976Sjmallett{ 5913215976Sjmallett uint64_t u64; 5914215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s 5915215976Sjmallett { 5916215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5917215976Sjmallett uint64_t reserved_8_63 : 
56; 5918215976Sjmallett uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied 5919215976Sjmallett Padding is only appened when GMX_TX_APPEND[PAD] 5920215976Sjmallett for the coresponding port is set. 5921215976Sjmallett In SGMII mode, packets will be padded to 5922215976Sjmallett MIN_SIZE+1. The reset value will pad to 60 bytes. 5923215976Sjmallett In XAUI mode, packets will be padded to 5924215976Sjmallett MIN(252,(MIN_SIZE+1 & ~0x3)) 5925215976Sjmallett When GMX_TX_XAUI_CTL[HG_EN] is set, the HiGig 5926215976Sjmallett header (12B or 16B) is normally added to the 5927215976Sjmallett packet, so MIN_SIZE should be 59+12=71B for 5928215976Sjmallett HiGig or 59+16=75B for HiGig2. */ 5929215976Sjmallett#else 5930215976Sjmallett uint64_t min_size : 8; 5931215976Sjmallett uint64_t reserved_8_63 : 56; 5932215976Sjmallett#endif 5933215976Sjmallett } s; 5934215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn30xx; 5935215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn31xx; 5936215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn38xx; 5937215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn38xxp2; 5938215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn50xx; 5939215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn52xx; 5940215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn52xxp1; 5941215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn56xx; 5942215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn56xxp1; 5943215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn58xx; 5944215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn58xxp1; 5945215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn63xx; 5946215976Sjmallett struct cvmx_gmxx_txx_min_pkt_s cn63xxp1; 5947215976Sjmallett}; 5948215976Sjmalletttypedef union cvmx_gmxx_txx_min_pkt cvmx_gmxx_txx_min_pkt_t; 5949215976Sjmallett 5950215976Sjmallett/** 5951215976Sjmallett * cvmx_gmx#_tx#_pause_pkt_interval 5952215976Sjmallett * 5953215976Sjmallett * GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets 
will be sent 5954215976Sjmallett * 5955215976Sjmallett * 5956215976Sjmallett * Notes: 5957215976Sjmallett * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and 5958215976Sjmallett * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system 5959215976Sjmallett * designer. It is suggested that TIME be much greater than INTERVAL and 5960215976Sjmallett * GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE 5961215976Sjmallett * count and then when the backpressure condition is lifted, a PAUSE packet 5962215976Sjmallett * with TIME==0 will be sent indicating that Octane is ready for additional 5963215976Sjmallett * data. 5964215976Sjmallett * 5965215976Sjmallett * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is 5966215976Sjmallett * suggested that TIME and INTERVAL are programmed such that they satisify the 5967215976Sjmallett * following rule... 5968215976Sjmallett * 5969215976Sjmallett * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size) 5970215976Sjmallett * 5971215976Sjmallett * where largest_pkt_size is that largest packet that the system can send 5972215976Sjmallett * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size 5973215976Sjmallett * of the PAUSE packet (normally 64B). 5974215976Sjmallett */ 5975215976Sjmallettunion cvmx_gmxx_txx_pause_pkt_interval 5976215976Sjmallett{ 5977215976Sjmallett uint64_t u64; 5978215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s 5979215976Sjmallett { 5980215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 5981215976Sjmallett uint64_t reserved_16_63 : 48; 5982215976Sjmallett uint64_t interval : 16; /**< Arbitrate for a 802.3 pause packet, HiGig2 message, 5983215976Sjmallett or CBFC pause packet every (INTERVAL*512) 5984215976Sjmallett bit-times. 
5985215976Sjmallett Normally, 0 < INTERVAL < GMX_TX_PAUSE_PKT_TIME 5986215976Sjmallett INTERVAL=0, will only send a single PAUSE packet 5987215976Sjmallett for each backpressure event */ 5988215976Sjmallett#else 5989215976Sjmallett uint64_t interval : 16; 5990215976Sjmallett uint64_t reserved_16_63 : 48; 5991215976Sjmallett#endif 5992215976Sjmallett } s; 5993215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx; 5994215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx; 5995215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx; 5996215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2; 5997215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx; 5998215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx; 5999215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1; 6000215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx; 6001215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1; 6002215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx; 6003215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1; 6004215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx; 6005215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1; 6006215976Sjmallett}; 6007215976Sjmalletttypedef union cvmx_gmxx_txx_pause_pkt_interval cvmx_gmxx_txx_pause_pkt_interval_t; 6008215976Sjmallett 6009215976Sjmallett/** 6010215976Sjmallett * cvmx_gmx#_tx#_pause_pkt_time 6011215976Sjmallett * 6012215976Sjmallett * GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field 6013215976Sjmallett * 6014215976Sjmallett * 6015215976Sjmallett * Notes: 6016215976Sjmallett * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and 6017215976Sjmallett * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system 6018215976Sjmallett * designer. It is suggested that TIME be much greater than INTERVAL and 6019215976Sjmallett * GMX_TX_PAUSE_ZERO[SEND] be set. 
This allows a periodic refresh of the PAUSE 6020215976Sjmallett * count and then when the backpressure condition is lifted, a PAUSE packet 6021215976Sjmallett * with TIME==0 will be sent indicating that Octane is ready for additional 6022215976Sjmallett * data. 6023215976Sjmallett * 6024215976Sjmallett * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is 6025215976Sjmallett * suggested that TIME and INTERVAL are programmed such that they satisify the 6026215976Sjmallett * following rule... 6027215976Sjmallett * 6028215976Sjmallett * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size) 6029215976Sjmallett * 6030215976Sjmallett * where largest_pkt_size is that largest packet that the system can send 6031215976Sjmallett * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size 6032215976Sjmallett * of the PAUSE packet (normally 64B). 6033215976Sjmallett */ 6034215976Sjmallettunion cvmx_gmxx_txx_pause_pkt_time 6035215976Sjmallett{ 6036215976Sjmallett uint64_t u64; 6037215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s 6038215976Sjmallett { 6039215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6040215976Sjmallett uint64_t reserved_16_63 : 48; 6041215976Sjmallett uint64_t time : 16; /**< The pause_time field placed in outbnd 802.3 pause 6042215976Sjmallett packets, HiGig2 messages, or CBFC pause packets. 
6043215976Sjmallett pause_time is in 512 bit-times 6044215976Sjmallett Normally, TIME > GMX_TX_PAUSE_PKT_INTERVAL */ 6045215976Sjmallett#else 6046215976Sjmallett uint64_t time : 16; 6047215976Sjmallett uint64_t reserved_16_63 : 48; 6048215976Sjmallett#endif 6049215976Sjmallett } s; 6050215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx; 6051215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx; 6052215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx; 6053215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2; 6054215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx; 6055215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx; 6056215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1; 6057215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx; 6058215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1; 6059215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx; 6060215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1; 6061215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx; 6062215976Sjmallett struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1; 6063215976Sjmallett}; 6064215976Sjmalletttypedef union cvmx_gmxx_txx_pause_pkt_time cvmx_gmxx_txx_pause_pkt_time_t; 6065215976Sjmallett 6066215976Sjmallett/** 6067215976Sjmallett * cvmx_gmx#_tx#_pause_togo 6068215976Sjmallett * 6069215976Sjmallett * GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure 6070215976Sjmallett * 6071215976Sjmallett */ 6072215976Sjmallettunion cvmx_gmxx_txx_pause_togo 6073215976Sjmallett{ 6074215976Sjmallett uint64_t u64; 6075215976Sjmallett struct cvmx_gmxx_txx_pause_togo_s 6076215976Sjmallett { 6077215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6078215976Sjmallett uint64_t reserved_32_63 : 32; 6079215976Sjmallett uint64_t msg_time : 16; /**< Amount of time remaining to backpressure 6080215976Sjmallett From the higig2 physical message pause timer 6081215976Sjmallett (only valid on port0) */ 
6082215976Sjmallett uint64_t time : 16; /**< Amount of time remaining to backpressure 6083215976Sjmallett From the standard 802.3 pause timer */ 6084215976Sjmallett#else 6085215976Sjmallett uint64_t time : 16; 6086215976Sjmallett uint64_t msg_time : 16; 6087215976Sjmallett uint64_t reserved_32_63 : 32; 6088215976Sjmallett#endif 6089215976Sjmallett } s; 6090215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx 6091215976Sjmallett { 6092215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6093215976Sjmallett uint64_t reserved_16_63 : 48; 6094215976Sjmallett uint64_t time : 16; /**< Amount of time remaining to backpressure */ 6095215976Sjmallett#else 6096215976Sjmallett uint64_t time : 16; 6097215976Sjmallett uint64_t reserved_16_63 : 48; 6098215976Sjmallett#endif 6099215976Sjmallett } cn30xx; 6100215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx; 6101215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx; 6102215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2; 6103215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx; 6104215976Sjmallett struct cvmx_gmxx_txx_pause_togo_s cn52xx; 6105215976Sjmallett struct cvmx_gmxx_txx_pause_togo_s cn52xxp1; 6106215976Sjmallett struct cvmx_gmxx_txx_pause_togo_s cn56xx; 6107215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1; 6108215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx; 6109215976Sjmallett struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1; 6110215976Sjmallett struct cvmx_gmxx_txx_pause_togo_s cn63xx; 6111215976Sjmallett struct cvmx_gmxx_txx_pause_togo_s cn63xxp1; 6112215976Sjmallett}; 6113215976Sjmalletttypedef union cvmx_gmxx_txx_pause_togo cvmx_gmxx_txx_pause_togo_t; 6114215976Sjmallett 6115215976Sjmallett/** 6116215976Sjmallett * cvmx_gmx#_tx#_pause_zero 6117215976Sjmallett * 6118215976Sjmallett * GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure 6119215976Sjmallett * 6120215976Sjmallett */ 6121215976Sjmallettunion cvmx_gmxx_txx_pause_zero 
6122215976Sjmallett{ 6123215976Sjmallett uint64_t u64; 6124215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s 6125215976Sjmallett { 6126215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6127215976Sjmallett uint64_t reserved_1_63 : 63; 6128215976Sjmallett uint64_t send : 1; /**< When backpressure condition clear, send PAUSE 6129215976Sjmallett packet with pause_time of zero to enable the 6130215976Sjmallett channel */ 6131215976Sjmallett#else 6132215976Sjmallett uint64_t send : 1; 6133215976Sjmallett uint64_t reserved_1_63 : 63; 6134215976Sjmallett#endif 6135215976Sjmallett } s; 6136215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn30xx; 6137215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn31xx; 6138215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn38xx; 6139215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn38xxp2; 6140215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn50xx; 6141215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn52xx; 6142215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn52xxp1; 6143215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn56xx; 6144215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn56xxp1; 6145215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn58xx; 6146215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn58xxp1; 6147215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn63xx; 6148215976Sjmallett struct cvmx_gmxx_txx_pause_zero_s cn63xxp1; 6149215976Sjmallett}; 6150215976Sjmalletttypedef union cvmx_gmxx_txx_pause_zero cvmx_gmxx_txx_pause_zero_t; 6151215976Sjmallett 6152215976Sjmallett/** 6153215976Sjmallett * cvmx_gmx#_tx#_sgmii_ctl 6154215976Sjmallett */ 6155215976Sjmallettunion cvmx_gmxx_txx_sgmii_ctl 6156215976Sjmallett{ 6157215976Sjmallett uint64_t u64; 6158215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s 6159215976Sjmallett { 6160215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6161215976Sjmallett uint64_t reserved_1_63 : 63; 6162215976Sjmallett uint64_t align : 1; /**< Align the transmission to even cycles 
6163215976Sjmallett 0 = Data can be sent on any cycle 6164215976Sjmallett Possible to for the TX PCS machine to drop 6165215976Sjmallett first byte of preamble 6166215976Sjmallett 1 = Data will only be sent on even cycles 6167215976Sjmallett There will be no loss of data 6168215976Sjmallett (SGMII/1000Base-X only) */ 6169215976Sjmallett#else 6170215976Sjmallett uint64_t align : 1; 6171215976Sjmallett uint64_t reserved_1_63 : 63; 6172215976Sjmallett#endif 6173215976Sjmallett } s; 6174215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx; 6175215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1; 6176215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx; 6177215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1; 6178215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s cn63xx; 6179215976Sjmallett struct cvmx_gmxx_txx_sgmii_ctl_s cn63xxp1; 6180215976Sjmallett}; 6181215976Sjmalletttypedef union cvmx_gmxx_txx_sgmii_ctl cvmx_gmxx_txx_sgmii_ctl_t; 6182215976Sjmallett 6183215976Sjmallett/** 6184215976Sjmallett * cvmx_gmx#_tx#_slot 6185215976Sjmallett * 6186215976Sjmallett * GMX_TX_SLOT = Packet TX Slottime Counter 6187215976Sjmallett * 6188215976Sjmallett */ 6189215976Sjmallettunion cvmx_gmxx_txx_slot 6190215976Sjmallett{ 6191215976Sjmallett uint64_t u64; 6192215976Sjmallett struct cvmx_gmxx_txx_slot_s 6193215976Sjmallett { 6194215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6195215976Sjmallett uint64_t reserved_10_63 : 54; 6196215976Sjmallett uint64_t slot : 10; /**< Slottime (refer to 802.3 to set correctly) 6197215976Sjmallett 10/100Mbs: 0x40 6198215976Sjmallett 1000Mbs: 0x200 6199215976Sjmallett (SGMII/1000Base-X only) */ 6200215976Sjmallett#else 6201215976Sjmallett uint64_t slot : 10; 6202215976Sjmallett uint64_t reserved_10_63 : 54; 6203215976Sjmallett#endif 6204215976Sjmallett } s; 6205215976Sjmallett struct cvmx_gmxx_txx_slot_s cn30xx; 6206215976Sjmallett struct cvmx_gmxx_txx_slot_s cn31xx; 6207215976Sjmallett struct cvmx_gmxx_txx_slot_s cn38xx; 
6208215976Sjmallett struct cvmx_gmxx_txx_slot_s cn38xxp2; 6209215976Sjmallett struct cvmx_gmxx_txx_slot_s cn50xx; 6210215976Sjmallett struct cvmx_gmxx_txx_slot_s cn52xx; 6211215976Sjmallett struct cvmx_gmxx_txx_slot_s cn52xxp1; 6212215976Sjmallett struct cvmx_gmxx_txx_slot_s cn56xx; 6213215976Sjmallett struct cvmx_gmxx_txx_slot_s cn56xxp1; 6214215976Sjmallett struct cvmx_gmxx_txx_slot_s cn58xx; 6215215976Sjmallett struct cvmx_gmxx_txx_slot_s cn58xxp1; 6216215976Sjmallett struct cvmx_gmxx_txx_slot_s cn63xx; 6217215976Sjmallett struct cvmx_gmxx_txx_slot_s cn63xxp1; 6218215976Sjmallett}; 6219215976Sjmalletttypedef union cvmx_gmxx_txx_slot cvmx_gmxx_txx_slot_t; 6220215976Sjmallett 6221215976Sjmallett/** 6222215976Sjmallett * cvmx_gmx#_tx#_soft_pause 6223215976Sjmallett * 6224215976Sjmallett * GMX_TX_SOFT_PAUSE = Packet TX Software Pause 6225215976Sjmallett * 6226215976Sjmallett */ 6227215976Sjmallettunion cvmx_gmxx_txx_soft_pause 6228215976Sjmallett{ 6229215976Sjmallett uint64_t u64; 6230215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s 6231215976Sjmallett { 6232215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6233215976Sjmallett uint64_t reserved_16_63 : 48; 6234215976Sjmallett uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times */ 6235215976Sjmallett#else 6236215976Sjmallett uint64_t time : 16; 6237215976Sjmallett uint64_t reserved_16_63 : 48; 6238215976Sjmallett#endif 6239215976Sjmallett } s; 6240215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn30xx; 6241215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn31xx; 6242215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn38xx; 6243215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn38xxp2; 6244215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn50xx; 6245215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn52xx; 6246215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn52xxp1; 6247215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn56xx; 6248215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn56xxp1; 
6249215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn58xx; 6250215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn58xxp1; 6251215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn63xx; 6252215976Sjmallett struct cvmx_gmxx_txx_soft_pause_s cn63xxp1; 6253215976Sjmallett}; 6254215976Sjmalletttypedef union cvmx_gmxx_txx_soft_pause cvmx_gmxx_txx_soft_pause_t; 6255215976Sjmallett 6256215976Sjmallett/** 6257215976Sjmallett * cvmx_gmx#_tx#_stat0 6258215976Sjmallett * 6259215976Sjmallett * GMX_TX_STAT0 = GMX_TX_STATS_XSDEF / GMX_TX_STATS_XSCOL 6260215976Sjmallett * 6261215976Sjmallett * 6262215976Sjmallett * Notes: 6263215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6264215976Sjmallett * - Counters will wrap 6265215976Sjmallett */ 6266215976Sjmallettunion cvmx_gmxx_txx_stat0 6267215976Sjmallett{ 6268215976Sjmallett uint64_t u64; 6269215976Sjmallett struct cvmx_gmxx_txx_stat0_s 6270215976Sjmallett { 6271215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6272215976Sjmallett uint64_t xsdef : 32; /**< Number of packets dropped (never successfully 6273215976Sjmallett sent) due to excessive deferal 6274215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 6275215976Sjmallett uint64_t xscol : 32; /**< Number of packets dropped (never successfully 6276215976Sjmallett sent) due to excessive collision. Defined by 6277215976Sjmallett GMX_TX_COL_ATTEMPT[LIMIT]. 
6278215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 6279215976Sjmallett#else 6280215976Sjmallett uint64_t xscol : 32; 6281215976Sjmallett uint64_t xsdef : 32; 6282215976Sjmallett#endif 6283215976Sjmallett } s; 6284215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn30xx; 6285215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn31xx; 6286215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn38xx; 6287215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn38xxp2; 6288215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn50xx; 6289215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn52xx; 6290215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn52xxp1; 6291215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn56xx; 6292215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn56xxp1; 6293215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn58xx; 6294215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn58xxp1; 6295215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn63xx; 6296215976Sjmallett struct cvmx_gmxx_txx_stat0_s cn63xxp1; 6297215976Sjmallett}; 6298215976Sjmalletttypedef union cvmx_gmxx_txx_stat0 cvmx_gmxx_txx_stat0_t; 6299215976Sjmallett 6300215976Sjmallett/** 6301215976Sjmallett * cvmx_gmx#_tx#_stat1 6302215976Sjmallett * 6303215976Sjmallett * GMX_TX_STAT1 = GMX_TX_STATS_SCOL / GMX_TX_STATS_MCOL 6304215976Sjmallett * 6305215976Sjmallett * 6306215976Sjmallett * Notes: 6307215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6308215976Sjmallett * - Counters will wrap 6309215976Sjmallett */ 6310215976Sjmallettunion cvmx_gmxx_txx_stat1 6311215976Sjmallett{ 6312215976Sjmallett uint64_t u64; 6313215976Sjmallett struct cvmx_gmxx_txx_stat1_s 6314215976Sjmallett { 6315215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6316215976Sjmallett uint64_t scol : 32; /**< Number of packets sent with a single collision 6317215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 6318215976Sjmallett uint64_t mcol : 32; /**< Number of packets sent with multiple collisions 6319215976Sjmallett but < 
GMX_TX_COL_ATTEMPT[LIMIT]. 6320215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 6321215976Sjmallett#else 6322215976Sjmallett uint64_t mcol : 32; 6323215976Sjmallett uint64_t scol : 32; 6324215976Sjmallett#endif 6325215976Sjmallett } s; 6326215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn30xx; 6327215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn31xx; 6328215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn38xx; 6329215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn38xxp2; 6330215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn50xx; 6331215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn52xx; 6332215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn52xxp1; 6333215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn56xx; 6334215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn56xxp1; 6335215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn58xx; 6336215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn58xxp1; 6337215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn63xx; 6338215976Sjmallett struct cvmx_gmxx_txx_stat1_s cn63xxp1; 6339215976Sjmallett}; 6340215976Sjmalletttypedef union cvmx_gmxx_txx_stat1 cvmx_gmxx_txx_stat1_t; 6341215976Sjmallett 6342215976Sjmallett/** 6343215976Sjmallett * cvmx_gmx#_tx#_stat2 6344215976Sjmallett * 6345215976Sjmallett * GMX_TX_STAT2 = GMX_TX_STATS_OCTS 6346215976Sjmallett * 6347215976Sjmallett * 6348215976Sjmallett * Notes: 6349215976Sjmallett * - Octect counts are the sum of all data transmitted on the wire including 6350215976Sjmallett * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octect 6351215976Sjmallett * counts do not include PREAMBLE byte or EXTEND cycles. 
6352215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6353215976Sjmallett * - Counters will wrap 6354215976Sjmallett */ 6355215976Sjmallettunion cvmx_gmxx_txx_stat2 6356215976Sjmallett{ 6357215976Sjmallett uint64_t u64; 6358215976Sjmallett struct cvmx_gmxx_txx_stat2_s 6359215976Sjmallett { 6360215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6361215976Sjmallett uint64_t reserved_48_63 : 16; 6362215976Sjmallett uint64_t octs : 48; /**< Number of total octets sent on the interface. 6363215976Sjmallett Does not count octets from frames that were 6364215976Sjmallett truncated due to collisions in halfdup mode. */ 6365215976Sjmallett#else 6366215976Sjmallett uint64_t octs : 48; 6367215976Sjmallett uint64_t reserved_48_63 : 16; 6368215976Sjmallett#endif 6369215976Sjmallett } s; 6370215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn30xx; 6371215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn31xx; 6372215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn38xx; 6373215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn38xxp2; 6374215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn50xx; 6375215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn52xx; 6376215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn52xxp1; 6377215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn56xx; 6378215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn56xxp1; 6379215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn58xx; 6380215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn58xxp1; 6381215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn63xx; 6382215976Sjmallett struct cvmx_gmxx_txx_stat2_s cn63xxp1; 6383215976Sjmallett}; 6384215976Sjmalletttypedef union cvmx_gmxx_txx_stat2 cvmx_gmxx_txx_stat2_t; 6385215976Sjmallett 6386215976Sjmallett/** 6387215976Sjmallett * cvmx_gmx#_tx#_stat3 6388215976Sjmallett * 6389215976Sjmallett * GMX_TX_STAT3 = GMX_TX_STATS_PKTS 6390215976Sjmallett * 6391215976Sjmallett * 6392215976Sjmallett * Notes: 6393215976Sjmallett * - Cleared either by a write (of any value) or a read 
when GMX_TX_STATS_CTL[RD_CLR] is set 6394215976Sjmallett * - Counters will wrap 6395215976Sjmallett */ 6396215976Sjmallettunion cvmx_gmxx_txx_stat3 6397215976Sjmallett{ 6398215976Sjmallett uint64_t u64; 6399215976Sjmallett struct cvmx_gmxx_txx_stat3_s 6400215976Sjmallett { 6401215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6402215976Sjmallett uint64_t reserved_32_63 : 32; 6403215976Sjmallett uint64_t pkts : 32; /**< Number of total frames sent on the interface. 6404215976Sjmallett Does not count frames that were truncated due to 6405215976Sjmallett collisions in halfdup mode. */ 6406215976Sjmallett#else 6407215976Sjmallett uint64_t pkts : 32; 6408215976Sjmallett uint64_t reserved_32_63 : 32; 6409215976Sjmallett#endif 6410215976Sjmallett } s; 6411215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn30xx; 6412215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn31xx; 6413215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn38xx; 6414215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn38xxp2; 6415215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn50xx; 6416215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn52xx; 6417215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn52xxp1; 6418215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn56xx; 6419215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn56xxp1; 6420215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn58xx; 6421215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn58xxp1; 6422215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn63xx; 6423215976Sjmallett struct cvmx_gmxx_txx_stat3_s cn63xxp1; 6424215976Sjmallett}; 6425215976Sjmalletttypedef union cvmx_gmxx_txx_stat3 cvmx_gmxx_txx_stat3_t; 6426215976Sjmallett 6427215976Sjmallett/** 6428215976Sjmallett * cvmx_gmx#_tx#_stat4 6429215976Sjmallett * 6430215976Sjmallett * GMX_TX_STAT4 = GMX_TX_STATS_HIST1 (64) / GMX_TX_STATS_HIST0 (<64) 6431215976Sjmallett * 6432215976Sjmallett * 6433215976Sjmallett * Notes: 6434215976Sjmallett * - Packet length is the sum of all data transmitted on the wire for the given 6435215976Sjmallett * packet 
including packet data, pad bytes, fcs bytes, pause bytes, and jam 6436215976Sjmallett * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles. 6437215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6438215976Sjmallett * - Counters will wrap 6439215976Sjmallett */ 6440215976Sjmallettunion cvmx_gmxx_txx_stat4 6441215976Sjmallett{ 6442215976Sjmallett uint64_t u64; 6443215976Sjmallett struct cvmx_gmxx_txx_stat4_s 6444215976Sjmallett { 6445215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6446215976Sjmallett uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */ 6447215976Sjmallett uint64_t hist0 : 32; /**< Number of packets sent with an octet count 6448215976Sjmallett of < 64. */ 6449215976Sjmallett#else 6450215976Sjmallett uint64_t hist0 : 32; 6451215976Sjmallett uint64_t hist1 : 32; 6452215976Sjmallett#endif 6453215976Sjmallett } s; 6454215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn30xx; 6455215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn31xx; 6456215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn38xx; 6457215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn38xxp2; 6458215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn50xx; 6459215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn52xx; 6460215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn52xxp1; 6461215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn56xx; 6462215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn56xxp1; 6463215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn58xx; 6464215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn58xxp1; 6465215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn63xx; 6466215976Sjmallett struct cvmx_gmxx_txx_stat4_s cn63xxp1; 6467215976Sjmallett}; 6468215976Sjmalletttypedef union cvmx_gmxx_txx_stat4 cvmx_gmxx_txx_stat4_t; 6469215976Sjmallett 6470215976Sjmallett/** 6471215976Sjmallett * cvmx_gmx#_tx#_stat5 6472215976Sjmallett * 6473215976Sjmallett * GMX_TX_STAT5 = GMX_TX_STATS_HIST3 (128- 255) / GMX_TX_STATS_HIST2 (65- 127) 
6474215976Sjmallett * 6475215976Sjmallett * 6476215976Sjmallett * Notes: 6477215976Sjmallett * - Packet length is the sum of all data transmitted on the wire for the given 6478215976Sjmallett * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam 6479215976Sjmallett * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles. 6480215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6481215976Sjmallett * - Counters will wrap 6482215976Sjmallett */ 6483215976Sjmallettunion cvmx_gmxx_txx_stat5 6484215976Sjmallett{ 6485215976Sjmallett uint64_t u64; 6486215976Sjmallett struct cvmx_gmxx_txx_stat5_s 6487215976Sjmallett { 6488215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6489215976Sjmallett uint64_t hist3 : 32; /**< Number of packets sent with an octet count of 6490215976Sjmallett 128 - 255. */ 6491215976Sjmallett uint64_t hist2 : 32; /**< Number of packets sent with an octet count of 6492215976Sjmallett 65 - 127. 
*/ 6493215976Sjmallett#else 6494215976Sjmallett uint64_t hist2 : 32; 6495215976Sjmallett uint64_t hist3 : 32; 6496215976Sjmallett#endif 6497215976Sjmallett } s; 6498215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn30xx; 6499215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn31xx; 6500215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn38xx; 6501215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn38xxp2; 6502215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn50xx; 6503215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn52xx; 6504215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn52xxp1; 6505215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn56xx; 6506215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn56xxp1; 6507215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn58xx; 6508215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn58xxp1; 6509215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn63xx; 6510215976Sjmallett struct cvmx_gmxx_txx_stat5_s cn63xxp1; 6511215976Sjmallett}; 6512215976Sjmalletttypedef union cvmx_gmxx_txx_stat5 cvmx_gmxx_txx_stat5_t; 6513215976Sjmallett 6514215976Sjmallett/** 6515215976Sjmallett * cvmx_gmx#_tx#_stat6 6516215976Sjmallett * 6517215976Sjmallett * GMX_TX_STAT6 = GMX_TX_STATS_HIST5 (512-1023) / GMX_TX_STATS_HIST4 (256-511) 6518215976Sjmallett * 6519215976Sjmallett * 6520215976Sjmallett * Notes: 6521215976Sjmallett * - Packet length is the sum of all data transmitted on the wire for the given 6522215976Sjmallett * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam 6523215976Sjmallett * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles. 
6524215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6525215976Sjmallett * - Counters will wrap 6526215976Sjmallett */ 6527215976Sjmallettunion cvmx_gmxx_txx_stat6 6528215976Sjmallett{ 6529215976Sjmallett uint64_t u64; 6530215976Sjmallett struct cvmx_gmxx_txx_stat6_s 6531215976Sjmallett { 6532215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6533215976Sjmallett uint64_t hist5 : 32; /**< Number of packets sent with an octet count of 6534215976Sjmallett 512 - 1023. */ 6535215976Sjmallett uint64_t hist4 : 32; /**< Number of packets sent with an octet count of 6536215976Sjmallett 256 - 511. */ 6537215976Sjmallett#else 6538215976Sjmallett uint64_t hist4 : 32; 6539215976Sjmallett uint64_t hist5 : 32; 6540215976Sjmallett#endif 6541215976Sjmallett } s; 6542215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn30xx; 6543215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn31xx; 6544215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn38xx; 6545215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn38xxp2; 6546215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn50xx; 6547215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn52xx; 6548215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn52xxp1; 6549215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn56xx; 6550215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn56xxp1; 6551215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn58xx; 6552215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn58xxp1; 6553215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn63xx; 6554215976Sjmallett struct cvmx_gmxx_txx_stat6_s cn63xxp1; 6555215976Sjmallett}; 6556215976Sjmalletttypedef union cvmx_gmxx_txx_stat6 cvmx_gmxx_txx_stat6_t; 6557215976Sjmallett 6558215976Sjmallett/** 6559215976Sjmallett * cvmx_gmx#_tx#_stat7 6560215976Sjmallett * 6561215976Sjmallett * GMX_TX_STAT7 = GMX_TX_STATS_HIST7 (1024-1518) / GMX_TX_STATS_HIST6 (>1518) 6562215976Sjmallett * 6563215976Sjmallett * 6564215976Sjmallett * Notes: 6565215976Sjmallett * - Packet length is the sum of all data 
transmitted on the wire for the given 6566215976Sjmallett * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam 6567215976Sjmallett * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles. 6568215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6569215976Sjmallett * - Counters will wrap 6570215976Sjmallett */ 6571215976Sjmallettunion cvmx_gmxx_txx_stat7 6572215976Sjmallett{ 6573215976Sjmallett uint64_t u64; 6574215976Sjmallett struct cvmx_gmxx_txx_stat7_s 6575215976Sjmallett { 6576215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6577215976Sjmallett uint64_t hist7 : 32; /**< Number of packets sent with an octet count 6578215976Sjmallett of > 1518. */ 6579215976Sjmallett uint64_t hist6 : 32; /**< Number of packets sent with an octet count of 6580215976Sjmallett 1024 - 1518. */ 6581215976Sjmallett#else 6582215976Sjmallett uint64_t hist6 : 32; 6583215976Sjmallett uint64_t hist7 : 32; 6584215976Sjmallett#endif 6585215976Sjmallett } s; 6586215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn30xx; 6587215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn31xx; 6588215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn38xx; 6589215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn38xxp2; 6590215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn50xx; 6591215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn52xx; 6592215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn52xxp1; 6593215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn56xx; 6594215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn56xxp1; 6595215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn58xx; 6596215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn58xxp1; 6597215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn63xx; 6598215976Sjmallett struct cvmx_gmxx_txx_stat7_s cn63xxp1; 6599215976Sjmallett}; 6600215976Sjmalletttypedef union cvmx_gmxx_txx_stat7 cvmx_gmxx_txx_stat7_t; 6601215976Sjmallett 6602215976Sjmallett/** 6603215976Sjmallett * cvmx_gmx#_tx#_stat8 6604215976Sjmallett * 
6605215976Sjmallett * GMX_TX_STAT8 = GMX_TX_STATS_MCST / GMX_TX_STATS_BCST 6606215976Sjmallett * 6607215976Sjmallett * 6608215976Sjmallett * Notes: 6609215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6610215976Sjmallett * - Counters will wrap 6611215976Sjmallett * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the 6612215976Sjmallett * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet 6613215976Sjmallett * as per the 802.3 frame definition. If the system requires additional data 6614215976Sjmallett * before the L2 header, then the MCST and BCST counters may not reflect 6615215976Sjmallett * reality and should be ignored by software. 6616215976Sjmallett */ 6617215976Sjmallettunion cvmx_gmxx_txx_stat8 6618215976Sjmallett{ 6619215976Sjmallett uint64_t u64; 6620215976Sjmallett struct cvmx_gmxx_txx_stat8_s 6621215976Sjmallett { 6622215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6623215976Sjmallett uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC. 6624215976Sjmallett Does not include BCST packets. */ 6625215976Sjmallett uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC. 6626215976Sjmallett Does not include MCST packets. 
*/ 6627215976Sjmallett#else 6628215976Sjmallett uint64_t bcst : 32; 6629215976Sjmallett uint64_t mcst : 32; 6630215976Sjmallett#endif 6631215976Sjmallett } s; 6632215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn30xx; 6633215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn31xx; 6634215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn38xx; 6635215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn38xxp2; 6636215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn50xx; 6637215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn52xx; 6638215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn52xxp1; 6639215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn56xx; 6640215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn56xxp1; 6641215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn58xx; 6642215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn58xxp1; 6643215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn63xx; 6644215976Sjmallett struct cvmx_gmxx_txx_stat8_s cn63xxp1; 6645215976Sjmallett}; 6646215976Sjmalletttypedef union cvmx_gmxx_txx_stat8 cvmx_gmxx_txx_stat8_t; 6647215976Sjmallett 6648215976Sjmallett/** 6649215976Sjmallett * cvmx_gmx#_tx#_stat9 6650215976Sjmallett * 6651215976Sjmallett * GMX_TX_STAT9 = GMX_TX_STATS_UNDFLW / GMX_TX_STATS_CTL 6652215976Sjmallett * 6653215976Sjmallett * 6654215976Sjmallett * Notes: 6655215976Sjmallett * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set 6656215976Sjmallett * - Counters will wrap 6657215976Sjmallett */ 6658215976Sjmallettunion cvmx_gmxx_txx_stat9 6659215976Sjmallett{ 6660215976Sjmallett uint64_t u64; 6661215976Sjmallett struct cvmx_gmxx_txx_stat9_s 6662215976Sjmallett { 6663215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6664215976Sjmallett uint64_t undflw : 32; /**< Number of underflow packets */ 6665215976Sjmallett uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control) 6666215976Sjmallett generated by GMX. It does not include control 6667215976Sjmallett packets forwarded or generated by the PP's. 
*/ 6668215976Sjmallett#else 6669215976Sjmallett uint64_t ctl : 32; 6670215976Sjmallett uint64_t undflw : 32; 6671215976Sjmallett#endif 6672215976Sjmallett } s; 6673215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn30xx; 6674215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn31xx; 6675215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn38xx; 6676215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn38xxp2; 6677215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn50xx; 6678215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn52xx; 6679215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn52xxp1; 6680215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn56xx; 6681215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn56xxp1; 6682215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn58xx; 6683215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn58xxp1; 6684215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn63xx; 6685215976Sjmallett struct cvmx_gmxx_txx_stat9_s cn63xxp1; 6686215976Sjmallett}; 6687215976Sjmalletttypedef union cvmx_gmxx_txx_stat9 cvmx_gmxx_txx_stat9_t; 6688215976Sjmallett 6689215976Sjmallett/** 6690215976Sjmallett * cvmx_gmx#_tx#_stats_ctl 6691215976Sjmallett * 6692215976Sjmallett * GMX_TX_STATS_CTL = TX Stats Control register 6693215976Sjmallett * 6694215976Sjmallett */ 6695215976Sjmallettunion cvmx_gmxx_txx_stats_ctl 6696215976Sjmallett{ 6697215976Sjmallett uint64_t u64; 6698215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s 6699215976Sjmallett { 6700215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6701215976Sjmallett uint64_t reserved_1_63 : 63; 6702215976Sjmallett uint64_t rd_clr : 1; /**< Stats registers will clear on reads */ 6703215976Sjmallett#else 6704215976Sjmallett uint64_t rd_clr : 1; 6705215976Sjmallett uint64_t reserved_1_63 : 63; 6706215976Sjmallett#endif 6707215976Sjmallett } s; 6708215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn30xx; 6709215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn31xx; 6710215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn38xx; 6711215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s 
cn38xxp2; 6712215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn50xx; 6713215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn52xx; 6714215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1; 6715215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn56xx; 6716215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1; 6717215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn58xx; 6718215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1; 6719215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn63xx; 6720215976Sjmallett struct cvmx_gmxx_txx_stats_ctl_s cn63xxp1; 6721215976Sjmallett}; 6722215976Sjmalletttypedef union cvmx_gmxx_txx_stats_ctl cvmx_gmxx_txx_stats_ctl_t; 6723215976Sjmallett 6724215976Sjmallett/** 6725215976Sjmallett * cvmx_gmx#_tx#_thresh 6726215976Sjmallett * 6727215976Sjmallett * Per Port 6728215976Sjmallett * 6729215976Sjmallett * 6730215976Sjmallett * GMX_TX_THRESH = Packet TX Threshold 6731215976Sjmallett * 6732215976Sjmallett * Notes: 6733215976Sjmallett * In XAUI mode, prt0 is used for checking. Since XAUI mode uses a single TX FIFO and is higher data rate, recommended value is 0x100. 6734215976Sjmallett * 6735215976Sjmallett */ 6736215976Sjmallettunion cvmx_gmxx_txx_thresh 6737215976Sjmallett{ 6738215976Sjmallett uint64_t u64; 6739215976Sjmallett struct cvmx_gmxx_txx_thresh_s 6740215976Sjmallett { 6741215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6742215976Sjmallett uint64_t reserved_9_63 : 55; 6743215976Sjmallett uint64_t cnt : 9; /**< Number of 16B ticks to accumulate in the TX FIFO 6744215976Sjmallett before sending on the packet interface 6745215976Sjmallett This register should be large enough to prevent 6746215976Sjmallett underflow on the packet interface and must never 6747215976Sjmallett be set to zero. This register cannot exceed the 6748215976Sjmallett the TX FIFO depth which is... 
6749215976Sjmallett GMX_TX_PRTS==0,1: CNT MAX = 0x100 6750215976Sjmallett GMX_TX_PRTS==2 : CNT MAX = 0x080 6751215976Sjmallett GMX_TX_PRTS==3,4: CNT MAX = 0x040 */ 6752215976Sjmallett#else 6753215976Sjmallett uint64_t cnt : 9; 6754215976Sjmallett uint64_t reserved_9_63 : 55; 6755215976Sjmallett#endif 6756215976Sjmallett } s; 6757215976Sjmallett struct cvmx_gmxx_txx_thresh_cn30xx 6758215976Sjmallett { 6759215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6760215976Sjmallett uint64_t reserved_7_63 : 57; 6761215976Sjmallett uint64_t cnt : 7; /**< Number of 16B ticks to accumulate in the TX FIFO 6762215976Sjmallett before sending on the RGMII interface 6763215976Sjmallett This register should be large enough to prevent 6764215976Sjmallett underflow on the RGMII interface and must never 6765215976Sjmallett be set below 4. This register cannot exceed the 6766215976Sjmallett the TX FIFO depth which is 64 16B entries. */ 6767215976Sjmallett#else 6768215976Sjmallett uint64_t cnt : 7; 6769215976Sjmallett uint64_t reserved_7_63 : 57; 6770215976Sjmallett#endif 6771215976Sjmallett } cn30xx; 6772215976Sjmallett struct cvmx_gmxx_txx_thresh_cn30xx cn31xx; 6773215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn38xx; 6774215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn38xxp2; 6775215976Sjmallett struct cvmx_gmxx_txx_thresh_cn30xx cn50xx; 6776215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn52xx; 6777215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn52xxp1; 6778215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn56xx; 6779215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn56xxp1; 6780215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn58xx; 6781215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn58xxp1; 6782215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn63xx; 6783215976Sjmallett struct cvmx_gmxx_txx_thresh_s cn63xxp1; 6784215976Sjmallett}; 6785215976Sjmalletttypedef union cvmx_gmxx_txx_thresh cvmx_gmxx_txx_thresh_t; 6786215976Sjmallett 6787215976Sjmallett/** 6788215976Sjmallett * cvmx_gmx#_tx_bp 
6789215976Sjmallett * 6790215976Sjmallett * GMX_TX_BP = Packet Interface TX BackPressure Register 6791215976Sjmallett * 6792215976Sjmallett * 6793215976Sjmallett * Notes: 6794215976Sjmallett * In XAUI mode, only the lsb (corresponding to port0) of BP is used. 6795215976Sjmallett * 6796215976Sjmallett */ 6797215976Sjmallettunion cvmx_gmxx_tx_bp 6798215976Sjmallett{ 6799215976Sjmallett uint64_t u64; 6800215976Sjmallett struct cvmx_gmxx_tx_bp_s 6801215976Sjmallett { 6802215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6803215976Sjmallett uint64_t reserved_4_63 : 60; 6804215976Sjmallett uint64_t bp : 4; /**< Per port BackPressure status 6805215976Sjmallett 0=Port is available 6806215976Sjmallett 1=Port should be back pressured */ 6807215976Sjmallett#else 6808215976Sjmallett uint64_t bp : 4; 6809215976Sjmallett uint64_t reserved_4_63 : 60; 6810215976Sjmallett#endif 6811215976Sjmallett } s; 6812215976Sjmallett struct cvmx_gmxx_tx_bp_cn30xx 6813215976Sjmallett { 6814215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6815215976Sjmallett uint64_t reserved_3_63 : 61; 6816215976Sjmallett uint64_t bp : 3; /**< Per port BackPressure status 6817215976Sjmallett 0=Port is available 6818215976Sjmallett 1=Port should be back pressured */ 6819215976Sjmallett#else 6820215976Sjmallett uint64_t bp : 3; 6821215976Sjmallett uint64_t reserved_3_63 : 61; 6822215976Sjmallett#endif 6823215976Sjmallett } cn30xx; 6824215976Sjmallett struct cvmx_gmxx_tx_bp_cn30xx cn31xx; 6825215976Sjmallett struct cvmx_gmxx_tx_bp_s cn38xx; 6826215976Sjmallett struct cvmx_gmxx_tx_bp_s cn38xxp2; 6827215976Sjmallett struct cvmx_gmxx_tx_bp_cn30xx cn50xx; 6828215976Sjmallett struct cvmx_gmxx_tx_bp_s cn52xx; 6829215976Sjmallett struct cvmx_gmxx_tx_bp_s cn52xxp1; 6830215976Sjmallett struct cvmx_gmxx_tx_bp_s cn56xx; 6831215976Sjmallett struct cvmx_gmxx_tx_bp_s cn56xxp1; 6832215976Sjmallett struct cvmx_gmxx_tx_bp_s cn58xx; 6833215976Sjmallett struct cvmx_gmxx_tx_bp_s cn58xxp1; 6834215976Sjmallett struct cvmx_gmxx_tx_bp_s 
cn63xx; 6835215976Sjmallett struct cvmx_gmxx_tx_bp_s cn63xxp1; 6836215976Sjmallett}; 6837215976Sjmalletttypedef union cvmx_gmxx_tx_bp cvmx_gmxx_tx_bp_t; 6838215976Sjmallett 6839215976Sjmallett/** 6840215976Sjmallett * cvmx_gmx#_tx_clk_msk# 6841215976Sjmallett * 6842215976Sjmallett * GMX_TX_CLK_MSK = GMX Clock Select 6843215976Sjmallett * 6844215976Sjmallett */ 6845215976Sjmallettunion cvmx_gmxx_tx_clk_mskx 6846215976Sjmallett{ 6847215976Sjmallett uint64_t u64; 6848215976Sjmallett struct cvmx_gmxx_tx_clk_mskx_s 6849215976Sjmallett { 6850215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6851215976Sjmallett uint64_t reserved_1_63 : 63; 6852215976Sjmallett uint64_t msk : 1; /**< Write this bit to a 1 when switching clks */ 6853215976Sjmallett#else 6854215976Sjmallett uint64_t msk : 1; 6855215976Sjmallett uint64_t reserved_1_63 : 63; 6856215976Sjmallett#endif 6857215976Sjmallett } s; 6858215976Sjmallett struct cvmx_gmxx_tx_clk_mskx_s cn30xx; 6859215976Sjmallett struct cvmx_gmxx_tx_clk_mskx_s cn50xx; 6860215976Sjmallett}; 6861215976Sjmalletttypedef union cvmx_gmxx_tx_clk_mskx cvmx_gmxx_tx_clk_mskx_t; 6862215976Sjmallett 6863215976Sjmallett/** 6864215976Sjmallett * cvmx_gmx#_tx_col_attempt 6865215976Sjmallett * 6866215976Sjmallett * GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame 6867215976Sjmallett * 6868215976Sjmallett */ 6869215976Sjmallettunion cvmx_gmxx_tx_col_attempt 6870215976Sjmallett{ 6871215976Sjmallett uint64_t u64; 6872215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s 6873215976Sjmallett { 6874215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6875215976Sjmallett uint64_t reserved_5_63 : 59; 6876215976Sjmallett uint64_t limit : 5; /**< Collision Attempts 6877215976Sjmallett (SGMII/1000Base-X half-duplex only) */ 6878215976Sjmallett#else 6879215976Sjmallett uint64_t limit : 5; 6880215976Sjmallett uint64_t reserved_5_63 : 59; 6881215976Sjmallett#endif 6882215976Sjmallett } s; 6883215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn30xx; 
6884215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn31xx; 6885215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn38xx; 6886215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn38xxp2; 6887215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn50xx; 6888215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn52xx; 6889215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn52xxp1; 6890215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn56xx; 6891215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn56xxp1; 6892215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn58xx; 6893215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn58xxp1; 6894215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn63xx; 6895215976Sjmallett struct cvmx_gmxx_tx_col_attempt_s cn63xxp1; 6896215976Sjmallett}; 6897215976Sjmalletttypedef union cvmx_gmxx_tx_col_attempt cvmx_gmxx_tx_col_attempt_t; 6898215976Sjmallett 6899215976Sjmallett/** 6900215976Sjmallett * cvmx_gmx#_tx_corrupt 6901215976Sjmallett * 6902215976Sjmallett * GMX_TX_CORRUPT = TX - Corrupt TX packets with the ERR bit set 6903215976Sjmallett * 6904215976Sjmallett * 6905215976Sjmallett * Notes: 6906215976Sjmallett * Packets sent from PKO with the ERR wire asserted will be corrupted by 6907215976Sjmallett * the transmitter if CORRUPT[prt] is set (XAUI uses prt==0). 6908215976Sjmallett * 6909215976Sjmallett * Corruption means that GMX will send a bad FCS value. If GMX_TX_APPEND[FCS] 6910215976Sjmallett * is clear then no FCS is sent and the GMX cannot corrupt it. The corrupt FCS 6911215976Sjmallett * value is 0xeeeeeeee for SGMII/1000Base-X and 4 bytes of the error 6912215976Sjmallett * propagation code in XAUI mode. 
6913215976Sjmallett */ 6914215976Sjmallettunion cvmx_gmxx_tx_corrupt 6915215976Sjmallett{ 6916215976Sjmallett uint64_t u64; 6917215976Sjmallett struct cvmx_gmxx_tx_corrupt_s 6918215976Sjmallett { 6919215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6920215976Sjmallett uint64_t reserved_4_63 : 60; 6921215976Sjmallett uint64_t corrupt : 4; /**< Per port error propagation 6922215976Sjmallett 0=Never corrupt packets 6923215976Sjmallett 1=Corrupt packets with ERR */ 6924215976Sjmallett#else 6925215976Sjmallett uint64_t corrupt : 4; 6926215976Sjmallett uint64_t reserved_4_63 : 60; 6927215976Sjmallett#endif 6928215976Sjmallett } s; 6929215976Sjmallett struct cvmx_gmxx_tx_corrupt_cn30xx 6930215976Sjmallett { 6931215976Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 6932215976Sjmallett uint64_t reserved_3_63 : 61; 6933215976Sjmallett uint64_t corrupt : 3; /**< Per port error propagation 6934215976Sjmallett 0=Never corrupt packets 6935215976Sjmallett 1=Corrupt packets with ERR */ 6936215976Sjmallett#else 6937215976Sjmallett uint64_t corrupt : 3; 6938215976Sjmallett uint64_t reserved_3_63 : 61; 6939215976Sjmallett#endif 6940215976Sjmallett } cn30xx; 6941215976Sjmallett struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx; 6942215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn38xx; 6943215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn38xxp2; 6944215976Sjmallett struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx; 6945215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn52xx; 6946215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn52xxp1; 6947215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn56xx; 6948215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn56xxp1; 6949215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn58xx; 6950215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn58xxp1; 6951215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn63xx; 6952215976Sjmallett struct cvmx_gmxx_tx_corrupt_s cn63xxp1; 6953215976Sjmallett}; 6954215976Sjmalletttypedef union cvmx_gmxx_tx_corrupt cvmx_gmxx_tx_corrupt_t; 6955215976Sjmallett 
/**
 * cvmx_gmx#_tx_hg2_reg1
 *
 * Notes:
 * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
 * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
 * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
 * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
 * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
 */
union cvmx_gmxx_tx_hg2_reg1
{
    uint64_t u64;
    struct cvmx_gmxx_tx_hg2_reg1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t tx_xof                       : 16; /**< TX HiGig2 message for logical link pause when any
                                                     bit value changes
                                                     Only write in HiGig2 mode i.e. when
                                                     GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                     GMX_RX_UDD_SKP[SKIP]=16. */
#else
    uint64_t tx_xof                       : 16;
    uint64_t reserved_16_63               : 48;
#endif
    } s;
    struct cvmx_gmxx_tx_hg2_reg1_s       cn52xx;
    struct cvmx_gmxx_tx_hg2_reg1_s       cn52xxp1;
    struct cvmx_gmxx_tx_hg2_reg1_s       cn56xx;
    struct cvmx_gmxx_tx_hg2_reg1_s       cn63xx;
    struct cvmx_gmxx_tx_hg2_reg1_s       cn63xxp1;
};
typedef union cvmx_gmxx_tx_hg2_reg1 cvmx_gmxx_tx_hg2_reg1_t;

/**
 * cvmx_gmx#_tx_hg2_reg2
 *
 * Notes:
 * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
 * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
 * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
 * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
 * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
 */
union cvmx_gmxx_tx_hg2_reg2
{
    uint64_t u64;
    struct cvmx_gmxx_tx_hg2_reg2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t tx_xon                       : 16; /**< TX HiGig2 message for logical link pause when any
                                                     bit value changes
                                                     Only write in HiGig2 mode i.e. when
                                                     GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                     GMX_RX_UDD_SKP[SKIP]=16.
*/
#else
    uint64_t tx_xon                       : 16;
    uint64_t reserved_16_63               : 48;
#endif
    } s;
    struct cvmx_gmxx_tx_hg2_reg2_s       cn52xx;
    struct cvmx_gmxx_tx_hg2_reg2_s       cn52xxp1;
    struct cvmx_gmxx_tx_hg2_reg2_s       cn56xx;
    struct cvmx_gmxx_tx_hg2_reg2_s       cn63xx;
    struct cvmx_gmxx_tx_hg2_reg2_s       cn63xxp1;
};
typedef union cvmx_gmxx_tx_hg2_reg2 cvmx_gmxx_tx_hg2_reg2_t;

/**
 * cvmx_gmx#_tx_ifg
 *
 * GMX_TX_IFG = Packet TX Interframe Gap
 *
 *
 * Notes:
 * * Programming IFG1 and IFG2.
 *
 * For 10/100/1000Mbs half-duplex systems that require IEEE 802.3
 * compatibility, IFG1 must be in the range of 1-8, IFG2 must be in the range
 * of 4-12, and the IFG1+IFG2 sum must be 12.
 *
 * For 10/100/1000Mbs full-duplex systems that require IEEE 802.3
 * compatibility, IFG1 must be in the range of 1-11, IFG2 must be in the range
 * of 1-11, and the IFG1+IFG2 sum must be 12.
 *
 * For XAUI/10Gbs systems that require IEEE 802.3 compatibility, the
 * IFG1+IFG2 sum must be 12.  IFG1[1:0] and IFG2[1:0] must be zero.
 *
 * For all other systems, IFG1 and IFG2 can be any value in the range of
 * 1-15.  Allowing for a total possible IFG sum of 2-30.
 */
union cvmx_gmxx_tx_ifg
{
    uint64_t u64;
    struct cvmx_gmxx_tx_ifg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_8_63                : 56;
    uint64_t ifg2                         : 4;  /**< 1/3 of the interframe gap timing (in IFG2*8 bits)
                                                     If CRS is detected during IFG2, then the
                                                     interFrameSpacing timer is not reset and a frame
                                                     is transmitted once the timer expires. */
    uint64_t ifg1                         : 4;  /**< 2/3 of the interframe gap timing (in IFG1*8 bits)
                                                     If CRS is detected during IFG1, then the
                                                     interFrameSpacing timer is reset and a frame is
                                                     not transmitted. */
#else
    uint64_t ifg1                         : 4;
    uint64_t ifg2                         : 4;
    uint64_t reserved_8_63                : 56;
#endif
    } s;
    /* Same layout on every supported chip. */
    struct cvmx_gmxx_tx_ifg_s            cn30xx;
    struct cvmx_gmxx_tx_ifg_s            cn31xx;
    struct cvmx_gmxx_tx_ifg_s            cn38xx;
    struct cvmx_gmxx_tx_ifg_s            cn38xxp2;
    struct cvmx_gmxx_tx_ifg_s            cn50xx;
    struct cvmx_gmxx_tx_ifg_s            cn52xx;
    struct cvmx_gmxx_tx_ifg_s            cn52xxp1;
    struct cvmx_gmxx_tx_ifg_s            cn56xx;
    struct cvmx_gmxx_tx_ifg_s            cn56xxp1;
    struct cvmx_gmxx_tx_ifg_s            cn58xx;
    struct cvmx_gmxx_tx_ifg_s            cn58xxp1;
    struct cvmx_gmxx_tx_ifg_s            cn63xx;
    struct cvmx_gmxx_tx_ifg_s            cn63xxp1;
};
typedef union cvmx_gmxx_tx_ifg cvmx_gmxx_tx_ifg_t;

/**
 * cvmx_gmx#_tx_int_en
 *
 * GMX_TX_INT_EN = Interrupt
Enable
 *
 *
 * Notes:
 * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
 *
 */
union cvmx_gmxx_tx_int_en
{
    uint64_t u64;
    struct cvmx_gmxx_tx_int_en_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                     sent due to XSCOL */
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow */
    uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t ncb_nxa                      : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t ptp_lost                     : 4;
    uint64_t reserved_24_63               : 40;
#endif
    } s;
    /* 3-port layout; no NCB_NXA or PTP_LOST on these chips. */
    struct cvmx_gmxx_tx_int_en_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63               : 45;
    uint64_t late_col                     : 3;  /**< TX Late Collision */
    uint64_t reserved_15_15               : 1;
    uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t reserved_11_11               : 1;
    uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_5_7                 : 3;
    uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 3;
    uint64_t reserved_5_7                 : 3;
    uint64_t xscol                        : 3;
    uint64_t reserved_11_11               : 1;
    uint64_t xsdef                        : 3;
    uint64_t reserved_15_15               : 1;
    uint64_t late_col                     : 3;
    uint64_t reserved_19_63               : 45;
#endif
    } cn30xx;
    /* Like cn30xx but without LATE_COL. */
    struct cvmx_gmxx_tx_int_en_cn31xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_15_63               : 49;
    uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t reserved_11_11               : 1;
    uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_5_7                 : 3;
    uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 3;
    uint64_t reserved_5_7                 : 3;
    uint64_t xscol                        : 3;
    uint64_t reserved_11_11               : 1;
    uint64_t xsdef                        : 3;
    uint64_t reserved_15_63               : 49;
#endif
    } cn31xx;
    /* 4-port layout with NCB_NXA, no PTP_LOST. */
    struct cvmx_gmxx_tx_int_en_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63               : 44;
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (PASS3 only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
    uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t ncb_nxa                      : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t reserved_20_63               : 44;
#endif
    } cn38xx;
    /* CN38xx pass 2: no LATE_COL. */
    struct cvmx_gmxx_tx_int_en_cn38xxp2
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
    uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t ncb_nxa                      : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn38xxp2;
    struct cvmx_gmxx_tx_int_en_cn30xx    cn50xx;
    /* 4-port layout without NCB_NXA or PTP_LOST. */
    struct cvmx_gmxx_tx_int_en_cn52xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63               : 44;
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t reserved_20_63               : 44;
#endif
    } cn52xx;
    struct cvmx_gmxx_tx_int_en_cn52xx    cn52xxp1;
    struct cvmx_gmxx_tx_int_en_cn52xx    cn56xx;
    struct cvmx_gmxx_tx_int_en_cn52xx    cn56xxp1;
    struct cvmx_gmxx_tx_int_en_cn38xx    cn58xx;
    struct cvmx_gmxx_tx_int_en_cn38xx    cn58xxp1;
    /* CN63xx adds PTP_LOST; no NCB_NXA. */
    struct cvmx_gmxx_tx_int_en_cn63xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                     sent due to XSCOL */
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t ptp_lost                     : 4;
    uint64_t reserved_24_63               : 40;
#endif
    } cn63xx;
    struct cvmx_gmxx_tx_int_en_cn63xx    cn63xxp1;
};
typedef union cvmx_gmxx_tx_int_en cvmx_gmxx_tx_int_en_t;

/**
 * cvmx_gmx#_tx_int_reg
 *
 * GMX_TX_INT_REG = Interrupt Register
 *
 *
 * Notes:
 * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
 *
 */
/* Same per-chip variants as GMX_TX_INT_EN; this is the status (W1C) register. */
union cvmx_gmxx_tx_int_reg
{
    uint64_t u64;
    struct cvmx_gmxx_tx_int_reg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                     sent due to XSCOL */
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow */
    uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t ncb_nxa                      : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t ptp_lost                     : 4;
    uint64_t reserved_24_63               : 40;
#endif
    } s;
    struct cvmx_gmxx_tx_int_reg_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63               : 45;
    uint64_t late_col                     : 3;  /**< TX Late Collision */
    uint64_t reserved_15_15               : 1;
    uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t reserved_11_11               : 1;
    uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_5_7                 : 3;
    uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 3;
    uint64_t reserved_5_7                 : 3;
    uint64_t xscol                        : 3;
    uint64_t reserved_11_11               : 1;
    uint64_t xsdef                        : 3;
    uint64_t reserved_15_15               : 1;
    uint64_t late_col                     : 3;
    uint64_t reserved_19_63               : 45;
#endif
    } cn30xx;
    struct cvmx_gmxx_tx_int_reg_cn31xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_15_63               : 49;
    uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t reserved_11_11               : 1;
    uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_5_7                 : 3;
    uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 3;
    uint64_t reserved_5_7                 : 3;
    uint64_t xscol                        : 3;
    uint64_t reserved_11_11               : 1;
    uint64_t xsdef                        : 3;
    uint64_t reserved_15_63               : 49;
#endif
    } cn31xx;
    struct cvmx_gmxx_tx_int_reg_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63               : 44;
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (PASS3 only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
    uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t ncb_nxa                      : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t reserved_20_63               : 44;
#endif
    } cn38xx;
    struct cvmx_gmxx_tx_int_reg_cn38xxp2
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
    uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t ncb_nxa                      : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn38xxp2;
    struct cvmx_gmxx_tx_int_reg_cn30xx   cn50xx;
    struct cvmx_gmxx_tx_int_reg_cn52xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63               : 44;
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t reserved_20_63               : 44;
#endif
    } cn52xx;
    struct cvmx_gmxx_tx_int_reg_cn52xx   cn52xxp1;
    struct cvmx_gmxx_tx_int_reg_cn52xx   cn56xx;
    struct cvmx_gmxx_tx_int_reg_cn52xx   cn56xxp1;
    struct cvmx_gmxx_tx_int_reg_cn38xx   cn58xx;
    struct cvmx_gmxx_tx_int_reg_cn38xx   cn58xxp1;
    struct cvmx_gmxx_tx_int_reg_cn63xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                     sent due to XSCOL */
    uint64_t late_col                     : 4;  /**< TX Late Collision
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                     (SGMII/1000Base-X half-duplex only) */
    uint64_t reserved_6_7                 : 2;
    uint64_t undflw                       : 4;  /**< TX Underflow */
    uint64_t reserved_1_1                 : 1;
    uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
    uint64_t pko_nxa                      : 1;
    uint64_t reserved_1_1                 : 1;
    uint64_t undflw                       : 4;
    uint64_t reserved_6_7                 : 2;
    uint64_t xscol                        : 4;
    uint64_t xsdef                        : 4;
    uint64_t late_col                     : 4;
    uint64_t ptp_lost                     : 4;
    uint64_t reserved_24_63               : 40;
#endif
    } cn63xx;
    struct cvmx_gmxx_tx_int_reg_cn63xx   cn63xxp1;
};
typedef union cvmx_gmxx_tx_int_reg cvmx_gmxx_tx_int_reg_t;

/**
 * cvmx_gmx#_tx_jam
 *
 * GMX_TX_JAM = Packet TX Jam Pattern
 *
 */
union cvmx_gmxx_tx_jam
{
    uint64_t u64;
    struct cvmx_gmxx_tx_jam_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_8_63                : 56;
    uint64_t jam                          : 8;  /**< Jam pattern
                                                     (SGMII/1000Base-X half-duplex only) */
#else
    uint64_t jam                          : 8;
    uint64_t reserved_8_63                : 56;
#endif
    } s;
    /* Same layout on every supported chip. */
    struct cvmx_gmxx_tx_jam_s            cn30xx;
    struct cvmx_gmxx_tx_jam_s            cn31xx;
    struct cvmx_gmxx_tx_jam_s            cn38xx;
    struct cvmx_gmxx_tx_jam_s            cn38xxp2;
    struct cvmx_gmxx_tx_jam_s            cn50xx;
    struct cvmx_gmxx_tx_jam_s            cn52xx;
    struct cvmx_gmxx_tx_jam_s            cn52xxp1;
    struct cvmx_gmxx_tx_jam_s            cn56xx;
    struct cvmx_gmxx_tx_jam_s            cn56xxp1;
    struct cvmx_gmxx_tx_jam_s            cn58xx;
    struct cvmx_gmxx_tx_jam_s            cn58xxp1;
    struct cvmx_gmxx_tx_jam_s            cn63xx;
    struct cvmx_gmxx_tx_jam_s            cn63xxp1;
};
typedef union cvmx_gmxx_tx_jam cvmx_gmxx_tx_jam_t;

/**
 * cvmx_gmx#_tx_lfsr
 *
 * GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
 *
 */
union cvmx_gmxx_tx_lfsr
{
    uint64_t u64;
    struct cvmx_gmxx_tx_lfsr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t lfsr                         : 16; /**< The current state of the LFSR used to feed random
                                                     numbers to compute truncated binary exponential
                                                     backoff.
                                                     (SGMII/1000Base-X half-duplex only) */
#else
    uint64_t lfsr                         : 16;
    uint64_t reserved_16_63               : 48;
#endif
    } s;
    /* Same layout on every supported chip. */
    struct cvmx_gmxx_tx_lfsr_s           cn30xx;
    struct cvmx_gmxx_tx_lfsr_s           cn31xx;
    struct cvmx_gmxx_tx_lfsr_s           cn38xx;
    struct cvmx_gmxx_tx_lfsr_s           cn38xxp2;
    struct cvmx_gmxx_tx_lfsr_s           cn50xx;
    struct cvmx_gmxx_tx_lfsr_s           cn52xx;
    struct cvmx_gmxx_tx_lfsr_s           cn52xxp1;
    struct cvmx_gmxx_tx_lfsr_s           cn56xx;
    struct cvmx_gmxx_tx_lfsr_s           cn56xxp1;
    struct cvmx_gmxx_tx_lfsr_s           cn58xx;
    struct cvmx_gmxx_tx_lfsr_s           cn58xxp1;
    struct cvmx_gmxx_tx_lfsr_s           cn63xx;
    struct cvmx_gmxx_tx_lfsr_s           cn63xxp1;
};
typedef union cvmx_gmxx_tx_lfsr cvmx_gmxx_tx_lfsr_t;

/**
 * cvmx_gmx#_tx_ovr_bp
 *
 * GMX_TX_OVR_BP = Packet Interface TX Override BackPressure
 *
 *
 * Notes:
 * In XAUI mode, only the lsb (corresponding to port0) of EN, BP, and IGN_FULL are used.
 *
 * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
 * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol
 * when GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by
 * GMX*_TX_XAUI_CTL[HG_EN]=1 and GMX*_RX0_UDD_SKP[LEN]=16.)
HW can only auto-generate backpressure
 * through HiGig2 messages (optionally, when GMX*_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
 * protocol.
 */
union cvmx_gmxx_tx_ovr_bp
{
    uint64_t u64;
    struct cvmx_gmxx_tx_ovr_bp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_48_63               : 16;
    uint64_t tx_prt_bp                    : 16; /**< Per port BP sent to PKO
                                                     0=Port is available
                                                     1=Port should be back pressured
                                                     TX_PRT_BP should not be set until
                                                     GMX_INF_MODE[EN] has been enabled */
    uint64_t reserved_12_31               : 20;
    uint64_t en                           : 4;  /**< Per port Enable back pressure override */
    uint64_t bp                           : 4;  /**< Per port BackPressure status to use
                                                     0=Port is available
                                                     1=Port should be back pressured */
    uint64_t ign_full                     : 4;  /**< Ignore the RX FIFO full when computing BP */
#else
    uint64_t ign_full                     : 4;
    uint64_t bp                           : 4;
    uint64_t en                           : 4;
    uint64_t reserved_12_31               : 20;
    uint64_t tx_prt_bp                    : 16;
    uint64_t reserved_48_63               : 16;
#endif
    } s;
    /* 3-port layout; no TX_PRT_BP field on these chips. */
    struct cvmx_gmxx_tx_ovr_bp_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_11_63               : 53;
    uint64_t en                           : 3;  /**< Per port Enable back pressure override */
    uint64_t reserved_7_7                 : 1;
    uint64_t bp                           : 3;  /**< Per port BackPressure status to use
                                                     0=Port is available
                                                     1=Port should be back pressured */
    uint64_t reserved_3_3                 : 1;
    uint64_t ign_full                     : 3;  /**< Ignore the RX FIFO full when computing BP */
#else
    uint64_t ign_full                     : 3;
    uint64_t reserved_3_3                 : 1;
    uint64_t bp                           : 3;
    uint64_t reserved_7_7                 : 1;
    uint64_t en                           : 3;
    uint64_t reserved_11_63               : 53;
#endif
    } cn30xx;
    struct cvmx_gmxx_tx_ovr_bp_cn30xx    cn31xx;
    /* 4-port layout; no TX_PRT_BP field on these chips. */
    struct cvmx_gmxx_tx_ovr_bp_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_12_63               : 52;
    uint64_t en                           : 4;  /**< Per port Enable back pressure override */
    uint64_t bp                           : 4;  /**< Per port BackPressure status to use
                                                     0=Port is available
                                                     1=Port should be back pressured */
    uint64_t ign_full                     : 4;  /**< Ignore the RX FIFO full when computing BP */
#else
    uint64_t ign_full                     : 4;
    uint64_t bp                           : 4;
    uint64_t en                           : 4;
    uint64_t reserved_12_63               : 52;
#endif
    } cn38xx;
    struct cvmx_gmxx_tx_ovr_bp_cn38xx    cn38xxp2;
    struct cvmx_gmxx_tx_ovr_bp_cn30xx    cn50xx;
    struct cvmx_gmxx_tx_ovr_bp_s         cn52xx;
    struct cvmx_gmxx_tx_ovr_bp_s         cn52xxp1;
    struct cvmx_gmxx_tx_ovr_bp_s         cn56xx;
    struct cvmx_gmxx_tx_ovr_bp_s         cn56xxp1;
    struct cvmx_gmxx_tx_ovr_bp_cn38xx    cn58xx;
    struct cvmx_gmxx_tx_ovr_bp_cn38xx    cn58xxp1;
    struct cvmx_gmxx_tx_ovr_bp_s         cn63xx;
    struct cvmx_gmxx_tx_ovr_bp_s         cn63xxp1;
};
typedef union cvmx_gmxx_tx_ovr_bp cvmx_gmxx_tx_ovr_bp_t;

/**
 * cvmx_gmx#_tx_pause_pkt_dmac
 *
 * GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
 *
 */
union cvmx_gmxx_tx_pause_pkt_dmac
{
	uint64_t u64;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t dmac                         : 48; /**< The DMAC field placed in outbound pause pkts */
#else
	uint64_t dmac                         : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn30xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn31xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn38xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn38xxp2;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn50xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn52xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn52xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn56xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn56xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn58xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn58xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn63xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn63xxp1;
};
typedef union cvmx_gmxx_tx_pause_pkt_dmac cvmx_gmxx_tx_pause_pkt_dmac_t;

/**
 * cvmx_gmx#_tx_pause_pkt_type
 *
 * GMX_TX_PAUSE_PKT_TYPE = Packet Interface TX Pause Packet TYPE field
 *
 */
union cvmx_gmxx_tx_pause_pkt_type
{
	uint64_t u64;
	struct cvmx_gmxx_tx_pause_pkt_type_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t type                         : 16; /**< The TYPE field placed in outbound pause pkts */
#else
	uint64_t type                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn30xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn31xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn38xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn38xxp2;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn50xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn52xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn52xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn56xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn56xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn58xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn58xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn63xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn63xxp1;
};
typedef union cvmx_gmxx_tx_pause_pkt_type cvmx_gmxx_tx_pause_pkt_type_t;

/**
 * cvmx_gmx#_tx_prts
 *
 * Common
 *
 *
 * GMX_TX_PRTS = TX Ports
 *
 * Notes:
 * * The value programmed for PRTS is the number of the highest architected
 * port number on the interface, plus 1.
 * For example, if port 2 is the
 * highest architected port, then the programmed value should be 3 since
 * there are 3 ports in the system - 0, 1, and 2.
 */
union cvmx_gmxx_tx_prts
{
	uint64_t u64;
	struct cvmx_gmxx_tx_prts_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t prts                         : 5;  /**< Number of ports allowed on the interface
                                                         (SGMII/1000Base-X only) */
#else
	uint64_t prts                         : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_gmxx_tx_prts_s            cn30xx;
	struct cvmx_gmxx_tx_prts_s            cn31xx;
	struct cvmx_gmxx_tx_prts_s            cn38xx;
	struct cvmx_gmxx_tx_prts_s            cn38xxp2;
	struct cvmx_gmxx_tx_prts_s            cn50xx;
	struct cvmx_gmxx_tx_prts_s            cn52xx;
	struct cvmx_gmxx_tx_prts_s            cn52xxp1;
	struct cvmx_gmxx_tx_prts_s            cn56xx;
	struct cvmx_gmxx_tx_prts_s            cn56xxp1;
	struct cvmx_gmxx_tx_prts_s            cn58xx;
	struct cvmx_gmxx_tx_prts_s            cn58xxp1;
	struct cvmx_gmxx_tx_prts_s            cn63xx;
	struct cvmx_gmxx_tx_prts_s            cn63xxp1;
};
typedef union cvmx_gmxx_tx_prts cvmx_gmxx_tx_prts_t;

/**
 * cvmx_gmx#_tx_spi_ctl
 *
 * GMX_TX_SPI_CTL = Spi4 TX ModesSpi4
 *
 */
union cvmx_gmxx_tx_spi_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t tpa_clr                      : 1;  /**< TPA Clear Mode
                                                         Clear credit counter when satisfied status */
	uint64_t cont_pkt                     : 1;  /**< Contiguous Packet Mode
                                                         Finish one packet before switching to another
                                                         Cannot be set in Spi4 pass-through mode */
#else
	uint64_t cont_pkt                     : 1;
	uint64_t tpa_clr                      : 1;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_ctl_s         cn38xx;
	struct cvmx_gmxx_tx_spi_ctl_s         cn38xxp2;
	struct cvmx_gmxx_tx_spi_ctl_s         cn58xx;
	struct cvmx_gmxx_tx_spi_ctl_s         cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_ctl cvmx_gmxx_tx_spi_ctl_t;

/**
 * cvmx_gmx#_tx_spi_drain
 *
 * GMX_TX_SPI_DRAIN = Drain out Spi TX FIFO
 *
 */
union cvmx_gmxx_tx_spi_drain
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_drain_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t drain                        : 16; /**< Per port drain control
                                                         0=Normal operation
                                                         1=GMX TX will be popped, but no valid data will
                                                         be sent to SPX. Credits are correctly returned
                                                         to PKO. STX_IGN_CAL should be set to ignore
                                                         TPA and not stall due to back-pressure.
                                                         (PASS3 only) */
#else
	uint64_t drain                        : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_drain_s       cn38xx;
	struct cvmx_gmxx_tx_spi_drain_s       cn58xx;
	struct cvmx_gmxx_tx_spi_drain_s       cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_drain cvmx_gmxx_tx_spi_drain_t;

/**
 * cvmx_gmx#_tx_spi_max
 *
 * GMX_TX_SPI_MAX = RGMII TX Spi4 MAX
 *
 */
union cvmx_gmxx_tx_spi_max
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_max_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_23_63               : 41;
	uint64_t slice                        : 7;  /**< Number of 16B blocks to transmit in a burst before
                                                         switching to the next port. SLICE does not always
                                                         limit the burst length transmitted by OCTEON.
                                                         Depending on the traffic pattern and
                                                         GMX_TX_SPI_ROUND programming, the next port could
                                                         be the same as the current port. In this case,
                                                         OCTEON may merge multiple sub-SLICE bursts into
                                                         one contiguous burst that is longer than SLICE
                                                         (as long as the burst does not cross a packet
                                                         boundary).
                                                         SLICE must be programmed to be >=
                                                         GMX_TX_SPI_THRESH[THRESH]
                                                         If SLICE==0, then the transmitter will tend to
                                                         send the complete packet. The port will only
                                                         switch if credits are exhausted or PKO cannot
                                                         keep up.
                                                         (90nm ONLY) */
	uint64_t max2                         : 8;  /**< MAX2 (per Spi4.2 spec) */
	uint64_t max1                         : 8;  /**< MAX1 (per Spi4.2 spec)
                                                         MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
#else
	uint64_t max1                         : 8;
	uint64_t max2                         : 8;
	uint64_t slice                        : 7;
	uint64_t reserved_23_63               : 41;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_max_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t max2                         : 8;  /**< MAX2 (per Spi4.2 spec) */
	uint64_t max1                         : 8;  /**< MAX1 (per Spi4.2 spec)
                                                         MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
#else
	uint64_t max1                         : 8;
	uint64_t max2                         : 8;
	uint64_t reserved_16_63               : 48;
#endif
	} cn38xx;
	struct cvmx_gmxx_tx_spi_max_cn38xx    cn38xxp2;
	struct cvmx_gmxx_tx_spi_max_s         cn58xx;
	struct cvmx_gmxx_tx_spi_max_s         cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_max cvmx_gmxx_tx_spi_max_t;

/**
 * cvmx_gmx#_tx_spi_round#
 *
 * GMX_TX_SPI_ROUND = Controls SPI4 TX Arbitration
 *
 */
union cvmx_gmxx_tx_spi_roundx
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_roundx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t round                        : 16; /**< Which Spi ports participate in each arbitration
                                                         round.
                                                         Each bit corresponds to a spi port
                                                         - 0: this port will arb in this round
                                                         - 1: this port will not arb in this round
                                                         (90nm ONLY) */
#else
	uint64_t round                        : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_roundx_s      cn58xx;
	struct cvmx_gmxx_tx_spi_roundx_s      cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_roundx cvmx_gmxx_tx_spi_roundx_t;

/**
 * cvmx_gmx#_tx_spi_thresh
 *
 * GMX_TX_SPI_THRESH = RGMII TX Spi4 Transmit Threshold
 *
 *
 * Notes:
 * Note: zero will map to 0x20
 *
 * This will normally create Spi4 traffic bursts at least THRESH in length.
 * If dclk > eclk, then this rule may not always hold and Octeon may split
 * transfers into smaller bursts - some of which could be as short as 16B.
 * Octeon will never violate the Spi4.2 spec and send a non-EOP burst that is
 * not a multiple of 16B.
 */
union cvmx_gmxx_tx_spi_thresh
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_thresh_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63                : 58;
	uint64_t thresh                       : 6;  /**< Transmit threshold in 16B blocks - cannot be zero
                                                         THRESH <= TX_FIFO size (in non-passthrough mode)
                                                         THRESH <= TX_FIFO size-2 (in passthrough mode)
                                                         THRESH <= GMX_TX_SPI_MAX[MAX1]
                                                         THRESH <= GMX_TX_SPI_MAX[MAX2], if not then it is
                                                         possible for Octeon to send a Spi4 data burst of
                                                         MAX2 <= burst <= THRESH 16B ticks
                                                         GMX_TX_SPI_MAX[SLICE] must be programmed to be >=
                                                         THRESH */
#else
	uint64_t thresh                       : 6;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_thresh_s      cn38xx;
	struct cvmx_gmxx_tx_spi_thresh_s      cn38xxp2;
	struct cvmx_gmxx_tx_spi_thresh_s      cn58xx;
	struct cvmx_gmxx_tx_spi_thresh_s      cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_thresh cvmx_gmxx_tx_spi_thresh_t;

/**
 * cvmx_gmx#_tx_xaui_ctl
 */
union cvmx_gmxx_tx_xaui_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_tx_xaui_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_11_63               : 53;
	uint64_t hg_pause_hgi                 : 2;  /**< HGI Field for HW generated HiGig pause packets
                                                         (XAUI mode only) */
	uint64_t hg_en                        : 1;  /**< Enable HiGig Mode
                                                         When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=12
                                                         the interface is in HiGig/HiGig+ mode and the
                                                         following must be set:
                                                         GMX_RX_FRM_CTL[PRE_CHK] == 0
                                                         GMX_RX_UDD_SKP[FCSSEL] == 0
                                                         GMX_RX_UDD_SKP[SKIP] == 12
                                                         GMX_TX_APPEND[PREAMBLE] == 0
                                                         When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=16
                                                         the interface is in HiGig2 mode and the
                                                         following must be set:
                                                         GMX_RX_FRM_CTL[PRE_CHK] == 0
                                                         GMX_RX_UDD_SKP[FCSSEL] == 0
                                                         GMX_RX_UDD_SKP[SKIP] == 16
                                                         GMX_TX_APPEND[PREAMBLE] == 0
                                                         GMX_PRT0_CBFC_CTL[RX_EN] == 0
                                                         GMX_PRT0_CBFC_CTL[TX_EN] == 0
                                                         (XAUI mode only) */
	uint64_t reserved_7_7                 : 1;
	uint64_t ls_byp                       : 1;  /**< Bypass the link status as determined by the XGMII
                                                         receiver and set the link status of the
                                                         transmitter to LS.
                                                         (XAUI mode only) */
	uint64_t ls                           : 2;  /**< Link Status
                                                         0 = Link Ok
                                                             Link runs normally. RS passes MAC data to PCS
                                                         1 = Local Fault
                                                             RS layer sends continuous remote fault
                                                             sequences.
                                                         2 = Remote Fault
                                                             RS layer sends continuous idles sequences
                                                         3 = Link Drain
                                                             RS layer drops full packets to allow GMX and
                                                             PKO to drain their FIFOs
                                                         (XAUI mode only) */
	uint64_t reserved_2_3                 : 2;
	uint64_t uni_en                       : 1;  /**< Enable Unidirectional Mode (IEEE Clause 66)
                                                         (XAUI mode only) */
	uint64_t dic_en                       : 1;  /**< Enable the deficit idle counter for IFG averaging
                                                         (XAUI mode only) */
#else
	uint64_t dic_en                       : 1;
	uint64_t uni_en                       : 1;
	uint64_t reserved_2_3                 : 2;
	uint64_t ls                           : 2;
	uint64_t ls_byp                       : 1;
	uint64_t reserved_7_7                 : 1;
	uint64_t hg_en                        : 1;
	uint64_t hg_pause_hgi                 : 2;
	uint64_t reserved_11_63               : 53;
#endif
	} s;
	struct cvmx_gmxx_tx_xaui_ctl_s        cn52xx;
	struct cvmx_gmxx_tx_xaui_ctl_s        cn52xxp1;
	struct cvmx_gmxx_tx_xaui_ctl_s        cn56xx;
	struct cvmx_gmxx_tx_xaui_ctl_s        cn56xxp1;
	struct cvmx_gmxx_tx_xaui_ctl_s        cn63xx;
	struct cvmx_gmxx_tx_xaui_ctl_s        cn63xxp1;
};
typedef union cvmx_gmxx_tx_xaui_ctl cvmx_gmxx_tx_xaui_ctl_t;

/**
 * cvmx_gmx#_xaui_ext_loopback
 */
union cvmx_gmxx_xaui_ext_loopback
{
	uint64_t u64;
	struct cvmx_gmxx_xaui_ext_loopback_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t en                           : 1;  /**< Loopback enable
                                                         Puts the packet interface in external loopback
                                                         mode on the XAUI bus in which the RX lines are
                                                         reflected on the TX lines.
                                                         (XAUI mode only) */
	uint64_t thresh                       : 4;  /**< Threshold on the TX FIFO
                                                         SW must only write the typical value. Any other
                                                         value will cause loopback mode not to function
                                                         correctly.
                                                         (XAUI mode only) */
#else
	uint64_t thresh                       : 4;
	uint64_t en                           : 1;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_gmxx_xaui_ext_loopback_s  cn52xx;
	struct cvmx_gmxx_xaui_ext_loopback_s  cn52xxp1;
	struct cvmx_gmxx_xaui_ext_loopback_s  cn56xx;
	struct cvmx_gmxx_xaui_ext_loopback_s  cn56xxp1;
	struct cvmx_gmxx_xaui_ext_loopback_s  cn63xx;
	struct cvmx_gmxx_xaui_ext_loopback_s  cn63xxp1;
};
typedef union cvmx_gmxx_xaui_ext_loopback cvmx_gmxx_xaui_ext_loopback_t;

#endif