1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41/**
42 * cvmx-gmxx-defs.h
43 *
44 * Configuration and status register (CSR) type definitions for
45 * Octeon gmxx.
46 *
47 * This file is auto generated. Do not edit.
48 *
49 * <hr>$Revision$<hr>
50 *
51 */
52#ifndef __CVMX_GMXX_TYPEDEFS_H__
53#define __CVMX_GMXX_TYPEDEFS_H__
54
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_BAD_REG CSR (base 0x0001180008000518).
 * Debug build: warns when block_id is out of range for the detected
 * Octeon model (0 only, or 0-1 on dual-interface chips), then still
 * returns the address with block_id masked to the legal range.
 */
static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_BAD_REG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_BAD_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_BIST CSR (base 0x0001180008000400).
 * Debug build: warns on an invalid block_id for the running chip,
 * then returns the address with block_id masked to one bit.
 */
static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_BIST(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_BIST(block_id) (CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_CLK_EN CSR (base 0x00011800080007F0).
 * Only present on CN52XX/CN56XX/CN63XX; debug build warns otherwise,
 * then returns the address with block_id masked to one bit.
 */
static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_CLK_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_CLK_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_HG2_CONTROL CSR (base 0x0001180008000550).
 * Only present on CN52XX/CN56XX/CN63XX; debug build warns otherwise,
 * then returns the address with block_id masked to one bit.
 */
static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_HG2_CONTROL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_HG2_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_INF_MODE CSR (base 0x00011800080007F8).
 * Debug build: warns on an invalid block_id for the running chip,
 * then returns the address with block_id masked to one bit.
 */
static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_INF_MODE(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_INF_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_NXA_ADR CSR (base 0x0001180008000510).
 * Debug build: warns on an invalid block_id for the running chip,
 * then returns the address with block_id masked to one bit.
 */
static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_NXA_ADR(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_NXA_ADR(block_id) (CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_PRT(offset)_CBFC_CTL CSR
 * (base 0x0001180008000580).  Only offset 0 exists, so offset does not
 * enter the address computation; it is range-checked only.  Present on
 * CN52XX/CN56XX/CN63XX; debug build warns on other chips/arguments.
 */
static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_PRTX_CBFC_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Release build: address computation only; offset is ignored (always 0). */
#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_PRT(offset)_CFG CSR (base
 * 0x0001180008000010).  Debug build: warns when (offset, block_id) is
 * out of range for the running chip, then returns the address with the
 * arguments masked (offset & 3, block_id & 1).
 */
static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_PRTX_CFG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_PRTX_CFG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM0 CSR (base
 * 0x0001180008000180).  Debug build: warns on out-of-range arguments
 * for the running chip, then returns the address with the arguments
 * masked (offset & 3, block_id & 1).
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM0(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM1 CSR (base
 * 0x0001180008000188).  Same chip/argument validity as ADR_CAM0.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM1(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM2 CSR (base
 * 0x0001180008000190).  Same chip/argument validity as ADR_CAM0.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM2(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM3 CSR (base
 * 0x0001180008000198).  Same chip/argument validity as ADR_CAM0.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM3(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM4 CSR (base
 * 0x00011800080001A0).  Same chip/argument validity as ADR_CAM0.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM4(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM5 CSR (base
 * 0x00011800080001A8).  Same chip/argument validity as ADR_CAM0.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM5(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CAM_EN CSR (base
 * 0x0001180008000108).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_EN(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_ADR_CTL CSR (base
 * 0x0001180008000100).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_DECISION CSR (base
 * 0x0001180008000040).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_DECISION(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_DECISION(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_FRM_CHK CSR (base
 * 0x0001180008000020).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_CHK(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_FRM_CTL CSR (base
 * 0x0001180008000018).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_FRM_MAX CSR (base
 * 0x0001180008000030).  Note the smaller model list: this register is
 * only valid on CN30XX/CN31XX/CN38XX/CN58XX; debug build warns on
 * other chips or out-of-range arguments.
 */
static inline uint64_t CVMX_GMXX_RXX_FRM_MAX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_MAX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_FRM_MIN CSR (base
 * 0x0001180008000028).  Like FRM_MAX, only valid on
 * CN30XX/CN31XX/CN38XX/CN58XX; debug build warns otherwise.
 */
static inline uint64_t CVMX_GMXX_RXX_FRM_MIN(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_MIN(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_IFG CSR (base
 * 0x0001180008000058).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_IFG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_IFG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_INT_EN CSR (base
 * 0x0001180008000008).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_INT_EN(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_INT_EN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_INT_REG CSR (base
 * 0x0001180008000000 — the first register of the GMX block).
 * Debug build: warns on out-of-range arguments, then returns the
 * address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_INT_REG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_INT_REG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_JABBER CSR (base
 * 0x0001180008000038).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_JABBER(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_JABBER(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_PAUSE_DROP_TIME CSR (base
 * 0x0001180008000068).  Not present on CN30XX/CN31XX/CN38XX; debug
 * build warns on unsupported chips or out-of-range arguments.
 */
static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_PAUSE_DROP_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_RX_INBND CSR (base
 * 0x0001180008000060).  Not present on CN52XX/CN56XX/CN63XX; debug
 * build warns on unsupported chips or out-of-range arguments.
 */
static inline uint64_t CVMX_GMXX_RXX_RX_INBND(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_RX_INBND(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Address of the GMX(block_id)_RX(offset)_STATS_CTL CSR (base
 * 0x0001180008000050).  Debug build: warns on out-of-range arguments,
 * then returns the address with offset & 3 and block_id & 1.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Release build: address computation only, no model/range checking. */
#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_OCTS for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_OCTS_CTL for (offset, block_id).  Warns if
 * the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_OCTS_DMAC for (offset, block_id).  Warns
 * if the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_OCTS_DRP for (offset, block_id).  Warns if
 * the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_PKTS for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_PKTS_BAD for (offset, block_id).  Warns if
 * the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_BAD(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_PKTS_CTL for (offset, block_id).  Warns if
 * the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_PKTS_DMAC for (offset, block_id).  Warns
 * if the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_STATS_PKTS_DRP for (offset, block_id).  Warns if
 * the pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RXX_UDD_SKP for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_UDD_SKP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_BP_DROPX for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.  Note the stride here is 8 bytes with
 * a 0x1000000 block multiplier, unlike the per-port *2048 registers.
 */
static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_DROPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_BP_OFFX for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_OFFX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_BP_ONX for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_ONX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_BP_ONX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_HG2_STATUS for interface block_id.  Present only
 * on CN52XX/CN56XX/CN63XX per the check below; warns otherwise and still
 * returns the computed address.
 */
static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_HG2_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_HG2_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_PASS_EN for interface block_id.  Present only on
 * CN38XX/CN58XX per the check below; warns otherwise and still returns
 * the computed address.
 */
static inline uint64_t CVMX_GMXX_RX_PASS_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_RX_PASS_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_PASS_MAPX for (offset, block_id).  Unlike most
 * registers in this file, offset ranges over 0..15 (masked with & 15).
 * Present only on CN38XX/CN58XX per the check below; warns otherwise and
 * still returns the computed address.
 */
static inline uint64_t CVMX_GMXX_RX_PASS_MAPX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 15)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RX_PASS_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_PRTS for interface block_id.  Warns if block_id
 * is out of range for the running OCTEON model; the address is computed
 * and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_PRTS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_PRTS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_PRT_INFO for interface block_id.  Warns if
 * block_id is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_PRT_INFO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_PRT_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_TX_STATUS.  Only block 0 on CN30/31/50 is valid,
 * so block_id is validated but does not affect the returned (fixed)
 * address.  Note: same base address as CVMX_GMXX_SOFT_BIST; the model
 * lists of the two checks are disjoint, so the registers never coexist.
 */
static inline uint64_t CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_XAUI_BAD_COL for interface block_id.  Present
 * only on CN52XX/CN56XX/CN63XX per the check below; warns otherwise and
 * still returns the computed address.
 */
static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_XAUI_BAD_COL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_RX_XAUI_CTL for interface block_id.  Present only
 * on CN52XX/CN56XX/CN63XX per the check below; warns otherwise and still
 * returns the computed address.
 */
static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_RX_XAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_SMACX for (offset, block_id).  Warns if the pair is
 * out of range for the running OCTEON model; the address is computed and
 * returned regardless.
 */
static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_SMACX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_SMACX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_SOFT_BIST.  Only CN63XX block 0 is valid, so
 * block_id is validated but does not affect the returned (fixed) address.
 * Note: same base address as CVMX_GMXX_RX_TX_STATUS; the model lists of
 * the two checks are disjoint, so the registers never coexist.
 */
static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_SOFT_BIST(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_SOFT_BIST(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_STAT_BP for interface block_id.  Warns if block_id
 * is out of range for the running OCTEON model; the address is computed
 * and returned regardless.
 */
static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_STAT_BP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_STAT_BP(block_id) (CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_APPEND for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_APPEND(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_APPEND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_BURST for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_BURST(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_BURST(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_CBFC_XOFF for (offset, block_id).  Only
 * offset == 0 is valid, so offset is validated but does not enter the
 * address computation.  Warns on invalid arguments; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_CBFC_XOFF(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_CBFC_XON for (offset, block_id).  Only
 * offset == 0 is valid, so offset is validated but does not enter the
 * address computation.  Warns on invalid arguments; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_CBFC_XON(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_CLK for (offset, block_id).  Warns if the pair
 * is out of range for the running OCTEON model (only CN30/31/38/50/58 are
 * accepted here); the address is computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_CLK(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_TXX_CLK(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_CTL for (offset, block_id).  Warns if the pair
 * is out of range for the running OCTEON model; the address is computed
 * and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CSR address of GMXX_TXX_MIN_PKT for (offset, block_id).  Warns if the
 * pair is out of range for the running OCTEON model; the address is
 * computed and returned regardless.
 */
static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_MIN_PKT(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked form used when CSR address checking is compiled out. */
#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_PAUSE_PKT_INTERVAL CSR.  Warns when the running
 * Octeon model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_PAUSE_PKT_TIME CSR.  Warns when the running
 * Octeon model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_PAUSE_TOGO CSR.  Warns when the running Octeon
 * model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_TOGO(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_PAUSE_ZERO CSR.  Warns when the running Octeon
 * model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_ZERO(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_SGMII_CTL(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_SGMII_CTL CSR.  Only CN52XX/CN56XX/CN63XX pass
 * the validity check here (the models with SGMII-capable GMX per this
 * table); other models trigger the warning.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_SGMII_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_SLOT(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_SLOT CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_SLOT(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_SLOT(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_SOFT_PAUSE CSR.  Warns when the running Octeon
 * model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_SOFT_PAUSE(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT0(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT0 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT0(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT0(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT1(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT1 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT1(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT1(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT2(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT2 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT2(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT2(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT3(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT3 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT3(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT3(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT4(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT4 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT4(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT4(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT5(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT5 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT5(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT5(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT6(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT6 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT6(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT6(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT7(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT7 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT7(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT7(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT8(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT8 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT8(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT8(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STAT9(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STAT9 CSR.  Warns when the running Octeon model
 * does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT9(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STAT9(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_STATS_CTL(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_STATS_CTL CSR.  Warns when the running Octeon
 * model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TXX_THRESH(offset, block_id): address of the
 * GMX(block_id)_TX(offset)_THRESH CSR.  Warns when the running Octeon
 * model does not provide this port/interface pair.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_THRESH(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* Base + (port + interface * 0x10000) * 2048 bytes. */
	return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TXX_THRESH(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_BP(block_id): address of the per-interface
 * GMX(block_id)_TX_BP CSR.  Warns when the running Octeon model does not
 * provide interface 'block_id' (single- vs dual-interface per the table).
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_BP(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_BP(block_id) (CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_CLK_MSKX(offset, block_id): address of the
 * GMX_TX_CLK_MSK(offset) CSR.  Only CN30XX/CN50XX (single interface) pass
 * the validity check.  The '(block_id) & 0) * 0x0ull' term is always zero
 * -- generator-emitted symmetry, kept intentionally; stride here is 8
 * bytes per 'offset', not the 2048-byte port stride of the other accessors.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_CLK_MSKX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 1)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 1)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TX_CLK_MSKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_COL_ATTEMPT(block_id): address of the per-interface
 * GMX(block_id)_TX_COL_ATTEMPT CSR.  Warns when the running Octeon model
 * does not provide interface 'block_id'.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_COL_ATTEMPT(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) (CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_CORRUPT(block_id): address of the per-interface
 * GMX(block_id)_TX_CORRUPT CSR.  Warns when the running Octeon model does
 * not provide interface 'block_id'.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_CORRUPT(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_CORRUPT(block_id) (CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_HG2_REG1(block_id): address of the per-interface
 * GMX(block_id)_TX_HG2_REG1 CSR.  Only CN52XX/CN56XX/CN63XX pass the
 * validity check; other models trigger the warning.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_HG2_REG1(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_HG2_REG1(block_id) (CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_HG2_REG2(block_id): address of the per-interface
 * GMX(block_id)_TX_HG2_REG2 CSR.  Only CN52XX/CN56XX/CN63XX pass the
 * validity check; other models trigger the warning.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_HG2_REG2(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_HG2_REG2(block_id) (CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_IFG(block_id): address of the per-interface
 * GMX(block_id)_TX_IFG CSR.  Warns when the running Octeon model does not
 * provide interface 'block_id'.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_IFG(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_IFG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_INT_EN(block_id): address of the per-interface
 * GMX(block_id)_TX_INT_EN CSR.  Warns when the running Octeon model does
 * not provide interface 'block_id'.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_INT_EN(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_INT_REG(block_id): address of the per-interface
 * GMX(block_id)_TX_INT_REG CSR.  Warns when the running Octeon model does
 * not provide interface 'block_id'.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_INT_REG(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * CVMX_GMXX_TX_JAM(block_id): address of the per-interface
 * GMX(block_id)_TX_JAM CSR.  Warns when the running Octeon model does not
 * provide interface 'block_id'.
 * NOTE(review): auto-generated accessor -- regenerate, do not hand-edit.
 */
static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_JAM(%lu) is invalid on this chip\n", block_id);
	/* Interfaces are 0x8000000 bytes apart for this register. */
	return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
/* Unchecked variant: identical address arithmetic, no model validation. */
#define CVMX_GMXX_TX_JAM(block_id) (CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_LFSR.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id)
{
	/* One GMX block on CN30XX/CN31XX/CN50XX/CN52XX/CN63XX,
	   two on CN38XX/CN56XX/CN58XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_LFSR(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_LFSR(block_id) (CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_OVR_BP.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
{
	/* One GMX block on CN30XX/CN31XX/CN50XX/CN52XX/CN63XX,
	   two on CN38XX/CN56XX/CN58XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_OVR_BP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_OVR_BP(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_PAUSE_PKT_DMAC.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)
{
	/* One GMX block on CN30XX/CN31XX/CN50XX/CN52XX/CN63XX,
	   two on CN38XX/CN56XX/CN58XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_DMAC(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) (CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_PAUSE_PKT_TYPE.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)
{
	/* One GMX block on CN30XX/CN31XX/CN50XX/CN52XX/CN63XX,
	   two on CN38XX/CN56XX/CN58XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_TYPE(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) (CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_PRTS.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
{
	/* One GMX block on CN30XX/CN31XX/CN50XX/CN52XX/CN63XX,
	   two on CN38XX/CN56XX/CN58XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_PRTS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_PRTS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_SPI_CTL.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_SPI_CTL(unsigned long block_id)
{
	/* SPI TX registers exist only on CN38XX and CN58XX (two GMX blocks). */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_SPI_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_SPI_DRAIN.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id)
{
	/* SPI TX registers exist only on CN38XX and CN58XX (two GMX blocks). */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_SPI_DRAIN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_SPI_MAX.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_SPI_MAX(unsigned long block_id)
{
	/* SPI TX registers exist only on CN38XX and CN58XX (two GMX blocks). */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_SPI_MAX(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_SPI_ROUND(offset).
 *
 * Warns via cvmx_warn() when the arguments are out of range for the
 * chip model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset, unsigned long block_id)
{
	/* Only CN58XX implements the 32 SPI round registers (two GMX blocks). */
	int args_ok = OCTEON_IS_MODEL(OCTEON_CN58XX) &&
		      offset <= 31 && block_id <= 1;

	if (!args_ok)
		cvmx_warn("CVMX_GMXX_TX_SPI_ROUNDX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_SPI_THRESH.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id)
{
	/* SPI TX registers exist only on CN38XX and CN58XX (two GMX blocks). */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id <= 1);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_SPI_THRESH(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_TX_XAUI_CTL.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
{
	/* XAUI-capable models: one GMX block on CN52XX/CN63XX, two on CN56XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_TX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_XAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/**
 * Compute the CSR address of GMX(block_id)_XAUI_EXT_LOOPBACK.
 *
 * Warns via cvmx_warn() when block_id is out of range for the chip
 * model the code is running on; the address is returned regardless.
 */
static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
{
	/* XAUI-capable models: one GMX block on CN52XX/CN63XX, two on CN56XX. */
	int block_ok =
		(OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
		(OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
		(OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0);

	if (!block_ok)
		cvmx_warn("CVMX_GMXX_XAUI_EXT_LOOPBACK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull)
#endif
1712
1713/**
1714 * cvmx_gmx#_bad_reg
1715 *
1716 * GMX_BAD_REG = A collection of things that have gone very, very wrong
1717 *
1718 *
1719 * Notes:
1720 * In XAUI mode, only the lsb (corresponding to port0) of INB_NXA, LOSTSTAT, OUT_OVR, are used.
1721 *
1722 */
1723union cvmx_gmxx_bad_reg
1724{
1725	uint64_t u64;
1726	struct cvmx_gmxx_bad_reg_s
1727	{
1728#if __BYTE_ORDER == __BIG_ENDIAN
1729	uint64_t reserved_31_63               : 33;
1730	uint64_t inb_nxa                      : 4;  /**< Inbound port > GMX_RX_PRTS */
1731	uint64_t statovr                      : 1;  /**< TX Statistics overflow
1732                                                         The common FIFO to SGMII and XAUI had an overflow
1733                                                         TX Stats are corrupted */
1734	uint64_t loststat                     : 4;  /**< TX Statistics data was over-written
1735                                                         In SGMII, one bit per port
1736                                                         In XAUI, only port0 is used
1737                                                         TX Stats are corrupted */
1738	uint64_t reserved_18_21               : 4;
1739	uint64_t out_ovr                      : 16; /**< Outbound data FIFO overflow (per port) */
1740	uint64_t ncb_ovr                      : 1;  /**< Outbound NCB FIFO Overflow */
1741	uint64_t out_col                      : 1;  /**< Outbound collision occured between PKO and NCB */
1742#else
1743	uint64_t out_col                      : 1;
1744	uint64_t ncb_ovr                      : 1;
1745	uint64_t out_ovr                      : 16;
1746	uint64_t reserved_18_21               : 4;
1747	uint64_t loststat                     : 4;
1748	uint64_t statovr                      : 1;
1749	uint64_t inb_nxa                      : 4;
1750	uint64_t reserved_31_63               : 33;
1751#endif
1752	} s;
1753	struct cvmx_gmxx_bad_reg_cn30xx
1754	{
1755#if __BYTE_ORDER == __BIG_ENDIAN
1756	uint64_t reserved_31_63               : 33;
1757	uint64_t inb_nxa                      : 4;  /**< Inbound port > GMX_RX_PRTS */
1758	uint64_t statovr                      : 1;  /**< TX Statistics overflow */
1759	uint64_t reserved_25_25               : 1;
1760	uint64_t loststat                     : 3;  /**< TX Statistics data was over-written (per RGM port)
1761                                                         TX Stats are corrupted */
1762	uint64_t reserved_5_21                : 17;
1763	uint64_t out_ovr                      : 3;  /**< Outbound data FIFO overflow (per port) */
1764	uint64_t reserved_0_1                 : 2;
1765#else
1766	uint64_t reserved_0_1                 : 2;
1767	uint64_t out_ovr                      : 3;
1768	uint64_t reserved_5_21                : 17;
1769	uint64_t loststat                     : 3;
1770	uint64_t reserved_25_25               : 1;
1771	uint64_t statovr                      : 1;
1772	uint64_t inb_nxa                      : 4;
1773	uint64_t reserved_31_63               : 33;
1774#endif
1775	} cn30xx;
1776	struct cvmx_gmxx_bad_reg_cn30xx       cn31xx;
1777	struct cvmx_gmxx_bad_reg_s            cn38xx;
1778	struct cvmx_gmxx_bad_reg_s            cn38xxp2;
1779	struct cvmx_gmxx_bad_reg_cn30xx       cn50xx;
1780	struct cvmx_gmxx_bad_reg_cn52xx
1781	{
1782#if __BYTE_ORDER == __BIG_ENDIAN
1783	uint64_t reserved_31_63               : 33;
1784	uint64_t inb_nxa                      : 4;  /**< Inbound port > GMX_RX_PRTS */
1785	uint64_t statovr                      : 1;  /**< TX Statistics overflow
1786                                                         The common FIFO to SGMII and XAUI had an overflow
1787                                                         TX Stats are corrupted */
1788	uint64_t loststat                     : 4;  /**< TX Statistics data was over-written
1789                                                         In SGMII, one bit per port
1790                                                         In XAUI, only port0 is used
1791                                                         TX Stats are corrupted */
1792	uint64_t reserved_6_21                : 16;
1793	uint64_t out_ovr                      : 4;  /**< Outbound data FIFO overflow (per port) */
1794	uint64_t reserved_0_1                 : 2;
1795#else
1796	uint64_t reserved_0_1                 : 2;
1797	uint64_t out_ovr                      : 4;
1798	uint64_t reserved_6_21                : 16;
1799	uint64_t loststat                     : 4;
1800	uint64_t statovr                      : 1;
1801	uint64_t inb_nxa                      : 4;
1802	uint64_t reserved_31_63               : 33;
1803#endif
1804	} cn52xx;
1805	struct cvmx_gmxx_bad_reg_cn52xx       cn52xxp1;
1806	struct cvmx_gmxx_bad_reg_cn52xx       cn56xx;
1807	struct cvmx_gmxx_bad_reg_cn52xx       cn56xxp1;
1808	struct cvmx_gmxx_bad_reg_s            cn58xx;
1809	struct cvmx_gmxx_bad_reg_s            cn58xxp1;
1810	struct cvmx_gmxx_bad_reg_cn52xx       cn63xx;
1811	struct cvmx_gmxx_bad_reg_cn52xx       cn63xxp1;
1812};
1813typedef union cvmx_gmxx_bad_reg cvmx_gmxx_bad_reg_t;
1814
1815/**
1816 * cvmx_gmx#_bist
1817 *
1818 * GMX_BIST = GMX BIST Results
1819 *
1820 */
1821union cvmx_gmxx_bist
1822{
1823	uint64_t u64;
1824	struct cvmx_gmxx_bist_s
1825	{
1826#if __BYTE_ORDER == __BIG_ENDIAN
1827	uint64_t reserved_25_63               : 39;
1828	uint64_t status                       : 25; /**< BIST Results.
1829                                                         HW sets a bit in BIST for for memory that fails
1830                                                         - 0: gmx#.inb.fif_bnk0
1831                                                         - 1: gmx#.inb.fif_bnk1
1832                                                         - 2: gmx#.inb.fif_bnk2
1833                                                         - 3: gmx#.inb.fif_bnk3
1834                                                         - 4: gmx#.inb.fif_bnk_ext0
1835                                                         - 5: gmx#.inb.fif_bnk_ext1
1836                                                         - 6: gmx#.inb.fif_bnk_ext2
1837                                                         - 7: gmx#.inb.fif_bnk_ext3
1838                                                         - 8: gmx#.outb.fif.fif_bnk0
1839                                                         - 9: gmx#.outb.fif.fif_bnk1
1840                                                         - 10: gmx#.outb.fif.fif_bnk2
1841                                                         - 11: gmx#.outb.fif.fif_bnk3
1842                                                         - 12: gmx#.outb.fif.fif_bnk_ext0
1843                                                         - 13: gmx#.outb.fif.fif_bnk_ext1
1844                                                         - 14: gmx#.outb.fif.fif_bnk_ext2
1845                                                         - 15: gmx#.outb.fif.fif_bnk_ext3
1846                                                         - 16: gmx#.csr.gmi0.srf8x64m1_bist
1847                                                         - 17: gmx#.csr.gmi1.srf8x64m1_bist
1848                                                         - 18: gmx#.csr.gmi2.srf8x64m1_bist
1849                                                         - 19: gmx#.csr.gmi3.srf8x64m1_bist
1850                                                         - 20: gmx#.csr.drf20x32m2_bist
1851                                                         - 21: gmx#.csr.drf20x48m2_bist
1852                                                         - 22: gmx#.outb.stat.drf16x27m1_bist
1853                                                         - 23: gmx#.outb.stat.drf40x64m1_bist
1854                                                         - 24: xgmii.tx.drf16x38m1_async_bist */
1855#else
1856	uint64_t status                       : 25;
1857	uint64_t reserved_25_63               : 39;
1858#endif
1859	} s;
1860	struct cvmx_gmxx_bist_cn30xx
1861	{
1862#if __BYTE_ORDER == __BIG_ENDIAN
1863	uint64_t reserved_10_63               : 54;
1864	uint64_t status                       : 10; /**< BIST Results.
1865                                                          HW sets a bit in BIST for for memory that fails
1866                                                         - 0: gmx#.inb.dpr512x78m4_bist
1867                                                         - 1: gmx#.outb.fif.dpr512x71m4_bist
1868                                                         - 2: gmx#.csr.gmi0.srf8x64m1_bist
1869                                                         - 3: gmx#.csr.gmi1.srf8x64m1_bist
1870                                                         - 4: gmx#.csr.gmi2.srf8x64m1_bist
1871                                                         - 5: 0
1872                                                         - 6: gmx#.csr.drf20x80m1_bist
1873                                                         - 7: gmx#.outb.stat.drf16x27m1_bist
1874                                                         - 8: gmx#.outb.stat.drf40x64m1_bist
1875                                                         - 9: 0 */
1876#else
1877	uint64_t status                       : 10;
1878	uint64_t reserved_10_63               : 54;
1879#endif
1880	} cn30xx;
1881	struct cvmx_gmxx_bist_cn30xx          cn31xx;
1882	struct cvmx_gmxx_bist_cn30xx          cn38xx;
1883	struct cvmx_gmxx_bist_cn30xx          cn38xxp2;
1884	struct cvmx_gmxx_bist_cn50xx
1885	{
1886#if __BYTE_ORDER == __BIG_ENDIAN
1887	uint64_t reserved_12_63               : 52;
1888	uint64_t status                       : 12; /**< BIST Results.
1889                                                         HW sets a bit in BIST for for memory that fails */
1890#else
1891	uint64_t status                       : 12;
1892	uint64_t reserved_12_63               : 52;
1893#endif
1894	} cn50xx;
1895	struct cvmx_gmxx_bist_cn52xx
1896	{
1897#if __BYTE_ORDER == __BIG_ENDIAN
1898	uint64_t reserved_16_63               : 48;
1899	uint64_t status                       : 16; /**< BIST Results.
1900                                                         HW sets a bit in BIST for for memory that fails
1901                                                         - 0: gmx#.inb.fif_bnk0
1902                                                         - 1: gmx#.inb.fif_bnk1
1903                                                         - 2: gmx#.inb.fif_bnk2
1904                                                         - 3: gmx#.inb.fif_bnk3
1905                                                         - 4: gmx#.outb.fif.fif_bnk0
1906                                                         - 5: gmx#.outb.fif.fif_bnk1
1907                                                         - 6: gmx#.outb.fif.fif_bnk2
1908                                                         - 7: gmx#.outb.fif.fif_bnk3
1909                                                         - 8: gmx#.csr.gmi0.srf8x64m1_bist
1910                                                         - 9: gmx#.csr.gmi1.srf8x64m1_bist
1911                                                         - 10: gmx#.csr.gmi2.srf8x64m1_bist
1912                                                         - 11: gmx#.csr.gmi3.srf8x64m1_bist
1913                                                         - 12: gmx#.csr.drf20x80m1_bist
1914                                                         - 13: gmx#.outb.stat.drf16x27m1_bist
1915                                                         - 14: gmx#.outb.stat.drf40x64m1_bist
1916                                                         - 15: xgmii.tx.drf16x38m1_async_bist */
1917#else
1918	uint64_t status                       : 16;
1919	uint64_t reserved_16_63               : 48;
1920#endif
1921	} cn52xx;
1922	struct cvmx_gmxx_bist_cn52xx          cn52xxp1;
1923	struct cvmx_gmxx_bist_cn52xx          cn56xx;
1924	struct cvmx_gmxx_bist_cn52xx          cn56xxp1;
1925	struct cvmx_gmxx_bist_cn58xx
1926	{
1927#if __BYTE_ORDER == __BIG_ENDIAN
1928	uint64_t reserved_17_63               : 47;
1929	uint64_t status                       : 17; /**< BIST Results.
1930                                                         HW sets a bit in BIST for for memory that fails
1931                                                         - 0: gmx#.inb.fif_bnk0
1932                                                         - 1: gmx#.inb.fif_bnk1
1933                                                         - 2: gmx#.inb.fif_bnk2
1934                                                         - 3: gmx#.inb.fif_bnk3
1935                                                         - 4: gmx#.outb.fif.fif_bnk0
1936                                                         - 5: gmx#.outb.fif.fif_bnk1
1937                                                         - 6: gmx#.outb.fif.fif_bnk2
1938                                                         - 7: gmx#.outb.fif.fif_bnk3
1939                                                         - 8: gmx#.csr.gmi0.srf8x64m1_bist
1940                                                         - 9: gmx#.csr.gmi1.srf8x64m1_bist
1941                                                         - 10: gmx#.csr.gmi2.srf8x64m1_bist
1942                                                         - 11: gmx#.csr.gmi3.srf8x64m1_bist
1943                                                         - 12: gmx#.csr.drf20x80m1_bist
1944                                                         - 13: gmx#.outb.stat.drf16x27m1_bist
1945                                                         - 14: gmx#.outb.stat.drf40x64m1_bist
1946                                                         - 15: gmx#.outb.ncb.drf16x76m1_bist
1947                                                         - 16: gmx#.outb.fif.srf32x16m2_bist */
1948#else
1949	uint64_t status                       : 17;
1950	uint64_t reserved_17_63               : 47;
1951#endif
1952	} cn58xx;
1953	struct cvmx_gmxx_bist_cn58xx          cn58xxp1;
1954	struct cvmx_gmxx_bist_s               cn63xx;
1955	struct cvmx_gmxx_bist_s               cn63xxp1;
1956};
1957typedef union cvmx_gmxx_bist cvmx_gmxx_bist_t;
1958
1959/**
1960 * cvmx_gmx#_clk_en
1961 *
1962 * DO NOT DOCUMENT THIS REGISTER - IT IS NOT OFFICIAL
1963 *
1964 */
1965union cvmx_gmxx_clk_en
1966{
1967	uint64_t u64;
1968	struct cvmx_gmxx_clk_en_s
1969	{
1970#if __BYTE_ORDER == __BIG_ENDIAN
1971	uint64_t reserved_1_63                : 63;
1972	uint64_t clk_en                       : 1;  /**< Force the clock enables on */
1973#else
1974	uint64_t clk_en                       : 1;
1975	uint64_t reserved_1_63                : 63;
1976#endif
1977	} s;
1978	struct cvmx_gmxx_clk_en_s             cn52xx;
1979	struct cvmx_gmxx_clk_en_s             cn52xxp1;
1980	struct cvmx_gmxx_clk_en_s             cn56xx;
1981	struct cvmx_gmxx_clk_en_s             cn56xxp1;
1982	struct cvmx_gmxx_clk_en_s             cn63xx;
1983	struct cvmx_gmxx_clk_en_s             cn63xxp1;
1984};
1985typedef union cvmx_gmxx_clk_en cvmx_gmxx_clk_en_t;
1986
1987/**
1988 * cvmx_gmx#_hg2_control
1989 *
1990 * Notes:
1991 * The HiGig2 TX and RX enable would normally be both set together for HiGig2 messaging. However
1992 * setting just the TX or RX bit will result in only the HG2 message transmit or the receive
1993 * capability.
1994 * PHYS_EN and LOGL_EN bits when 1, allow link pause or back pressure to PKO as per received
1995 * HiGig2 message. When 0, link pause and back pressure to PKO in response to received messages
1996 * are disabled.
1997 *
1998 * GMX*_TX_XAUI_CTL[HG_EN] must be set to one(to enable HiGig) whenever either HG2TX_EN or HG2RX_EN
1999 * are set.
2000 *
2001 * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN
2002 * are set.
2003 *
2004 * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
2005 * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when
2006 * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1
2007 * and GMX*_RX0_UDD_SKP[LEN]=16.) The HW can only auto-generate backpressure via HiGig2 messages
2008 * (optionally, when HG2TX_EN=1) with the HiGig2 protocol.
2009 */
2010union cvmx_gmxx_hg2_control
2011{
2012	uint64_t u64;
2013	struct cvmx_gmxx_hg2_control_s
2014	{
2015#if __BYTE_ORDER == __BIG_ENDIAN
2016	uint64_t reserved_19_63               : 45;
2017	uint64_t hg2tx_en                     : 1;  /**< Enable Transmission of HG2 phys and logl messages
2018                                                         When set, also disables HW auto-generated (802.3
2019                                                         and CBFC) pause frames. (OCTEON cannot generate
2020                                                         proper 802.3 or CBFC pause frames in HiGig2 mode.) */
2021	uint64_t hg2rx_en                     : 1;  /**< Enable extraction and processing of HG2 message
2022                                                         packet from RX flow. Physical logical pause info
2023                                                         is used to pause physical link, back pressure PKO
2024                                                         HG2RX_EN must be set when HiGig2 messages are
2025                                                         present in the receive stream. */
2026	uint64_t phys_en                      : 1;  /**< 1 bit physical link pause enable for recevied
2027                                                         HiGig2 physical pause message */
2028	uint64_t logl_en                      : 16; /**< 16 bit xof enables for recevied HiGig2 messages
2029                                                         or CBFC packets */
2030#else
2031	uint64_t logl_en                      : 16;
2032	uint64_t phys_en                      : 1;
2033	uint64_t hg2rx_en                     : 1;
2034	uint64_t hg2tx_en                     : 1;
2035	uint64_t reserved_19_63               : 45;
2036#endif
2037	} s;
2038	struct cvmx_gmxx_hg2_control_s        cn52xx;
2039	struct cvmx_gmxx_hg2_control_s        cn52xxp1;
2040	struct cvmx_gmxx_hg2_control_s        cn56xx;
2041	struct cvmx_gmxx_hg2_control_s        cn63xx;
2042	struct cvmx_gmxx_hg2_control_s        cn63xxp1;
2043};
2044typedef union cvmx_gmxx_hg2_control cvmx_gmxx_hg2_control_t;
2045
2046/**
2047 * cvmx_gmx#_inf_mode
2048 *
2049 * GMX_INF_MODE = Interface Mode
2050 *
2051 */
2052union cvmx_gmxx_inf_mode
2053{
2054	uint64_t u64;
2055	struct cvmx_gmxx_inf_mode_s
2056	{
2057#if __BYTE_ORDER == __BIG_ENDIAN
2058	uint64_t reserved_12_63               : 52;
2059	uint64_t speed                        : 4;  /**< Interface Speed */
2060	uint64_t reserved_6_7                 : 2;
2061	uint64_t mode                         : 2;  /**< Interface Electrical Operating Mode
2062                                                         - 0: SGMII (v1.8)
2063                                                         - 1: XAUI (IEEE 802.3-2005) */
2064	uint64_t reserved_3_3                 : 1;
2065	uint64_t p0mii                        : 1;  /**< Port 0 Interface Mode
2066                                                         - 0: Port 0 is RGMII
2067                                                         - 1: Port 0 is MII */
2068	uint64_t en                           : 1;  /**< Interface Enable
2069                                                         Must be set to enable the packet interface.
2070                                                         Should be enabled before any other requests to
2071                                                         GMX including enabling port back pressure with
2072                                                         IPD_CTL_STATUS[PBP_EN] */
2073	uint64_t type                         : 1;  /**< Interface Protocol Type
2074                                                         - 0: SGMII/1000Base-X
2075                                                         - 1: XAUI */
2076#else
2077	uint64_t type                         : 1;
2078	uint64_t en                           : 1;
2079	uint64_t p0mii                        : 1;
2080	uint64_t reserved_3_3                 : 1;
2081	uint64_t mode                         : 2;
2082	uint64_t reserved_6_7                 : 2;
2083	uint64_t speed                        : 4;
2084	uint64_t reserved_12_63               : 52;
2085#endif
2086	} s;
2087	struct cvmx_gmxx_inf_mode_cn30xx
2088	{
2089#if __BYTE_ORDER == __BIG_ENDIAN
2090	uint64_t reserved_3_63                : 61;
2091	uint64_t p0mii                        : 1;  /**< Port 0 Interface Mode
2092                                                         - 0: Port 0 is RGMII
2093                                                         - 1: Port 0 is MII */
2094	uint64_t en                           : 1;  /**< Interface Enable
2095                                                         Must be set to enable the packet interface.
2096                                                         Should be enabled before any other requests to
2097                                                         GMX including enabling port back pressure with
2098                                                         IPD_CTL_STATUS[PBP_EN] */
2099	uint64_t type                         : 1;  /**< Port 1/2 Interface Mode
2100                                                         - 0: Ports 1 and 2 are RGMII
2101                                                         - 1: Port  1 is GMII/MII, Port 2 is unused
2102                                                             GMII/MII is selected by GMX_PRT1_CFG[SPEED] */
2103#else
2104	uint64_t type                         : 1;
2105	uint64_t en                           : 1;
2106	uint64_t p0mii                        : 1;
2107	uint64_t reserved_3_63                : 61;
2108#endif
2109	} cn30xx;
2110	struct cvmx_gmxx_inf_mode_cn31xx
2111	{
2112#if __BYTE_ORDER == __BIG_ENDIAN
2113	uint64_t reserved_2_63                : 62;
2114	uint64_t en                           : 1;  /**< Interface Enable
2115                                                         Must be set to enable the packet interface.
2116                                                         Should be enabled before any other requests to
2117                                                         GMX including enabling port back pressure with
2118                                                         IPD_CTL_STATUS[PBP_EN] */
2119	uint64_t type                         : 1;  /**< Interface Mode
2120                                                         - 0: All three ports are RGMII ports
2121                                                         - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */
2122#else
2123	uint64_t type                         : 1;
2124	uint64_t en                           : 1;
2125	uint64_t reserved_2_63                : 62;
2126#endif
2127	} cn31xx;
2128	struct cvmx_gmxx_inf_mode_cn31xx      cn38xx;
2129	struct cvmx_gmxx_inf_mode_cn31xx      cn38xxp2;
2130	struct cvmx_gmxx_inf_mode_cn30xx      cn50xx;
2131	struct cvmx_gmxx_inf_mode_cn52xx
2132	{
2133#if __BYTE_ORDER == __BIG_ENDIAN
2134	uint64_t reserved_10_63               : 54;
2135	uint64_t speed                        : 2;  /**< Interface Speed
2136                                                         - 0: 1.250GHz
2137                                                         - 1: 2.500GHz
2138                                                         - 2: 3.125GHz
2139                                                         - 3: 3.750GHz */
2140	uint64_t reserved_6_7                 : 2;
2141	uint64_t mode                         : 2;  /**< Interface Electrical Operating Mode
2142                                                         - 0: Disabled (PCIe)
2143                                                         - 1: XAUI (IEEE 802.3-2005)
2144                                                         - 2: SGMII (v1.8)
2145                                                         - 3: PICMG3.1 */
2146	uint64_t reserved_2_3                 : 2;
2147	uint64_t en                           : 1;  /**< Interface Enable
2148                                                         Must be set to enable the packet interface.
2149                                                         Should be enabled before any other requests to
2150                                                         GMX including enabling port back pressure with
2151                                                         IPD_CTL_STATUS[PBP_EN] */
2152	uint64_t type                         : 1;  /**< Interface Protocol Type
2153                                                         - 0: SGMII/1000Base-X
2154                                                         - 1: XAUI */
2155#else
2156	uint64_t type                         : 1;
2157	uint64_t en                           : 1;
2158	uint64_t reserved_2_3                 : 2;
2159	uint64_t mode                         : 2;
2160	uint64_t reserved_6_7                 : 2;
2161	uint64_t speed                        : 2;
2162	uint64_t reserved_10_63               : 54;
2163#endif
2164	} cn52xx;
2165	struct cvmx_gmxx_inf_mode_cn52xx      cn52xxp1;
2166	struct cvmx_gmxx_inf_mode_cn52xx      cn56xx;
2167	struct cvmx_gmxx_inf_mode_cn52xx      cn56xxp1;
2168	struct cvmx_gmxx_inf_mode_cn31xx      cn58xx;
2169	struct cvmx_gmxx_inf_mode_cn31xx      cn58xxp1;
2170	struct cvmx_gmxx_inf_mode_cn63xx
2171	{
2172#if __BYTE_ORDER == __BIG_ENDIAN
2173	uint64_t reserved_12_63               : 52;
2174	uint64_t speed                        : 4;  /**< Interface Speed */
2175	uint64_t reserved_5_7                 : 3;
2176	uint64_t mode                         : 1;  /**< Interface Electrical Operating Mode
2177                                                         - 0: SGMII (v1.8)
2178                                                         - 1: XAUI (IEEE 802.3-2005) */
2179	uint64_t reserved_2_3                 : 2;
2180	uint64_t en                           : 1;  /**< Interface Enable
2181                                                         Must be set to enable the packet interface.
2182                                                         Should be enabled before any other requests to
2183                                                         GMX including enabling port back pressure with
2184                                                         IPD_CTL_STATUS[PBP_EN] */
2185	uint64_t type                         : 1;  /**< Interface Protocol Type
2186                                                         - 0: SGMII/1000Base-X
2187                                                         - 1: XAUI */
2188#else
2189	uint64_t type                         : 1;
2190	uint64_t en                           : 1;
2191	uint64_t reserved_2_3                 : 2;
2192	uint64_t mode                         : 1;
2193	uint64_t reserved_5_7                 : 3;
2194	uint64_t speed                        : 4;
2195	uint64_t reserved_12_63               : 52;
2196#endif
2197	} cn63xx;
2198	struct cvmx_gmxx_inf_mode_cn63xx      cn63xxp1;
2199};
2200typedef union cvmx_gmxx_inf_mode cvmx_gmxx_inf_mode_t;
2201
2202/**
2203 * cvmx_gmx#_nxa_adr
2204 *
2205 * GMX_NXA_ADR = NXA Port Address
2206 *
2207 */
union cvmx_gmxx_nxa_adr
{
	uint64_t u64;
	struct cvmx_gmxx_nxa_adr_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63                : 58;
	uint64_t prt                          : 6;  /**< Logged address for NXA exceptions
                                                         The logged address will be from the first
                                                         exception that caused the problem.  NCB has
                                                         higher priority than PKO and will win.
                                                         (only PRT[3:0]) */
#else
	/* Little-endian view: same fields, listed LSB first. */
	uint64_t prt                          : 6;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	struct cvmx_gmxx_nxa_adr_s            cn30xx;
	struct cvmx_gmxx_nxa_adr_s            cn31xx;
	struct cvmx_gmxx_nxa_adr_s            cn38xx;
	struct cvmx_gmxx_nxa_adr_s            cn38xxp2;
	struct cvmx_gmxx_nxa_adr_s            cn50xx;
	struct cvmx_gmxx_nxa_adr_s            cn52xx;
	struct cvmx_gmxx_nxa_adr_s            cn52xxp1;
	struct cvmx_gmxx_nxa_adr_s            cn56xx;
	struct cvmx_gmxx_nxa_adr_s            cn56xxp1;
	struct cvmx_gmxx_nxa_adr_s            cn58xx;
	struct cvmx_gmxx_nxa_adr_s            cn58xxp1;
	struct cvmx_gmxx_nxa_adr_s            cn63xx;
	struct cvmx_gmxx_nxa_adr_s            cn63xxp1;
};
typedef union cvmx_gmxx_nxa_adr cvmx_gmxx_nxa_adr_t;
2240
2241/**
2242 * cvmx_gmx#_prt#_cbfc_ctl
2243 *
2244 * ** HG2 message CSRs end
2245 *
2246 *
2247 * Notes:
2248 * XOFF for a specific port is XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>)
2249 *
2250 */
union cvmx_gmxx_prtx_cbfc_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_prtx_cbfc_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t phys_en                      : 16; /**< Determines which ports will have physical
                                                         backpressure pause packets.
                                                         The value placed in the Class Enable Vector
                                                         field of the CBFC pause packet will be
                                                         PHYS_EN | LOGL_EN */
	uint64_t logl_en                      : 16; /**< Determines which ports will have logical
                                                         backpressure pause packets.
                                                         The value placed in the Class Enable Vector
                                                         field of the CBFC pause packet will be
                                                         PHYS_EN | LOGL_EN */
	uint64_t phys_bp                      : 16; /**< When RX_EN is set and the HW is backpressuring any
                                                         ports (from either CBFC pause packets or the
                                                         GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports
                                                         indicated by PHYS_BP are backpressured, simulate
                                                         physical backpressure by deferring all packets on
                                                         the transmitter. */
	uint64_t reserved_4_15                : 12;
	uint64_t bck_en                       : 1;  /**< Forward CBFC Pause information to BP block */
	uint64_t drp_en                       : 1;  /**< Drop Control CBFC Pause Frames */
	uint64_t tx_en                        : 1;  /**< When set, allow for CBFC Pause Packets
                                                         Must be clear in HiGig2 mode i.e. when
                                                         GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                         GMX_RX_UDD_SKP[SKIP]=16. */
	uint64_t rx_en                        : 1;  /**< When set, allow for CBFC Pause Packets
                                                         Must be clear in HiGig2 mode i.e. when
                                                         GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                         GMX_RX_UDD_SKP[SKIP]=16. */
#else
	/* Little-endian view: same fields, listed LSB first. */
	uint64_t rx_en                        : 1;
	uint64_t tx_en                        : 1;
	uint64_t drp_en                       : 1;
	uint64_t bck_en                       : 1;
	uint64_t reserved_4_15                : 12;
	uint64_t phys_bp                      : 16;
	uint64_t logl_en                      : 16;
	uint64_t phys_en                      : 16;
#endif
	} s;
	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn52xx;
	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn56xx;
	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn63xx;
	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn63xxp1;
};
typedef union cvmx_gmxx_prtx_cbfc_ctl cvmx_gmxx_prtx_cbfc_ctl_t;
2301
2302/**
2303 * cvmx_gmx#_prt#_cfg
2304 *
2305 * GMX_PRT_CFG = Port description
2306 *
2307 */
union cvmx_gmxx_prtx_cfg
{
	uint64_t u64;
	struct cvmx_gmxx_prtx_cfg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_14_63               : 50;
	uint64_t tx_idle                      : 1;  /**< TX Machine is idle */
	uint64_t rx_idle                      : 1;  /**< RX Machine is idle */
	uint64_t reserved_9_11                : 3;
	uint64_t speed_msb                    : 1;  /**< Link Speed MSB [SPEED_MSB:SPEED]
                                                         10 = 10Mbs operation
                                                         00 = 100Mbs operation
                                                         01 = 1000Mbs operation
                                                         11 = Reserved
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_4_7                 : 4;
	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
                                                         0 = 512 bit times (10/100Mbs operation)
                                                         1 = 4096 bit times (1000Mbs operation)
                                                         (SGMII/1000Base-X only) */
	uint64_t duplex                       : 1;  /**< Duplex
                                                         0 = Half Duplex (collisions/extensions/bursts)
                                                         1 = Full Duplex
                                                         (SGMII/1000Base-X only) */
	uint64_t speed                        : 1;  /**< Link Speed LSB [SPEED_MSB:SPEED]
                                                         10 = 10Mbs operation
                                                         00 = 100Mbs operation
                                                         01 = 1000Mbs operation
                                                         11 = Reserved
                                                         (SGMII/1000Base-X only) */
	uint64_t en                           : 1;  /**< Link Enable
                                                         When EN is clear, packets will not be received
                                                         or transmitted (including PAUSE and JAM packets).
                                                         If EN is cleared while a packet is currently
                                                         being received or transmitted, the packet will
                                                         be allowed to complete before the bus is idled.
                                                         On the RX side, subsequent packets in a burst
                                                         will be ignored. */
#else
	/* Little-endian view: same fields, listed LSB first. */
	uint64_t en                           : 1;
	uint64_t speed                        : 1;
	uint64_t duplex                       : 1;
	uint64_t slottime                     : 1;
	uint64_t reserved_4_7                 : 4;
	uint64_t speed_msb                    : 1;
	uint64_t reserved_9_11                : 3;
	uint64_t rx_idle                      : 1;
	uint64_t tx_idle                      : 1;
	uint64_t reserved_14_63               : 50;
#endif
	} s;
	struct cvmx_gmxx_prtx_cfg_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
                                                         0 = 512 bit times (10/100Mbs operation)
                                                         1 = 4096 bit times (1000Mbs operation) */
	uint64_t duplex                       : 1;  /**< Duplex
                                                         0 = Half Duplex (collisions/extensions/bursts)
                                                         1 = Full Duplex */
	uint64_t speed                        : 1;  /**< Link Speed
                                                         0 = 10/100Mbs operation
                                                             (in RGMII mode, GMX_TX_CLK[CLK_CNT] >  1)
                                                             (in MII   mode, GMX_TX_CLK[CLK_CNT] == 1)
                                                         1 = 1000Mbs operation */
	uint64_t en                           : 1;  /**< Link Enable
                                                         When EN is clear, packets will not be received
                                                         or transmitted (including PAUSE and JAM packets).
                                                         If EN is cleared while a packet is currently
                                                         being received or transmitted, the packet will
                                                         be allowed to complete before the bus is idled.
                                                         On the RX side, subsequent packets in a burst
                                                         will be ignored. */
#else
	/* Little-endian view: same fields, listed LSB first. */
	uint64_t en                           : 1;
	uint64_t speed                        : 1;
	uint64_t duplex                       : 1;
	uint64_t slottime                     : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} cn30xx;
	struct cvmx_gmxx_prtx_cfg_cn30xx      cn31xx;
	struct cvmx_gmxx_prtx_cfg_cn30xx      cn38xx;
	struct cvmx_gmxx_prtx_cfg_cn30xx      cn38xxp2;
	struct cvmx_gmxx_prtx_cfg_cn30xx      cn50xx;
	struct cvmx_gmxx_prtx_cfg_s           cn52xx;
	struct cvmx_gmxx_prtx_cfg_s           cn52xxp1;
	struct cvmx_gmxx_prtx_cfg_s           cn56xx;
	struct cvmx_gmxx_prtx_cfg_s           cn56xxp1;
	struct cvmx_gmxx_prtx_cfg_cn30xx      cn58xx;
	struct cvmx_gmxx_prtx_cfg_cn30xx      cn58xxp1;
	struct cvmx_gmxx_prtx_cfg_s           cn63xx;
	struct cvmx_gmxx_prtx_cfg_s           cn63xxp1;
};
typedef union cvmx_gmxx_prtx_cfg cvmx_gmxx_prtx_cfg_t;
2405
2406/**
2407 * cvmx_gmx#_rx#_adr_cam0
2408 *
2409 * GMX_RX_ADR_CAM = Address Filtering Control
2410 *
2411 */
union cvmx_gmxx_rxx_adr_cam0
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam0_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam0_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam0 cvmx_gmxx_rxx_adr_cam0_t;
2445
2446/**
2447 * cvmx_gmx#_rx#_adr_cam1
2448 *
2449 * GMX_RX_ADR_CAM = Address Filtering Control
2450 *
2451 */
union cvmx_gmxx_rxx_adr_cam1
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam1_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam1_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam1 cvmx_gmxx_rxx_adr_cam1_t;
2485
2486/**
2487 * cvmx_gmx#_rx#_adr_cam2
2488 *
2489 * GMX_RX_ADR_CAM = Address Filtering Control
2490 *
2491 */
union cvmx_gmxx_rxx_adr_cam2
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam2_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam2_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam2 cvmx_gmxx_rxx_adr_cam2_t;
2525
2526/**
2527 * cvmx_gmx#_rx#_adr_cam3
2528 *
2529 * GMX_RX_ADR_CAM = Address Filtering Control
2530 *
2531 */
union cvmx_gmxx_rxx_adr_cam3
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam3_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam3_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam3 cvmx_gmxx_rxx_adr_cam3_t;
2565
2566/**
2567 * cvmx_gmx#_rx#_adr_cam4
2568 *
2569 * GMX_RX_ADR_CAM = Address Filtering Control
2570 *
2571 */
union cvmx_gmxx_rxx_adr_cam4
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam4_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam4_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam4 cvmx_gmxx_rxx_adr_cam4_t;
2605
2606/**
2607 * cvmx_gmx#_rx#_adr_cam5
2608 *
2609 * GMX_RX_ADR_CAM = Address Filtering Control
2610 *
2611 */
union cvmx_gmxx_rxx_adr_cam5
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam5_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t adr                          : 64; /**< The DMAC address to match on
                                                         Each entry contributes 8 bits to one of 8 matchers
                                                         Write transactions to GMX_RX_ADR_CAM will not
                                                         change the CSR when GMX_PRT_CFG[EN] is enabled
                                                         The CAM matches against unicast or multicast DMAC
                                                         addresses.
                                                         In XAUI mode, all ports will reflect the data
                                                         written to port0. */
#else
	uint64_t adr                          : 64;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn30xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn31xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn38xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn50xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn52xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn56xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn58xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn63xx;
	struct cvmx_gmxx_rxx_adr_cam5_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam5 cvmx_gmxx_rxx_adr_cam5_t;
2645
2646/**
2647 * cvmx_gmx#_rx#_adr_cam_en
2648 *
2649 * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
2650 *
2651 */
union cvmx_gmxx_rxx_adr_cam_en
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_cam_en_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t en                           : 8;  /**< CAM Entry Enables
                                                         One bit per CAM entry (EN<i> enables entry i,
                                                         i = 0..7); see the GMX_RX_ADR_CTL filter
                                                         algorithm for how entries are matched */
#else
	uint64_t en                           : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn30xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn31xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn38xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn38xxp2;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn50xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn52xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn52xxp1;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn56xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn56xxp1;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn58xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn58xxp1;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn63xx;
	struct cvmx_gmxx_rxx_adr_cam_en_s     cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_cam_en cvmx_gmxx_rxx_adr_cam_en_t;
2680
2681/**
2682 * cvmx_gmx#_rx#_adr_ctl
2683 *
2684 * GMX_RX_ADR_CTL = Address Filtering Control
2685 *
2686 *
2687 * Notes:
2688 * * ALGORITHM
2689 * Here is some pseudo code that represents the address filter behavior.
2690 *
2691 *    @verbatim
2692 *    bool dmac_addr_filter(uint8 prt, uint48 dmac) [
2693 *      ASSERT(prt >= 0 && prt <= 3);
2694 *      if (is_bcst(dmac))                               // broadcast accept
2695 *        return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
2696 *      if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 1)   // multicast reject
2697 *        return REJECT;
2698 *      if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 2)   // multicast accept
2699 *        return ACCEPT;
2700 *
2701 *      cam_hit = 0;
2702 *
2703 *      for (i=0; i<8; i++) [
2704 *        if (GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
2705 *          continue;
2706 *        uint48 unswizzled_mac_adr = 0x0;
2707 *        for (j=5; j>=0; j--) [
2708 *           unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
2709 *        ]
2710 *        if (unswizzled_mac_adr == dmac) [
2711 *          cam_hit = 1;
2712 *          break;
2713 *        ]
2714 *      ]
2715 *
2716 *      if (cam_hit)
2717 *        return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
2718 *      else
2719 *        return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
2720 *    ]
2721 *    @endverbatim
2722 */
union cvmx_gmxx_rxx_adr_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_rxx_adr_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t cam_mode                     : 1;  /**< Allow or deny DMAC address filter
                                                         0 = reject the packet on DMAC address match
                                                         1 = accept the packet on DMAC address match */
	uint64_t mcst                         : 2;  /**< Multicast Mode
                                                         0 = Use the Address Filter CAM
                                                         1 = Force reject all multicast packets
                                                         2 = Force accept all multicast packets
                                                         3 = Reserved */
	uint64_t bcst                         : 1;  /**< Accept All Broadcast Packets */
#else
	/* Little-endian view: same fields, listed LSB first. */
	uint64_t bcst                         : 1;
	uint64_t mcst                         : 2;
	uint64_t cam_mode                     : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn30xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn31xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn38xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn38xxp2;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn50xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn52xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn52xxp1;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn56xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn56xxp1;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn58xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn58xxp1;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn63xx;
	struct cvmx_gmxx_rxx_adr_ctl_s        cn63xxp1;
};
typedef union cvmx_gmxx_rxx_adr_ctl cvmx_gmxx_rxx_adr_ctl_t;
2761
2762/**
2763 * cvmx_gmx#_rx#_decision
2764 *
2765 * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
2766 *
2767 *
2768 * Notes:
2769 * As each byte in a packet is received by GMX, the L2 byte count is compared
2770 * against the GMX_RX_DECISION[CNT].  The L2 byte count is the number of bytes
2771 * from the beginning of the L2 header (DMAC).  In normal operation, the L2
2772 * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any
2773 * optional UDD skip data (GMX_RX_UDD_SKP[LEN]).
2774 *
2775 * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
2776 * packet and would require UDD skip length to account for them.
2777 *
2778 *                                                 L2 Size
2779 * Port Mode             <GMX_RX_DECISION bytes (default=24)       >=GMX_RX_DECISION bytes (default=24)
2780 *
2781 * Full Duplex           accept packet                             apply filters
2782 *                       no filtering is applied                   accept packet based on DMAC and PAUSE packet filters
2783 *
2784 * Half Duplex           drop packet                               apply filters
2785 *                       packet is unconditionally dropped         accept packet based on DMAC
2786 *
2787 * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
2788 */
union cvmx_gmxx_rxx_decision
{
	/* Whole register accessed as a single 64-bit word. */
	uint64_t u64;
	/*
	 * Bit-field view; field order flips with endianness so each field
	 * lands on the same physical register bits on either build.
	 */
	struct cvmx_gmxx_rxx_decision_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t cnt                          : 5;  /**< The byte count to decide when to accept or filter
                                                         a packet. */
#else
	uint64_t cnt                          : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	/* Every supported chip uses the common layout above unchanged. */
	struct cvmx_gmxx_rxx_decision_s       cn30xx;
	struct cvmx_gmxx_rxx_decision_s       cn31xx;
	struct cvmx_gmxx_rxx_decision_s       cn38xx;
	struct cvmx_gmxx_rxx_decision_s       cn38xxp2;
	struct cvmx_gmxx_rxx_decision_s       cn50xx;
	struct cvmx_gmxx_rxx_decision_s       cn52xx;
	struct cvmx_gmxx_rxx_decision_s       cn52xxp1;
	struct cvmx_gmxx_rxx_decision_s       cn56xx;
	struct cvmx_gmxx_rxx_decision_s       cn56xxp1;
	struct cvmx_gmxx_rxx_decision_s       cn58xx;
	struct cvmx_gmxx_rxx_decision_s       cn58xxp1;
	struct cvmx_gmxx_rxx_decision_s       cn63xx;
	struct cvmx_gmxx_rxx_decision_s       cn63xxp1;
};
typedef union cvmx_gmxx_rxx_decision cvmx_gmxx_rxx_decision_t;
2818
2819/**
2820 * cvmx_gmx#_rx#_frm_chk
2821 *
2822 * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
2823 *
2824 *
2825 * Notes:
2826 * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
2827 *
2828 * In XAUI mode prt0 is used for checking.
2829 */
union cvmx_gmxx_rxx_frm_chk
{
	/* Whole register accessed as a single 64-bit word. */
	uint64_t u64;
	/*
	 * Superset layout (all ten error-enable bits).  Per-chip variants
	 * below reserve the bits their silicon does not implement.  Field
	 * order flips with endianness so each field lands on the same
	 * physical register bits on either build.
	 */
	struct cvmx_gmxx_rxx_frm_chk_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_10_63               : 54;
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
#else
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t maxerr                       : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t lenerr                       : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t reserved_10_63               : 54;
#endif
	} s;
	struct cvmx_gmxx_rxx_frm_chk_s        cn30xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn31xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn38xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn38xxp2;
	/* cn50xx: bits 0 (minerr), 2 (maxerr) and 6 (lenerr) are reserved. */
	struct cvmx_gmxx_rxx_frm_chk_cn50xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_10_63               : 54;
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
	uint64_t reserved_6_6                 : 1;
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
	uint64_t reserved_0_0                 : 1;
#else
	uint64_t reserved_0_0                 : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t reserved_6_6                 : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t reserved_10_63               : 54;
#endif
	} cn50xx;
	/* cn52xx/cn56xx: additionally drop niberr and alnerr (bits 5-6, 9). */
	struct cvmx_gmxx_rxx_frm_chk_cn52xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_0_0                 : 1;
#else
	uint64_t reserved_0_0                 : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} cn52xx;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn52xxp1;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn56xx;
	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn56xxp1;
	struct cvmx_gmxx_rxx_frm_chk_s        cn58xx;
	struct cvmx_gmxx_rxx_frm_chk_s        cn58xxp1;
	/* cn63xx: like cn52xx but bit 0 is minerr instead of reserved. */
	struct cvmx_gmxx_rxx_frm_chk_cn63xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
#else
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} cn63xx;
	struct cvmx_gmxx_rxx_frm_chk_cn63xx   cn63xxp1;
};
typedef union cvmx_gmxx_rxx_frm_chk cvmx_gmxx_rxx_frm_chk_t;
2952
2953/**
2954 * cvmx_gmx#_rx#_frm_ctl
2955 *
2956 * GMX_RX_FRM_CTL = Frame Control
2957 *
2958 *
2959 * Notes:
2960 * * PRE_STRP
2961 *   When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
2962 *   determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
2963 *   core as part of the packet.
2964 *
2965 *   In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
2966 *   size when checking against the MIN and MAX bounds.  Furthermore, the bytes
2967 *   are skipped when locating the start of the L2 header for DMAC and Control
2968 *   frame recognition.
2969 *
2970 * * CTL_BCK/CTL_DRP
2971 *   These bits control how the HW handles incoming PAUSE packets.  Here are
2972 *   the most common modes of operation:
2973 *     CTL_BCK=1,CTL_DRP=1   - HW does it all
2974 *     CTL_BCK=0,CTL_DRP=0   - SW sees all pause frames
2975 *     CTL_BCK=0,CTL_DRP=1   - all pause frames are completely ignored
2976 *
2977 *   These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
2978 *   Since PAUSE packets only apply to fulldup operation, any PAUSE packet
2979 *   would constitute an exception which should be handled by the processing
2980 *   cores.  PAUSE packets should not be forwarded.
2981 */
2982union cvmx_gmxx_rxx_frm_ctl
2983{
2984	uint64_t u64;
2985	struct cvmx_gmxx_rxx_frm_ctl_s
2986	{
2987#if __BYTE_ORDER == __BIG_ENDIAN
2988	uint64_t reserved_13_63               : 51;
2989	uint64_t ptp_mode                     : 1;  /**< Timestamp mode
2990                                                         When PTP_MODE is set, a 64-bit timestamp will be
2991                                                         prepended to every incoming packet. The timestamp
2992                                                         bytes are added to the packet in such a way as to
2993                                                         not modify the packet's receive byte count.  This
2994                                                         implies that the GMX_RX_JABBER, MINERR,
2995                                                         GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
2996                                                         GMX_RX_STATS_* do not require any adjustment as
2997                                                         they operate on the received packet size.
2998                                                         When the packet reaches PKI, its size will
2999                                                         reflect the additional bytes and is subject to
3000                                                         the restrictions below.
3001                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
3002                                                         If PTP_MODE=1,
3003                                                          PIP_PRT_CFGx[SKIP] should be increased by 8.
3004                                                          PIP_PRT_CFGx[HIGIG_EN] should be 0.
3005                                                          PIP_FRM_CHKx[MAXLEN] should be increased by 8.
3006                                                          PIP_FRM_CHKx[MINLEN] should be increased by 8.
3007                                                          PIP_TAG_INCx[EN] should be adjusted. */
3008	uint64_t reserved_11_11               : 1;
3009	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
3010                                                         due to PARITAL packets */
3011	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
3012                                                         regardless of the number of previous PREAMBLE
3013                                                         nibbles.  In this mode, PRE_STRP should be set to
3014                                                         account for the variable nature of the PREAMBLE.
3015                                                         PRE_CHK must be set to enable this and all
3016                                                         PREAMBLE features.
3017                                                         (SGMII at 10/100Mbs only) */
3018	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
3019                                                         sized pkts with padding in the client data
3020                                                         (PASS3 Only) */
3021	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
3022	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
3023                                                         GMX will begin the frame at the first SFD.
3024                                                         PRE_CHK must be set to enable this and all
3025                                                         PREAMBLE features.
3026                                                         (SGMII/1000Base-X only) */
3027	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3028	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3029                                                         Multicast address */
3030	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3031	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3032	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3033                                                         0=PREAMBLE+SFD is sent to core as part of frame
3034                                                         1=PREAMBLE+SFD is dropped
3035                                                         PRE_CHK must be set to enable this and all
3036                                                         PREAMBLE features.
3037                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3038	uint64_t pre_chk                      : 1;  /**< This port is configured to send a valid 802.3
3039                                                         PREAMBLE to begin every frame. GMX checks that a
3040                                                         valid PREAMBLE is received (based on PRE_FREE).
3041                                                         When a problem does occur within the PREAMBLE
3042                                                         seqeunce, the frame is marked as bad and not sent
3043                                                         into the core.  The GMX_GMX_RX_INT_REG[PCTERR]
3044                                                         interrupt is also raised.
3045                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
3046                                                         must be zero.
3047                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3048#else
3049	uint64_t pre_chk                      : 1;
3050	uint64_t pre_strp                     : 1;
3051	uint64_t ctl_drp                      : 1;
3052	uint64_t ctl_bck                      : 1;
3053	uint64_t ctl_mcst                     : 1;
3054	uint64_t ctl_smac                     : 1;
3055	uint64_t pre_free                     : 1;
3056	uint64_t vlan_len                     : 1;
3057	uint64_t pad_len                      : 1;
3058	uint64_t pre_align                    : 1;
3059	uint64_t null_dis                     : 1;
3060	uint64_t reserved_11_11               : 1;
3061	uint64_t ptp_mode                     : 1;
3062	uint64_t reserved_13_63               : 51;
3063#endif
3064	} s;
3065	struct cvmx_gmxx_rxx_frm_ctl_cn30xx
3066	{
3067#if __BYTE_ORDER == __BIG_ENDIAN
3068	uint64_t reserved_9_63                : 55;
3069	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
3070                                                         sized pkts with padding in the client data */
3071	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
3072	uint64_t pre_free                     : 1;  /**< Allows for less strict PREAMBLE checking.
3073                                                         0-7 cycles of PREAMBLE followed by SFD (pass 1.0)
3074                                                         0-254 cycles of PREAMBLE followed by SFD (else) */
3075	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3076	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3077                                                         Multicast address */
3078	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3079	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3080	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3081                                                         0=PREAMBLE+SFD is sent to core as part of frame
3082                                                         1=PREAMBLE+SFD is dropped */
3083	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
3084                                                         to begin every frame.  GMX checks that the
3085                                                         PREAMBLE is sent correctly */
3086#else
3087	uint64_t pre_chk                      : 1;
3088	uint64_t pre_strp                     : 1;
3089	uint64_t ctl_drp                      : 1;
3090	uint64_t ctl_bck                      : 1;
3091	uint64_t ctl_mcst                     : 1;
3092	uint64_t ctl_smac                     : 1;
3093	uint64_t pre_free                     : 1;
3094	uint64_t vlan_len                     : 1;
3095	uint64_t pad_len                      : 1;
3096	uint64_t reserved_9_63                : 55;
3097#endif
3098	} cn30xx;
3099	struct cvmx_gmxx_rxx_frm_ctl_cn31xx
3100	{
3101#if __BYTE_ORDER == __BIG_ENDIAN
3102	uint64_t reserved_8_63                : 56;
3103	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
3104	uint64_t pre_free                     : 1;  /**< Allows for less strict PREAMBLE checking.
3105                                                         0 - 7 cycles of PREAMBLE followed by SFD (pass1.0)
3106                                                         0 - 254 cycles of PREAMBLE followed by SFD (else) */
3107	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3108	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3109                                                         Multicast address */
3110	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3111	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3112	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3113                                                         0=PREAMBLE+SFD is sent to core as part of frame
3114                                                         1=PREAMBLE+SFD is dropped */
3115	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
3116                                                         to begin every frame.  GMX checks that the
3117                                                         PREAMBLE is sent correctly */
3118#else
3119	uint64_t pre_chk                      : 1;
3120	uint64_t pre_strp                     : 1;
3121	uint64_t ctl_drp                      : 1;
3122	uint64_t ctl_bck                      : 1;
3123	uint64_t ctl_mcst                     : 1;
3124	uint64_t ctl_smac                     : 1;
3125	uint64_t pre_free                     : 1;
3126	uint64_t vlan_len                     : 1;
3127	uint64_t reserved_8_63                : 56;
3128#endif
3129	} cn31xx;
3130	struct cvmx_gmxx_rxx_frm_ctl_cn30xx   cn38xx;
3131	struct cvmx_gmxx_rxx_frm_ctl_cn31xx   cn38xxp2;
3132	struct cvmx_gmxx_rxx_frm_ctl_cn50xx
3133	{
3134#if __BYTE_ORDER == __BIG_ENDIAN
3135	uint64_t reserved_11_63               : 53;
3136	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
3137                                                         due to PARITAL packets */
3138	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
3139                                                         regardless of the number of previous PREAMBLE
3140                                                         nibbles.  In this mode, PREAMBLE can be consumed
3141                                                         by the HW so when PRE_ALIGN is set, PRE_FREE,
3142                                                         PRE_STRP must be set for correct operation.
3143                                                         PRE_CHK must be set to enable this and all
3144                                                         PREAMBLE features. */
3145	uint64_t reserved_7_8                 : 2;
3146	uint64_t pre_free                     : 1;  /**< Allows for less strict PREAMBLE checking.
3147                                                         0-254 cycles of PREAMBLE followed by SFD */
3148	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3149	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3150                                                         Multicast address */
3151	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3152	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3153	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3154                                                         0=PREAMBLE+SFD is sent to core as part of frame
3155                                                         1=PREAMBLE+SFD is dropped */
3156	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
3157                                                         to begin every frame.  GMX checks that the
3158                                                         PREAMBLE is sent correctly */
3159#else
3160	uint64_t pre_chk                      : 1;
3161	uint64_t pre_strp                     : 1;
3162	uint64_t ctl_drp                      : 1;
3163	uint64_t ctl_bck                      : 1;
3164	uint64_t ctl_mcst                     : 1;
3165	uint64_t ctl_smac                     : 1;
3166	uint64_t pre_free                     : 1;
3167	uint64_t reserved_7_8                 : 2;
3168	uint64_t pre_align                    : 1;
3169	uint64_t null_dis                     : 1;
3170	uint64_t reserved_11_63               : 53;
3171#endif
3172	} cn50xx;
3173	struct cvmx_gmxx_rxx_frm_ctl_cn50xx   cn52xx;
3174	struct cvmx_gmxx_rxx_frm_ctl_cn50xx   cn52xxp1;
3175	struct cvmx_gmxx_rxx_frm_ctl_cn50xx   cn56xx;
3176	struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1
3177	{
3178#if __BYTE_ORDER == __BIG_ENDIAN
3179	uint64_t reserved_10_63               : 54;
3180	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
3181                                                         regardless of the number of previous PREAMBLE
3182                                                         nibbles.  In this mode, PRE_STRP should be set to
3183                                                         account for the variable nature of the PREAMBLE.
3184                                                         PRE_CHK must be set to enable this and all
3185                                                         PREAMBLE features.
3186                                                         (SGMII at 10/100Mbs only) */
3187	uint64_t reserved_7_8                 : 2;
3188	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
3189                                                         0 - 254 cycles of PREAMBLE followed by SFD
3190                                                         PRE_CHK must be set to enable this and all
3191                                                         PREAMBLE features.
3192                                                         (SGMII/1000Base-X only) */
3193	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3194	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3195                                                         Multicast address */
3196	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3197	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3198	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3199                                                         0=PREAMBLE+SFD is sent to core as part of frame
3200                                                         1=PREAMBLE+SFD is dropped
3201                                                         PRE_CHK must be set to enable this and all
3202                                                         PREAMBLE features. */
3203	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
3204                                                         to begin every frame.  GMX checks that the
3205                                                         PREAMBLE is sent correctly.
3206                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
3207                                                         must be zero. */
3208#else
3209	uint64_t pre_chk                      : 1;
3210	uint64_t pre_strp                     : 1;
3211	uint64_t ctl_drp                      : 1;
3212	uint64_t ctl_bck                      : 1;
3213	uint64_t ctl_mcst                     : 1;
3214	uint64_t ctl_smac                     : 1;
3215	uint64_t pre_free                     : 1;
3216	uint64_t reserved_7_8                 : 2;
3217	uint64_t pre_align                    : 1;
3218	uint64_t reserved_10_63               : 54;
3219#endif
3220	} cn56xxp1;
3221	struct cvmx_gmxx_rxx_frm_ctl_cn58xx
3222	{
3223#if __BYTE_ORDER == __BIG_ENDIAN
3224	uint64_t reserved_11_63               : 53;
3225	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
3226                                                         due to PARITAL packets
3227                                                         In spi4 mode, all ports use prt0 for checking. */
3228	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
3229                                                         regardless of the number of previous PREAMBLE
3230                                                         nibbles.  In this mode, PREAMBLE can be consumed
3231                                                         by the HW so when PRE_ALIGN is set, PRE_FREE,
3232                                                         PRE_STRP must be set for correct operation.
3233                                                         PRE_CHK must be set to enable this and all
3234                                                         PREAMBLE features. */
3235	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
3236                                                         sized pkts with padding in the client data
3237                                                         (PASS3 Only) */
3238	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
3239	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
3240                                                         0 - 254 cycles of PREAMBLE followed by SFD */
3241	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3242	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3243                                                         Multicast address */
3244	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3245	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3246	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3247                                                         0=PREAMBLE+SFD is sent to core as part of frame
3248                                                         1=PREAMBLE+SFD is dropped */
3249	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
3250                                                         to begin every frame.  GMX checks that the
3251                                                         PREAMBLE is sent correctly */
3252#else
3253	uint64_t pre_chk                      : 1;
3254	uint64_t pre_strp                     : 1;
3255	uint64_t ctl_drp                      : 1;
3256	uint64_t ctl_bck                      : 1;
3257	uint64_t ctl_mcst                     : 1;
3258	uint64_t ctl_smac                     : 1;
3259	uint64_t pre_free                     : 1;
3260	uint64_t vlan_len                     : 1;
3261	uint64_t pad_len                      : 1;
3262	uint64_t pre_align                    : 1;
3263	uint64_t null_dis                     : 1;
3264	uint64_t reserved_11_63               : 53;
3265#endif
3266	} cn58xx;
3267	struct cvmx_gmxx_rxx_frm_ctl_cn30xx   cn58xxp1;
3268	struct cvmx_gmxx_rxx_frm_ctl_cn63xx
3269	{
3270#if __BYTE_ORDER == __BIG_ENDIAN
3271	uint64_t reserved_13_63               : 51;
3272	uint64_t ptp_mode                     : 1;  /**< Timestamp mode
3273                                                         When PTP_MODE is set, a 64-bit timestamp will be
3274                                                         prepended to every incoming packet. The timestamp
3275                                                         bytes are added to the packet in such a way as to
3276                                                         not modify the packet's receive byte count.  This
3277                                                         implies that the GMX_RX_JABBER, MINERR,
3278                                                         GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
3279                                                         GMX_RX_STATS_* do not require any adjustment as
3280                                                         they operate on the received packet size.
3281                                                         When the packet reaches PKI, its size will
3282                                                         reflect the additional bytes and is subject to
3283                                                         the restrictions below.
3284                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
3285                                                         If PTP_MODE=1,
3286                                                          PIP_PRT_CFGx[SKIP] should be increased by 8.
3287                                                          PIP_PRT_CFGx[HIGIG_EN] should be 0.
3288                                                          PIP_FRM_CHKx[MAXLEN] should be increased by 8.
3289                                                          PIP_FRM_CHKx[MINLEN] should be increased by 8.
3290                                                          PIP_TAG_INCx[EN] should be adjusted. */
3291	uint64_t reserved_11_11               : 1;
	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
                                                         due to PARTIAL packets */
	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns to the SFD byte
3295                                                         regardless of the number of previous PREAMBLE
3296                                                         nibbles.  In this mode, PRE_STRP should be set to
3297                                                         account for the variable nature of the PREAMBLE.
3298                                                         PRE_CHK must be set to enable this and all
3299                                                         PREAMBLE features.
3300                                                         (SGMII at 10/100Mbs only) */
3301	uint64_t reserved_7_8                 : 2;
3302	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
3303                                                         GMX will begin the frame at the first SFD.
3304                                                         PRE_CHK must be set to enable this and all
3305                                                         PREAMBLE features.
3306                                                         (SGMII/1000Base-X only) */
3307	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
3308	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
3309                                                         Multicast address */
3310	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
3311	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
3312	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
3313                                                         0=PREAMBLE+SFD is sent to core as part of frame
3314                                                         1=PREAMBLE+SFD is dropped
3315                                                         PRE_CHK must be set to enable this and all
3316                                                         PREAMBLE features.
3317                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3318	uint64_t pre_chk                      : 1;  /**< This port is configured to send a valid 802.3
3319                                                         PREAMBLE to begin every frame. GMX checks that a
3320                                                         valid PREAMBLE is received (based on PRE_FREE).
3321                                                         When a problem does occur within the PREAMBLE
                                                         sequence, the frame is marked as bad and not sent
3323                                                         into the core.  The GMX_GMX_RX_INT_REG[PCTERR]
3324                                                         interrupt is also raised.
3325                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
3326                                                         must be zero.
3327                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3328#else
3329	uint64_t pre_chk                      : 1;
3330	uint64_t pre_strp                     : 1;
3331	uint64_t ctl_drp                      : 1;
3332	uint64_t ctl_bck                      : 1;
3333	uint64_t ctl_mcst                     : 1;
3334	uint64_t ctl_smac                     : 1;
3335	uint64_t pre_free                     : 1;
3336	uint64_t reserved_7_8                 : 2;
3337	uint64_t pre_align                    : 1;
3338	uint64_t null_dis                     : 1;
3339	uint64_t reserved_11_11               : 1;
3340	uint64_t ptp_mode                     : 1;
3341	uint64_t reserved_13_63               : 51;
3342#endif
3343	} cn63xx;
3344	struct cvmx_gmxx_rxx_frm_ctl_cn63xx   cn63xxp1;
3345};
3346typedef union cvmx_gmxx_rxx_frm_ctl cvmx_gmxx_rxx_frm_ctl_t;
3347
3348/**
3349 * cvmx_gmx#_rx#_frm_max
3350 *
3351 * GMX_RX_FRM_MAX = Frame Max length
3352 *
3353 *
3354 * Notes:
3355 * In spi4 mode, all spi4 ports use prt0 for checking.
3356 *
3357 * When changing the LEN field, be sure that LEN does not exceed
3358 * GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
3359 * are within the maximum length parameter to be rejected because they exceed
3360 * the GMX_RX_JABBER[CNT] limit.
3361 */
union cvmx_gmxx_rxx_frm_max
{
	uint64_t u64;
	/* Canonical layout shared by all supported chip models (see aliases below). */
	struct cvmx_gmxx_rxx_frm_max_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t len                          : 16; /**< Byte count for Max-sized frame check
                                                         GMX_RXn_FRM_CHK[MAXERR] enables the check for
                                                         port n.
                                                         If enabled, failing packets set the MAXERR
                                                         interrupt and work-queue entry WORD2[opcode] is
                                                         set to OVER_FCS (0x3, if packet has bad FCS) or
                                                         OVER_ERR (0x4, if packet has good FCS).
                                                         LEN <= GMX_RX_JABBER[CNT] */
#else
	/* Little-endian view: identical fields declared in reverse order. */
	uint64_t len                          : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_rxx_frm_max_s        cn30xx;
	struct cvmx_gmxx_rxx_frm_max_s        cn31xx;
	struct cvmx_gmxx_rxx_frm_max_s        cn38xx;
	struct cvmx_gmxx_rxx_frm_max_s        cn38xxp2;
	struct cvmx_gmxx_rxx_frm_max_s        cn58xx;
	struct cvmx_gmxx_rxx_frm_max_s        cn58xxp1;
};
typedef union cvmx_gmxx_rxx_frm_max cvmx_gmxx_rxx_frm_max_t;
3390
3391/**
3392 * cvmx_gmx#_rx#_frm_min
3393 *
3394 * GMX_RX_FRM_MIN = Frame Min length
3395 *
3396 *
3397 * Notes:
3398 * In spi4 mode, all spi4 ports use prt0 for checking.
3399 *
3400 */
union cvmx_gmxx_rxx_frm_min
{
	uint64_t u64;
	/* Canonical layout shared by all supported chip models (see aliases below). */
	struct cvmx_gmxx_rxx_frm_min_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t len                          : 16; /**< Byte count for Min-sized frame check
                                                         GMX_RXn_FRM_CHK[MINERR] enables the check for
                                                         port n.
                                                         If enabled, failing packets set the MINERR
                                                         interrupt and work-queue entry WORD2[opcode] is
                                                         set to UNDER_FCS (0x6, if packet has bad FCS) or
                                                         UNDER_ERR (0x8, if packet has good FCS). */
#else
	/* Little-endian view: identical fields declared in reverse order. */
	uint64_t len                          : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_rxx_frm_min_s        cn30xx;
	struct cvmx_gmxx_rxx_frm_min_s        cn31xx;
	struct cvmx_gmxx_rxx_frm_min_s        cn38xx;
	struct cvmx_gmxx_rxx_frm_min_s        cn38xxp2;
	struct cvmx_gmxx_rxx_frm_min_s        cn58xx;
	struct cvmx_gmxx_rxx_frm_min_s        cn58xxp1;
};
typedef union cvmx_gmxx_rxx_frm_min cvmx_gmxx_rxx_frm_min_t;
3428
3429/**
3430 * cvmx_gmx#_rx#_ifg
3431 *
3432 * GMX_RX_IFG = RX Min IFG
3433 *
3434 */
union cvmx_gmxx_rxx_ifg
{
	uint64_t u64;
	/* Canonical layout shared by all supported chip models (see aliases below). */
	struct cvmx_gmxx_rxx_ifg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t ifg                          : 4;  /**< Min IFG (in IFG*8 bits) between packets used to
                                                         determine IFGERR. Normally IFG is 96 bits.
                                                         Note in some operating modes, IFG cycles can be
                                                         inserted or removed in order to achieve clock rate
                                                         adaptation. For these reasons, the default value
                                                         is slightly conservative and does not check up to
                                                         the full 96 bits of IFG.
                                                         (SGMII/1000Base-X only) */
#else
	/* Little-endian view: identical fields declared in reverse order. */
	uint64_t ifg                          : 4;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_gmxx_rxx_ifg_s            cn30xx;
	struct cvmx_gmxx_rxx_ifg_s            cn31xx;
	struct cvmx_gmxx_rxx_ifg_s            cn38xx;
	struct cvmx_gmxx_rxx_ifg_s            cn38xxp2;
	struct cvmx_gmxx_rxx_ifg_s            cn50xx;
	struct cvmx_gmxx_rxx_ifg_s            cn52xx;
	struct cvmx_gmxx_rxx_ifg_s            cn52xxp1;
	struct cvmx_gmxx_rxx_ifg_s            cn56xx;
	struct cvmx_gmxx_rxx_ifg_s            cn56xxp1;
	struct cvmx_gmxx_rxx_ifg_s            cn58xx;
	struct cvmx_gmxx_rxx_ifg_s            cn58xxp1;
	struct cvmx_gmxx_rxx_ifg_s            cn63xx;
	struct cvmx_gmxx_rxx_ifg_s            cn63xxp1;
};
typedef union cvmx_gmxx_rxx_ifg cvmx_gmxx_rxx_ifg_t;
3470
3471/**
3472 * cvmx_gmx#_rx#_int_en
3473 *
3474 * GMX_RX_INT_EN = Interrupt Enable
3475 *
3476 *
3477 * Notes:
3478 * In XAUI mode prt0 is used for checking.
3479 *
3480 */
3481union cvmx_gmxx_rxx_int_en
3482{
3483	uint64_t u64;
3484	struct cvmx_gmxx_rxx_int_en_s
3485	{
3486#if __BYTE_ORDER == __BIG_ENDIAN
3487	uint64_t reserved_29_63               : 35;
3488	uint64_t hg2cc                        : 1;  /**< HiGig2 CRC8 or Control char error interrupt enable */
3489	uint64_t hg2fld                       : 1;  /**< HiGig2 Bad field error interrupt enable */
3490	uint64_t undat                        : 1;  /**< Unexpected Data
3491                                                         (XAUI Mode only) */
3492	uint64_t uneop                        : 1;  /**< Unexpected EOP
3493                                                         (XAUI Mode only) */
3494	uint64_t unsop                        : 1;  /**< Unexpected SOP
3495                                                         (XAUI Mode only) */
3496	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
3497                                                         than /T/.  The error propagation control
3498                                                         character /E/ will be included as part of the
3499                                                         frame and does not cause a frame termination.
3500                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
3507	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
3511	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
3512                                                         (SGMII/1000Base-X only) */
3513	uint64_t coldet                       : 1;  /**< Collision Detection
3514                                                         (SGMII/1000Base-X half-duplex only) */
3515	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
3516                                                         (SGMII/1000Base-X only) */
3517	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
3518	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
3519	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
3520                                                         (SGMII/1000Base-X only) */
3521	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
3522	uint64_t skperr                       : 1;  /**< Skipper error */
3523	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
3524	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
3525	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
3526	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
3527	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
3528	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
3529	uint64_t carext                       : 1;  /**< Carrier extend error
3530                                                         (SGMII/1000Base-X only) */
3531	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
3532#else
3533	uint64_t minerr                       : 1;
3534	uint64_t carext                       : 1;
3535	uint64_t maxerr                       : 1;
3536	uint64_t jabber                       : 1;
3537	uint64_t fcserr                       : 1;
3538	uint64_t alnerr                       : 1;
3539	uint64_t lenerr                       : 1;
3540	uint64_t rcverr                       : 1;
3541	uint64_t skperr                       : 1;
3542	uint64_t niberr                       : 1;
3543	uint64_t ovrerr                       : 1;
3544	uint64_t pcterr                       : 1;
3545	uint64_t rsverr                       : 1;
3546	uint64_t falerr                       : 1;
3547	uint64_t coldet                       : 1;
3548	uint64_t ifgerr                       : 1;
3549	uint64_t phy_link                     : 1;
3550	uint64_t phy_spd                      : 1;
3551	uint64_t phy_dupx                     : 1;
3552	uint64_t pause_drp                    : 1;
3553	uint64_t loc_fault                    : 1;
3554	uint64_t rem_fault                    : 1;
3555	uint64_t bad_seq                      : 1;
3556	uint64_t bad_term                     : 1;
3557	uint64_t unsop                        : 1;
3558	uint64_t uneop                        : 1;
3559	uint64_t undat                        : 1;
3560	uint64_t hg2fld                       : 1;
3561	uint64_t hg2cc                        : 1;
3562	uint64_t reserved_29_63               : 35;
3563#endif
3564	} s;
3565	struct cvmx_gmxx_rxx_int_en_cn30xx
3566	{
3567#if __BYTE_ORDER == __BIG_ENDIAN
3568	uint64_t reserved_19_63               : 45;
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
3572	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
3573	uint64_t coldet                       : 1;  /**< Collision Detection */
3574	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
3575	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
3576	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
3577	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
3578	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
3579	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
3581	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
3582	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
3583	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
3584	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
3585	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
3586	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
3587	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
3588#else
3589	uint64_t minerr                       : 1;
3590	uint64_t carext                       : 1;
3591	uint64_t maxerr                       : 1;
3592	uint64_t jabber                       : 1;
3593	uint64_t fcserr                       : 1;
3594	uint64_t alnerr                       : 1;
3595	uint64_t lenerr                       : 1;
3596	uint64_t rcverr                       : 1;
3597	uint64_t skperr                       : 1;
3598	uint64_t niberr                       : 1;
3599	uint64_t ovrerr                       : 1;
3600	uint64_t pcterr                       : 1;
3601	uint64_t rsverr                       : 1;
3602	uint64_t falerr                       : 1;
3603	uint64_t coldet                       : 1;
3604	uint64_t ifgerr                       : 1;
3605	uint64_t phy_link                     : 1;
3606	uint64_t phy_spd                      : 1;
3607	uint64_t phy_dupx                     : 1;
3608	uint64_t reserved_19_63               : 45;
3609#endif
3610	} cn30xx;
3611	struct cvmx_gmxx_rxx_int_en_cn30xx    cn31xx;
3612	struct cvmx_gmxx_rxx_int_en_cn30xx    cn38xx;
3613	struct cvmx_gmxx_rxx_int_en_cn30xx    cn38xxp2;
3614	struct cvmx_gmxx_rxx_int_en_cn50xx
3615	{
3616#if __BYTE_ORDER == __BIG_ENDIAN
3617	uint64_t reserved_20_63               : 44;
3618	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
3622	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
3623	uint64_t coldet                       : 1;  /**< Collision Detection */
3624	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
3625	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
3626	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
3627	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
3628	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
3629	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
3631	uint64_t reserved_6_6                 : 1;
3632	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
3633	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
3634	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
3635	uint64_t reserved_2_2                 : 1;
3636	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
3637	uint64_t reserved_0_0                 : 1;
3638#else
3639	uint64_t reserved_0_0                 : 1;
3640	uint64_t carext                       : 1;
3641	uint64_t reserved_2_2                 : 1;
3642	uint64_t jabber                       : 1;
3643	uint64_t fcserr                       : 1;
3644	uint64_t alnerr                       : 1;
3645	uint64_t reserved_6_6                 : 1;
3646	uint64_t rcverr                       : 1;
3647	uint64_t skperr                       : 1;
3648	uint64_t niberr                       : 1;
3649	uint64_t ovrerr                       : 1;
3650	uint64_t pcterr                       : 1;
3651	uint64_t rsverr                       : 1;
3652	uint64_t falerr                       : 1;
3653	uint64_t coldet                       : 1;
3654	uint64_t ifgerr                       : 1;
3655	uint64_t phy_link                     : 1;
3656	uint64_t phy_spd                      : 1;
3657	uint64_t phy_dupx                     : 1;
3658	uint64_t pause_drp                    : 1;
3659	uint64_t reserved_20_63               : 44;
3660#endif
3661	} cn50xx;
3662	struct cvmx_gmxx_rxx_int_en_cn52xx
3663	{
3664#if __BYTE_ORDER == __BIG_ENDIAN
3665	uint64_t reserved_29_63               : 35;
3666	uint64_t hg2cc                        : 1;  /**< HiGig2 CRC8 or Control char error interrupt enable */
3667	uint64_t hg2fld                       : 1;  /**< HiGig2 Bad field error interrupt enable */
3668	uint64_t undat                        : 1;  /**< Unexpected Data
3669                                                         (XAUI Mode only) */
3670	uint64_t uneop                        : 1;  /**< Unexpected EOP
3671                                                         (XAUI Mode only) */
3672	uint64_t unsop                        : 1;  /**< Unexpected SOP
3673                                                         (XAUI Mode only) */
3674	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
3675                                                         than /T/.  The error propagation control
3676                                                         character /E/ will be included as part of the
3677                                                         frame and does not cause a frame termination.
3678                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
3685	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
3686	uint64_t reserved_16_18               : 3;
3687	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
3688                                                         (SGMII/1000Base-X only) */
3689	uint64_t coldet                       : 1;  /**< Collision Detection
3690                                                         (SGMII/1000Base-X half-duplex only) */
3691	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
3692                                                         (SGMII/1000Base-X only) */
3693	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
3694	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
3695	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
3696                                                         (SGMII/1000Base-X only) */
3697	uint64_t reserved_9_9                 : 1;
3698	uint64_t skperr                       : 1;  /**< Skipper error */
3699	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
3700	uint64_t reserved_5_6                 : 2;
3701	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
3702	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
3703	uint64_t reserved_2_2                 : 1;
3704	uint64_t carext                       : 1;  /**< Carrier extend error
3705                                                         (SGMII/1000Base-X only) */
3706	uint64_t reserved_0_0                 : 1;
3707#else
3708	uint64_t reserved_0_0                 : 1;
3709	uint64_t carext                       : 1;
3710	uint64_t reserved_2_2                 : 1;
3711	uint64_t jabber                       : 1;
3712	uint64_t fcserr                       : 1;
3713	uint64_t reserved_5_6                 : 2;
3714	uint64_t rcverr                       : 1;
3715	uint64_t skperr                       : 1;
3716	uint64_t reserved_9_9                 : 1;
3717	uint64_t ovrerr                       : 1;
3718	uint64_t pcterr                       : 1;
3719	uint64_t rsverr                       : 1;
3720	uint64_t falerr                       : 1;
3721	uint64_t coldet                       : 1;
3722	uint64_t ifgerr                       : 1;
3723	uint64_t reserved_16_18               : 3;
3724	uint64_t pause_drp                    : 1;
3725	uint64_t loc_fault                    : 1;
3726	uint64_t rem_fault                    : 1;
3727	uint64_t bad_seq                      : 1;
3728	uint64_t bad_term                     : 1;
3729	uint64_t unsop                        : 1;
3730	uint64_t uneop                        : 1;
3731	uint64_t undat                        : 1;
3732	uint64_t hg2fld                       : 1;
3733	uint64_t hg2cc                        : 1;
3734	uint64_t reserved_29_63               : 35;
3735#endif
3736	} cn52xx;
3737	struct cvmx_gmxx_rxx_int_en_cn52xx    cn52xxp1;
3738	struct cvmx_gmxx_rxx_int_en_cn52xx    cn56xx;
3739	struct cvmx_gmxx_rxx_int_en_cn56xxp1
3740	{
3741#if __BYTE_ORDER == __BIG_ENDIAN
3742	uint64_t reserved_27_63               : 37;
3743	uint64_t undat                        : 1;  /**< Unexpected Data
3744                                                         (XAUI Mode only) */
3745	uint64_t uneop                        : 1;  /**< Unexpected EOP
3746                                                         (XAUI Mode only) */
3747	uint64_t unsop                        : 1;  /**< Unexpected SOP
3748                                                         (XAUI Mode only) */
3749	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
3750                                                         than /T/.  The error propagation control
3751                                                         character /E/ will be included as part of the
3752                                                         frame and does not cause a frame termination.
3753                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
3760	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
3761	uint64_t reserved_16_18               : 3;
3762	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
3763                                                         (SGMII/1000Base-X only) */
3764	uint64_t coldet                       : 1;  /**< Collision Detection
3765                                                         (SGMII/1000Base-X half-duplex only) */
3766	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
3767                                                         (SGMII/1000Base-X only) */
3768	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
3769	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
3770	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
3771                                                         (SGMII/1000Base-X only) */
3772	uint64_t reserved_9_9                 : 1;
3773	uint64_t skperr                       : 1;  /**< Skipper error */
3774	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
3775	uint64_t reserved_5_6                 : 2;
3776	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
3777	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
3778	uint64_t reserved_2_2                 : 1;
3779	uint64_t carext                       : 1;  /**< Carrier extend error
3780                                                         (SGMII/1000Base-X only) */
3781	uint64_t reserved_0_0                 : 1;
3782#else
3783	uint64_t reserved_0_0                 : 1;
3784	uint64_t carext                       : 1;
3785	uint64_t reserved_2_2                 : 1;
3786	uint64_t jabber                       : 1;
3787	uint64_t fcserr                       : 1;
3788	uint64_t reserved_5_6                 : 2;
3789	uint64_t rcverr                       : 1;
3790	uint64_t skperr                       : 1;
3791	uint64_t reserved_9_9                 : 1;
3792	uint64_t ovrerr                       : 1;
3793	uint64_t pcterr                       : 1;
3794	uint64_t rsverr                       : 1;
3795	uint64_t falerr                       : 1;
3796	uint64_t coldet                       : 1;
3797	uint64_t ifgerr                       : 1;
3798	uint64_t reserved_16_18               : 3;
3799	uint64_t pause_drp                    : 1;
3800	uint64_t loc_fault                    : 1;
3801	uint64_t rem_fault                    : 1;
3802	uint64_t bad_seq                      : 1;
3803	uint64_t bad_term                     : 1;
3804	uint64_t unsop                        : 1;
3805	uint64_t uneop                        : 1;
3806	uint64_t undat                        : 1;
3807	uint64_t reserved_27_63               : 37;
3808#endif
3809	} cn56xxp1;
	/* GMX_RX_INT_EN field layout for CN58XX-family chips (RGMII-era parts:
	   no XAUI or HiGig2 enable bits; bits 20-63 are reserved).  Setting a
	   bit enables the corresponding interrupt in GMX_RX_INT_REG. */
	struct cvmx_gmxx_rxx_int_en_cn58xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
	uint64_t coldet                       : 1;  /**< Collision Detection */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
#else  /* little endian: identical fields, declared in reversed bit order */
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t maxerr                       : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t lenerr                       : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t phy_link                     : 1;
	uint64_t phy_spd                      : 1;
	uint64_t phy_dupx                     : 1;
	uint64_t pause_drp                    : 1;
	uint64_t reserved_20_63               : 44;
#endif
	} cn58xx;
3858	struct cvmx_gmxx_rxx_int_en_cn58xx    cn58xxp1;
	/* GMX_RX_INT_EN field layout for CN63XX-family chips: adds the HiGig2
	   enables (hg2cc/hg2fld) on top of the XAUI and SGMII/1000Base-X error
	   enables; bits 29-63 are reserved. */
	struct cvmx_gmxx_rxx_int_en_cn63xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_29_63               : 35;
	uint64_t hg2cc                        : 1;  /**< HiGig2 CRC8 or Control char error interrupt enable */
	uint64_t hg2fld                       : 1;  /**< HiGig2 Bad field error interrupt enable */
	uint64_t undat                        : 1;  /**< Unexpected Data
                                                         (XAUI Mode only) */
	uint64_t uneop                        : 1;  /**< Unexpected EOP
                                                         (XAUI Mode only) */
	uint64_t unsop                        : 1;  /**< Unexpected SOP
                                                         (XAUI Mode only) */
	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
                                                         than /T/.  The error propagation control
                                                         character /E/ will be included as part of the
                                                         frame and does not cause a frame termination.
                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t reserved_16_18               : 3;
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         (SGMII/1000Base-X only) */
	uint64_t coldet                       : 1;  /**< Collision Detection
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
                                                         (SGMII/1000Base-X only) */
	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_9_9                 : 1;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
#else  /* little endian: identical fields, declared in reversed bit order */
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_9                 : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t reserved_16_18               : 3;
	uint64_t pause_drp                    : 1;
	uint64_t loc_fault                    : 1;
	uint64_t rem_fault                    : 1;
	uint64_t bad_seq                      : 1;
	uint64_t bad_term                     : 1;
	uint64_t unsop                        : 1;
	uint64_t uneop                        : 1;
	uint64_t undat                        : 1;
	uint64_t hg2fld                       : 1;
	uint64_t hg2cc                        : 1;
	uint64_t reserved_29_63               : 35;
#endif
	} cn63xx;
3934	struct cvmx_gmxx_rxx_int_en_cn63xx    cn63xxp1;
3935};
3936typedef union cvmx_gmxx_rxx_int_en cvmx_gmxx_rxx_int_en_t;
3937
3938/**
3939 * cvmx_gmx#_rx#_int_reg
3940 *
3941 * GMX_RX_INT_REG = Interrupt Register
3942 *
3943 *
3944 * Notes:
3945 * (1) exceptions will only be raised to the control processor if the
3946 *     corresponding bit in the GMX_RX_INT_EN register is set.
3947 *
3948 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
3949 *     packet's workQ entry.  The GMX_RX_FRM_CHK register provides a bit mask
3950 *     for configuring which conditions set the error.
3951 *
3952 * (3) in half duplex operation, the expectation is that collisions will appear
 *     as either MINERR or CAREXT errors.
3954 *
3955 * (4) JABBER - An RX Jabber error indicates that a packet was received which
3956 *              is longer than the maximum allowed packet as defined by the
3957 *              system.  GMX will truncate the packet at the JABBER count.
 *              Failure to do so could lead to system instability.
3959 *
3960 * (5) NIBERR - This error is illegal at 1000Mbs speeds
3961 *              (GMX_RX_PRT_CFG[SPEED]==0) and will never assert.
3962 *
3963 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
3964 *              GMX_RX_FRM_MAX.  For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
3965 *              > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
3966 *
3967 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < 64
3968 *
3969 * (8) ALNERR - Indicates that the packet received was not an integer number of
3970 *              bytes.  If FCS checking is enabled, ALNERR will only assert if
3971 *              the FCS is bad.  If FCS checking is disabled, ALNERR will
3972 *              assert in all non-integer frame cases.
3973 *
3974 * (9) Collisions - Collisions can only occur in half-duplex mode.  A collision
3975 *                  is assumed by the receiver when the slottime
3976 *                  (GMX_PRT_CFG[SLOTTIME]) is not satisfied.  In 10/100 mode,
3977 *                  this will result in a frame < SLOTTIME.  In 1000 mode, it
3978 *                  could result either in frame < SLOTTIME or a carrier extend
3979 *                  error with the SLOTTIME.  These conditions are visible by...
3980 *
3981 *                  . transfer ended before slottime - COLDET
3982 *                  . carrier extend error           - CAREXT
3983 *
3984 * (A) LENERR - Length errors occur when the received packet does not match the
3985 *              length field.  LENERR is only checked for packets between 64
 *              and 1500 bytes.  For untagged frames, the length must exactly
 *              match.  For tagged frames, the length or length+4 must match.
3988 *
3989 * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
3990 *              Does not check the number of PREAMBLE cycles.
3991 *
3992 * (C) OVRERR - Not to be included in the HRM
3993 *
3994 *              OVRERR is an architectural assertion check internal to GMX to
3995 *              make sure no assumption was violated.  In a correctly operating
3996 *              system, this interrupt can never fire.
3997 *
3998 *              GMX has an internal arbiter which selects which of 4 ports to
3999 *              buffer in the main RX FIFO.  If we normally buffer 8 bytes,
4000 *              then each port will typically push a tick every 8 cycles - if
4001 *              the packet interface is going as fast as possible.  If there
4002 *              are four ports, they push every two cycles.  So that's the
4003 *              assumption.  That the inbound module will always be able to
4004 *              consume the tick before another is produced.  If that doesn't
4005 *              happen - that's when OVRERR will assert.
4006 *
4007 * (D) In XAUI mode prt0 is used for interrupt logging.
4008 */
4009union cvmx_gmxx_rxx_int_reg
4010{
4011	uint64_t u64;
	/* Superset layout of GMX_RX_INT_REG: every interrupt status bit that
	   exists on any supported chip (RGMII, SGMII/1000Base-X, XAUI and
	   HiGig2 conditions); bits 29-63 are reserved.  Chip-specific
	   variants below expose only the subset their hardware implements. */
	struct cvmx_gmxx_rxx_int_reg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_29_63               : 35;
	uint64_t hg2cc                        : 1;  /**< HiGig2 received message CRC or Control char  error
                                                         Set when either CRC8 error detected or when
                                                         a Control Character is found in the message
                                                         bytes after the K.SOM
                                                         NOTE: HG2CC has higher priority than HG2FLD
                                                               i.e. a HiGig2 message that results in HG2CC
                                                               getting set, will never set HG2FLD. */
	uint64_t hg2fld                       : 1;  /**< HiGig2 received message field error, as below
                                                         1) MSG_TYPE field not 6'b00_0000
                                                            i.e. it is not a FLOW CONTROL message, which
                                                            is the only defined type for HiGig2
                                                         2) FWD_TYPE field not 2'b00 i.e. Link Level msg
                                                            which is the only defined type for HiGig2
                                                         3) FC_OBJECT field is neither 4'b0000 for
                                                            Physical Link nor 4'b0010 for Logical Link.
                                                            Those are the only two defined types in HiGig2 */
	uint64_t undat                        : 1;  /**< Unexpected Data
                                                         (XAUI Mode only) */
	uint64_t uneop                        : 1;  /**< Unexpected EOP
                                                         (XAUI Mode only) */
	uint64_t unsop                        : 1;  /**< Unexpected SOP
                                                         (XAUI Mode only) */
	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
                                                         than /T/.  The error propagation control
                                                         character /E/ will be included as part of the
                                                         frame and does not cause a frame termination.
                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         Does not necessarily indicate a failure
                                                         (SGMII/1000Base-X only) */
	uint64_t coldet                       : 1;  /**< Collision Detection
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
                                                         (SGMII/1000Base-X only) */
	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
                                                         In XAUI mode, the column of data that was bad
                                                         will be logged in GMX_RX_XAUI_BAD_COL */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         This interrupt should never assert
                                                         (SGMII/1000Base-X only) */
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize
                                                         Frame length checks are typically handled in PIP
                                                         (PIP_INT_REG[MINERR]), but pause frames are
                                                         normally discarded before being inspected by PIP. */
#else  /* little endian: identical fields, declared in reversed bit order */
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t maxerr                       : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t lenerr                       : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t phy_link                     : 1;
	uint64_t phy_spd                      : 1;
	uint64_t phy_dupx                     : 1;
	uint64_t pause_drp                    : 1;
	uint64_t loc_fault                    : 1;
	uint64_t rem_fault                    : 1;
	uint64_t bad_seq                      : 1;
	uint64_t bad_term                     : 1;
	uint64_t unsop                        : 1;
	uint64_t uneop                        : 1;
	uint64_t undat                        : 1;
	uint64_t hg2fld                       : 1;
	uint64_t hg2cc                        : 1;
	uint64_t reserved_29_63               : 35;
#endif
	} s;
	/* GMX_RX_INT_REG field layout for CN30XX-family chips (RGMII-era
	   parts: no pause_drp, XAUI or HiGig2 status bits; bits 19-63 are
	   reserved). */
	struct cvmx_gmxx_rxx_int_reg_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_19_63               : 45;
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         Does not necessarily indicate a failure */
	uint64_t coldet                       : 1;  /**< Collision Detection */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         This interrupt should never assert */
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
#else  /* little endian: identical fields, declared in reversed bit order */
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t maxerr                       : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t lenerr                       : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t phy_link                     : 1;
	uint64_t phy_spd                      : 1;
	uint64_t phy_dupx                     : 1;
	uint64_t reserved_19_63               : 45;
#endif
	} cn30xx;
	/* CN31XX, CN38XX and CN38XX pass 2 share the CN30XX layout. */
	struct cvmx_gmxx_rxx_int_reg_cn30xx   cn31xx;
	struct cvmx_gmxx_rxx_int_reg_cn30xx   cn38xx;
	struct cvmx_gmxx_rxx_int_reg_cn30xx   cn38xxp2;
	/* GMX_RX_INT_REG field layout for CN50XX-family chips: CN30XX layout
	   plus pause_drp, with the minerr/maxerr/lenerr bit positions
	   reserved; bits 20-63 are reserved. */
	struct cvmx_gmxx_rxx_int_reg_cn50xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         Does not necessarily indicate a failure */
	uint64_t coldet                       : 1;  /**< Collision Detection */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         This interrupt should never assert */
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
	uint64_t reserved_6_6                 : 1;
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
	uint64_t reserved_0_0                 : 1;
#else  /* little endian: identical fields, declared in reversed bit order */
	uint64_t reserved_0_0                 : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t reserved_6_6                 : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t phy_link                     : 1;
	uint64_t phy_spd                      : 1;
	uint64_t phy_dupx                     : 1;
	uint64_t pause_drp                    : 1;
	uint64_t reserved_20_63               : 44;
#endif
	} cn50xx;
4215	struct cvmx_gmxx_rxx_int_reg_cn52xx
4216	{
4217#if __BYTE_ORDER == __BIG_ENDIAN
4218	uint64_t reserved_29_63               : 35;
4219	uint64_t hg2cc                        : 1;  /**< HiGig2 received message CRC or Control char  error
4220                                                         Set when either CRC8 error detected or when
4221                                                         a Control Character is found in the message
4222                                                         bytes after the K.SOM
4223                                                         NOTE: HG2CC has higher priority than HG2FLD
4224                                                               i.e. a HiGig2 message that results in HG2CC
4225                                                               getting set, will never set HG2FLD. */
4226	uint64_t hg2fld                       : 1;  /**< HiGig2 received message field error, as below
4227                                                         1) MSG_TYPE field not 6'b00_0000
4228                                                            i.e. it is not a FLOW CONTROL message, which
4229                                                            is the only defined type for HiGig2
4230                                                         2) FWD_TYPE field not 2'b00 i.e. Link Level msg
4231                                                            which is the only defined type for HiGig2
4232                                                         3) FC_OBJECT field is neither 4'b0000 for
4233                                                            Physical Link nor 4'b0010 for Logical Link.
4234                                                            Those are the only two defined types in HiGig2 */
4235	uint64_t undat                        : 1;  /**< Unexpected Data
4236                                                         (XAUI Mode only) */
4237	uint64_t uneop                        : 1;  /**< Unexpected EOP
4238                                                         (XAUI Mode only) */
4239	uint64_t unsop                        : 1;  /**< Unexpected SOP
4240                                                         (XAUI Mode only) */
4241	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
4242                                                         than /T/.  The error propagation control
4243                                                         character /E/ will be included as part of the
4244                                                         frame and does not cause a frame termination.
4245                                                         (XAUI Mode only) */
4246	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
4247                                                         (XAUI Mode only) */
4248	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
4249                                                         (XAUI Mode only) */
4250	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
4251                                                         (XAUI Mode only) */
4252	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
4253	uint64_t reserved_16_18               : 3;
4254	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
4255                                                         Does not necessarily indicate a failure
4256                                                         (SGMII/1000Base-X only) */
4257	uint64_t coldet                       : 1;  /**< Collision Detection
4258                                                         (SGMII/1000Base-X half-duplex only) */
4259	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
4260                                                         (SGMII/1000Base-X only) */
4261	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
4262	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
4263                                                         In XAUI mode, the column of data that was bad
4264                                                         will be logged in GMX_RX_XAUI_BAD_COL */
4265	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
4266                                                         This interrupt should never assert
4267                                                         (SGMII/1000Base-X only) */
4268	uint64_t reserved_9_9                 : 1;
4269	uint64_t skperr                       : 1;  /**< Skipper error */
4270	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
4271	uint64_t reserved_5_6                 : 2;
4272	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
4273	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
4274	uint64_t reserved_2_2                 : 1;
4275	uint64_t carext                       : 1;  /**< Carrier extend error
4276                                                         (SGMII/1000Base-X only) */
4277	uint64_t reserved_0_0                 : 1;
4278#else
4279	uint64_t reserved_0_0                 : 1;
4280	uint64_t carext                       : 1;
4281	uint64_t reserved_2_2                 : 1;
4282	uint64_t jabber                       : 1;
4283	uint64_t fcserr                       : 1;
4284	uint64_t reserved_5_6                 : 2;
4285	uint64_t rcverr                       : 1;
4286	uint64_t skperr                       : 1;
4287	uint64_t reserved_9_9                 : 1;
4288	uint64_t ovrerr                       : 1;
4289	uint64_t pcterr                       : 1;
4290	uint64_t rsverr                       : 1;
4291	uint64_t falerr                       : 1;
4292	uint64_t coldet                       : 1;
4293	uint64_t ifgerr                       : 1;
4294	uint64_t reserved_16_18               : 3;
4295	uint64_t pause_drp                    : 1;
4296	uint64_t loc_fault                    : 1;
4297	uint64_t rem_fault                    : 1;
4298	uint64_t bad_seq                      : 1;
4299	uint64_t bad_term                     : 1;
4300	uint64_t unsop                        : 1;
4301	uint64_t uneop                        : 1;
4302	uint64_t undat                        : 1;
4303	uint64_t hg2fld                       : 1;
4304	uint64_t hg2cc                        : 1;
4305	uint64_t reserved_29_63               : 35;
4306#endif
4307	} cn52xx;
	struct cvmx_gmxx_rxx_int_reg_cn52xx   cn52xxp1; /* CN52XX pass 1: same layout as cn52xx */
	struct cvmx_gmxx_rxx_int_reg_cn52xx   cn56xx;   /* CN56XX: same layout as cn52xx */
	/* CN56XX pass 1 layout: like cn52xx but without the HiGig2 interrupt
	   bits (HG2CC/HG2FLD), so bits 27..63 are reserved here. */
	struct cvmx_gmxx_rxx_int_reg_cn56xxp1
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_27_63               : 37;
	uint64_t undat                        : 1;  /**< Unexpected Data
                                                         (XAUI Mode only) */
	uint64_t uneop                        : 1;  /**< Unexpected EOP
                                                         (XAUI Mode only) */
	uint64_t unsop                        : 1;  /**< Unexpected SOP
                                                         (XAUI Mode only) */
	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
                                                         than /T/.  The error propagation control
                                                         character /E/ will be included as part of the
                                                         frame and does not cause a frame termination.
                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t reserved_16_18               : 3;
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         Does not necessarily indicate a failure
                                                         (SGMII/1000Base-X only) */
	uint64_t coldet                       : 1;  /**< Collision Detection
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
                                                         (SGMII/1000Base-X only) */
	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
                                                         In XAUI mode, the column of data that was bad
                                                         will be logged in GMX_RX_XAUI_BAD_COL */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         This interrupt should never assert
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_9_9                 : 1;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_0_0                 : 1;
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t reserved_0_0                 : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_9                 : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t reserved_16_18               : 3;
	uint64_t pause_drp                    : 1;
	uint64_t loc_fault                    : 1;
	uint64_t rem_fault                    : 1;
	uint64_t bad_seq                      : 1;
	uint64_t bad_term                     : 1;
	uint64_t unsop                        : 1;
	uint64_t uneop                        : 1;
	uint64_t undat                        : 1;
	uint64_t reserved_27_63               : 37;
#endif
	} cn56xxp1;
	/* CN58XX layout: RGMII-era interrupt set (MINERR/MAXERR/ALNERR/LENERR/
	   NIBERR plus PHY_* in-band status-change bits); no XAUI/HiGig2 bits. */
	struct cvmx_gmxx_rxx_int_reg_cn58xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex */
	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed */
	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus */
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         Does not necessarily indicate a failure */
	uint64_t coldet                       : 1;  /**< Collision Detection */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         This interrupt should never assert */
	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t maxerr                       : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t alnerr                       : 1;
	uint64_t lenerr                       : 1;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t niberr                       : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t phy_link                     : 1;
	uint64_t phy_spd                      : 1;
	uint64_t phy_dupx                     : 1;
	uint64_t pause_drp                    : 1;
	uint64_t reserved_20_63               : 44;
#endif
	} cn58xx;
	struct cvmx_gmxx_rxx_int_reg_cn58xx   cn58xxp1; /* CN58XX pass 1: same layout as cn58xx */
	/* CN63XX layout: like cn52xx (XAUI + HiGig2 bits) but bit 0 is MINERR
	   (pause-frame minimum-length check) instead of reserved. */
	struct cvmx_gmxx_rxx_int_reg_cn63xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_29_63               : 35;
	uint64_t hg2cc                        : 1;  /**< HiGig2 received message CRC or Control char  error
                                                         Set when either CRC8 error detected or when
                                                         a Control Character is found in the message
                                                         bytes after the K.SOM
                                                         NOTE: HG2CC has higher priority than HG2FLD
                                                               i.e. a HiGig2 message that results in HG2CC
                                                               getting set, will never set HG2FLD. */
	uint64_t hg2fld                       : 1;  /**< HiGig2 received message field error, as below
                                                         1) MSG_TYPE field not 6'b00_0000
                                                            i.e. it is not a FLOW CONTROL message, which
                                                            is the only defined type for HiGig2
                                                         2) FWD_TYPE field not 2'b00 i.e. Link Level msg
                                                            which is the only defined type for HiGig2
                                                         3) FC_OBJECT field is neither 4'b0000 for
                                                            Physical Link nor 4'b0010 for Logical Link.
                                                            Those are the only two defined types in HiGig2 */
	uint64_t undat                        : 1;  /**< Unexpected Data
                                                         (XAUI Mode only) */
	uint64_t uneop                        : 1;  /**< Unexpected EOP
                                                         (XAUI Mode only) */
	uint64_t unsop                        : 1;  /**< Unexpected SOP
                                                         (XAUI Mode only) */
	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
                                                         than /T/.  The error propagation control
                                                         character /E/ will be included as part of the
                                                         frame and does not cause a frame termination.
                                                         (XAUI Mode only) */
	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Detected
                                                         (XAUI Mode only) */
	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
	uint64_t reserved_16_18               : 3;
	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
                                                         Does not necessarily indicate a failure
                                                         (SGMII/1000Base-X only) */
	uint64_t coldet                       : 1;  /**< Collision Detection
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
                                                         (SGMII/1000Base-X only) */
	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
                                                         In XAUI mode, the column of data that was bad
                                                         will be logged in GMX_RX_XAUI_BAD_COL */
	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
                                                         This interrupt should never assert
                                                         (SGMII/1000Base-X only) */
	uint64_t reserved_9_9                 : 1;
	uint64_t skperr                       : 1;  /**< Skipper error */
	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
	uint64_t reserved_5_6                 : 2;
	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
	uint64_t reserved_2_2                 : 1;
	uint64_t carext                       : 1;  /**< Carrier extend error
                                                         (SGMII/1000Base-X only) */
	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize
                                                         Frame length checks are typically handled in PIP
                                                         (PIP_INT_REG[MINERR]), but pause frames are
                                                         normally discarded before being inspected by PIP. */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t minerr                       : 1;
	uint64_t carext                       : 1;
	uint64_t reserved_2_2                 : 1;
	uint64_t jabber                       : 1;
	uint64_t fcserr                       : 1;
	uint64_t reserved_5_6                 : 2;
	uint64_t rcverr                       : 1;
	uint64_t skperr                       : 1;
	uint64_t reserved_9_9                 : 1;
	uint64_t ovrerr                       : 1;
	uint64_t pcterr                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t falerr                       : 1;
	uint64_t coldet                       : 1;
	uint64_t ifgerr                       : 1;
	uint64_t reserved_16_18               : 3;
	uint64_t pause_drp                    : 1;
	uint64_t loc_fault                    : 1;
	uint64_t rem_fault                    : 1;
	uint64_t bad_seq                      : 1;
	uint64_t bad_term                     : 1;
	uint64_t unsop                        : 1;
	uint64_t uneop                        : 1;
	uint64_t undat                        : 1;
	uint64_t hg2fld                       : 1;
	uint64_t hg2cc                        : 1;
	uint64_t reserved_29_63               : 35;
#endif
	} cn63xx;
	struct cvmx_gmxx_rxx_int_reg_cn63xx   cn63xxp1; /* CN63XX pass 1: same layout as cn63xx */
4533};
typedef union cvmx_gmxx_rxx_int_reg cvmx_gmxx_rxx_int_reg_t; /* legacy *_t alias for this CSR union */
4535
4536/**
4537 * cvmx_gmx#_rx#_jabber
4538 *
4539 * GMX_RX_JABBER = The max size packet after which GMX will truncate
4540 *
4541 *
4542 * Notes:
4543 * CNT must be 8-byte aligned such that CNT[2:0] == 0
4544 *
4545 * The packet that will be sent to the packet input logic will have an
 * additional 8 bytes if GMX_RX_FRM_CTL[PRE_CHK] is set and
4547 * GMX_RX_FRM_CTL[PRE_STRP] is clear.  The max packet that will be sent is
4548 * defined as...
4549 *
4550 *      max_sized_packet = GMX_RX_JABBER[CNT]+((GMX_RX_FRM_CTL[PRE_CHK] & !GMX_RX_FRM_CTL[PRE_STRP])*8)
4551 *
4552 * In XAUI mode prt0 is used for checking.
4553 */
/* GMX_RX_JABBER CSR layout: 16-bit truncation threshold in bits 15:0. */
union cvmx_gmxx_rxx_jabber
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_jabber_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t cnt                          : 16; /**< Byte count for jabber check
                                                         Failing packets set the JABBER interrupt and are
                                                         optionally sent with opcode==JABBER
                                                         GMX will truncate the packet to CNT bytes */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_jabber_s         cn30xx;
	struct cvmx_gmxx_rxx_jabber_s         cn31xx;
	struct cvmx_gmxx_rxx_jabber_s         cn38xx;
	struct cvmx_gmxx_rxx_jabber_s         cn38xxp2;
	struct cvmx_gmxx_rxx_jabber_s         cn50xx;
	struct cvmx_gmxx_rxx_jabber_s         cn52xx;
	struct cvmx_gmxx_rxx_jabber_s         cn52xxp1;
	struct cvmx_gmxx_rxx_jabber_s         cn56xx;
	struct cvmx_gmxx_rxx_jabber_s         cn56xxp1;
	struct cvmx_gmxx_rxx_jabber_s         cn58xx;
	struct cvmx_gmxx_rxx_jabber_s         cn58xxp1;
	struct cvmx_gmxx_rxx_jabber_s         cn63xx;
	struct cvmx_gmxx_rxx_jabber_s         cn63xxp1;
};
typedef union cvmx_gmxx_rxx_jabber cvmx_gmxx_rxx_jabber_t; /* legacy *_t alias */
4585
4586/**
4587 * cvmx_gmx#_rx#_pause_drop_time
4588 *
4589 * GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
4590 *
4591 */
/* GMX_RX_PAUSE_DROP_TIME CSR layout: TIME field of the dropped PAUSE packet
   in bits 15:0. */
union cvmx_gmxx_rxx_pause_drop_time
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_pause_drop_time_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t status                       : 16; /**< Time extracted from the dropped PAUSE packet */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t status                       : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* Per-chip views (register first appeared on CN50XX). */
	struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx;
	struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_pause_drop_time cvmx_gmxx_rxx_pause_drop_time_t; /* legacy *_t alias */
4616
4617/**
4618 * cvmx_gmx#_rx#_rx_inbnd
4619 *
4620 * GMX_RX_INBND = RGMII InBand Link Status
4621 *
4622 *
4623 * Notes:
4624 * These fields are only valid if the attached PHY is operating in RGMII mode
4625 * and supports the optional in-band status (see section 3.4.1 of the RGMII
4626 * specification, version 1.3 for more information).
4627 */
/* GMX_RX_INBND CSR layout: RGMII in-band link status (duplex/speed/status)
   in bits 3:0.  Only present on RGMII-capable chips (no cn52xx/56xx/63xx). */
union cvmx_gmxx_rxx_rx_inbnd
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_rx_inbnd_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t duplex                       : 1;  /**< RGMII Inbound LinkDuplex
                                                         0=half-duplex
                                                         1=full-duplex */
	uint64_t speed                        : 2;  /**< RGMII Inbound LinkSpeed
                                                         00=2.5MHz
                                                         01=25MHz
                                                         10=125MHz
                                                         11=Reserved */
	uint64_t status                       : 1;  /**< RGMII Inbound LinkStatus
                                                         0=down
                                                         1=up */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t status                       : 1;
	uint64_t speed                        : 2;
	uint64_t duplex                       : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn30xx;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn31xx;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn38xx;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn38xxp2;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn50xx;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn58xx;
	struct cvmx_gmxx_rxx_rx_inbnd_s       cn58xxp1;
};
typedef union cvmx_gmxx_rxx_rx_inbnd cvmx_gmxx_rxx_rx_inbnd_t; /* legacy *_t alias */
4662
4663/**
4664 * cvmx_gmx#_rx#_stats_ctl
4665 *
4666 * GMX_RX_STATS_CTL = RX Stats Control register
4667 *
4668 */
/* GMX_RX_STATS_CTL CSR layout: single RD_CLR bit controlling clear-on-read
   behavior of the RX stats counters below. */
union cvmx_gmxx_rxx_stats_ctl
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t rd_clr                       : 1;  /**< RX Stats registers will clear on reads */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t rd_clr                       : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_ctl_s      cn30xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn31xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn38xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn38xxp2;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn50xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn52xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn52xxp1;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn56xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn56xxp1;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn58xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn58xxp1;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn63xx;
	struct cvmx_gmxx_rxx_stats_ctl_s      cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_ctl cvmx_gmxx_rxx_stats_ctl_t; /* legacy *_t alias */
4697
4698/**
4699 * cvmx_gmx#_rx#_stats_octs
4700 *
4701 * Notes:
4702 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4703 * - Counters will wrap
4704 */
/* GMX_RX_STATS_OCTS CSR layout: 48-bit wrapping octet counter for good
   received packets (clear-on-read if GMX_RX_STATS_CTL[RD_CLR] is set). */
union cvmx_gmxx_rxx_stats_octs
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_octs_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t cnt                          : 48; /**< Octet count of received good packets */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_octs_s     cn30xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn31xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn38xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn38xxp2;
	struct cvmx_gmxx_rxx_stats_octs_s     cn50xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn52xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn52xxp1;
	struct cvmx_gmxx_rxx_stats_octs_s     cn56xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn56xxp1;
	struct cvmx_gmxx_rxx_stats_octs_s     cn58xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn58xxp1;
	struct cvmx_gmxx_rxx_stats_octs_s     cn63xx;
	struct cvmx_gmxx_rxx_stats_octs_s     cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs cvmx_gmxx_rxx_stats_octs_t; /* legacy *_t alias */
4733
4734/**
4735 * cvmx_gmx#_rx#_stats_octs_ctl
4736 *
4737 * Notes:
4738 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4739 * - Counters will wrap
4740 */
/* GMX_RX_STATS_OCTS_CTL CSR layout: 48-bit wrapping octet counter for
   received pause packets (clear-on-read if GMX_RX_STATS_CTL[RD_CLR] set). */
union cvmx_gmxx_rxx_stats_octs_ctl
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_octs_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t cnt                          : 48; /**< Octet count of received pause packets */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx;
	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs_ctl cvmx_gmxx_rxx_stats_octs_ctl_t; /* legacy *_t alias */
4769
4770/**
4771 * cvmx_gmx#_rx#_stats_octs_dmac
4772 *
4773 * Notes:
4774 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4775 * - Counters will wrap
4776 */
/* GMX_RX_STATS_OCTS_DMAC CSR layout: 48-bit wrapping octet counter for
   DMAC-filtered packets (clear-on-read if GMX_RX_STATS_CTL[RD_CLR] set). */
union cvmx_gmxx_rxx_stats_octs_dmac
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_octs_dmac_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t cnt                          : 48; /**< Octet count of filtered dmac packets */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx;
	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs_dmac cvmx_gmxx_rxx_stats_octs_dmac_t; /* legacy *_t alias */
4805
4806/**
4807 * cvmx_gmx#_rx#_stats_octs_drp
4808 *
4809 * Notes:
4810 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4811 * - Counters will wrap
4812 */
/* GMX_RX_STATS_OCTS_DRP CSR layout: 48-bit wrapping octet counter for
   dropped packets (clear-on-read if GMX_RX_STATS_CTL[RD_CLR] set). */
union cvmx_gmxx_rxx_stats_octs_drp
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_octs_drp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t cnt                          : 48; /**< Octet count of dropped packets */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx;
	struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_octs_drp cvmx_gmxx_rxx_stats_octs_drp_t; /* legacy *_t alias */
4841
4842/**
4843 * cvmx_gmx#_rx#_stats_pkts
4844 *
4845 * GMX_RX_STATS_PKTS
4846 *
4847 * Count of good received packets - packets that are not recognized as PAUSE
4848 * packets, dropped due the DMAC filter, dropped due FIFO full status, or
4849 * have any other OPCODE (FCS, Length, etc).
4850 *
4851 * Notes:
4852 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4853 * - Counters will wrap
4854 */
/* GMX_RX_STATS_PKTS CSR layout: 32-bit wrapping count of good received
   packets (clear-on-read if GMX_RX_STATS_CTL[RD_CLR] set). */
union cvmx_gmxx_rxx_stats_pkts
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_pkts_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t cnt                          : 32; /**< Count of received good packets */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_pkts_s     cn30xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn31xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn38xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn38xxp2;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn50xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn52xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn52xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn56xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn56xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn58xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn58xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn63xx;
	struct cvmx_gmxx_rxx_stats_pkts_s     cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts cvmx_gmxx_rxx_stats_pkts_t; /* legacy *_t alias */
4883
4884/**
4885 * cvmx_gmx#_rx#_stats_pkts_bad
4886 *
4887 * GMX_RX_STATS_PKTS_BAD
4888 *
4889 * Count of all packets received with some error that were not dropped
4890 * either due to the dmac filter or lack of room in the receive FIFO.
4891 *
4892 * Notes:
4893 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4894 * - Counters will wrap
4895 */
/* GMX_RX_STATS_PKTS_BAD CSR layout: 32-bit wrapping count of errored
   packets (clear-on-read if GMX_RX_STATS_CTL[RD_CLR] set). */
union cvmx_gmxx_rxx_stats_pkts_bad
{
	uint64_t u64;                               /* raw 64-bit register value */
	struct cvmx_gmxx_rxx_stats_pkts_bad_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t cnt                          : 32; /**< Count of bad packets */
#else	/* little-endian: identical fields, low-order bits declared first */
	uint64_t cnt                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* Per-chip views: every supported chip uses the common layout. */
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx;
	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_bad cvmx_gmxx_rxx_stats_pkts_bad_t; /* legacy *_t alias */
4924
4925/**
4926 * cvmx_gmx#_rx#_stats_pkts_ctl
4927 *
4928 * GMX_RX_STATS_PKTS_CTL
4929 *
4930 * Count of all packets received that were recognized as Flow Control or
4931 * PAUSE packets.  PAUSE packets with any kind of error are counted in
4932 * GMX_RX_STATS_PKTS_BAD.  Pause packets can be optionally dropped or
4933 * forwarded based on the GMX_RX_FRM_CTL[CTL_DRP] bit.  This count
4934 * increments regardless of whether the packet is dropped.  Pause packets
 * will never be counted in GMX_RX_STATS_PKTS.  Packets dropped due to the dmac
 * filter will be counted in GMX_RX_STATS_PKTS_DMAC and not here.
4937 *
4938 * Notes:
4939 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4940 * - Counters will wrap
4941 */
union cvmx_gmxx_rxx_stats_pkts_ctl
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t cnt                          : 32; /**< Count of received pause packets */
#else
	uint64_t cnt                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx;
	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_ctl cvmx_gmxx_rxx_stats_pkts_ctl_t;
4970
4971/**
4972 * cvmx_gmx#_rx#_stats_pkts_dmac
4973 *
4974 * GMX_RX_STATS_PKTS_DMAC
4975 *
4976 * Count of all packets received that were dropped by the dmac filter.
4977 * Packets that match the DMAC will be dropped and counted here regardless
4978 * of if they were bad packets.  These packets will never be counted in
4979 * GMX_RX_STATS_PKTS.
4980 *
 * Some packets that were not able to satisfy the DECISION_CNT may not
 * actually be dropped by Octeon, but they will be counted here as if they
 * were dropped.
4984 *
4985 * Notes:
4986 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4987 * - Counters will wrap
4988 */
union cvmx_gmxx_rxx_stats_pkts_dmac
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t cnt                          : 32; /**< Count of filtered dmac packets */
#else
	uint64_t cnt                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx;
	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_dmac cvmx_gmxx_rxx_stats_pkts_dmac_t;
5017
5018/**
5019 * cvmx_gmx#_rx#_stats_pkts_drp
5020 *
5021 * GMX_RX_STATS_PKTS_DRP
5022 *
5023 * Count of all packets received that were dropped due to a full receive
5024 * FIFO.  This counts good and bad packets received - all packets dropped by
5025 * the FIFO.  It does not count packets dropped by the dmac or pause packet
5026 * filters.
5027 *
5028 * Notes:
5029 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
5030 * - Counters will wrap
5031 */
union cvmx_gmxx_rxx_stats_pkts_drp
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rxx_stats_pkts_drp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t cnt                          : 32; /**< Count of dropped packets */
#else
	uint64_t cnt                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx;
	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1;
};
typedef union cvmx_gmxx_rxx_stats_pkts_drp cvmx_gmxx_rxx_stats_pkts_drp_t;
5060
5061/**
5062 * cvmx_gmx#_rx#_udd_skp
5063 *
5064 * GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
5065 *
5066 *
5067 * Notes:
5068 * (1) The skip bytes are part of the packet and will be sent down the NCB
5069 *     packet interface and will be handled by PKI.
5070 *
5071 * (2) The system can determine if the UDD bytes are included in the FCS check
5072 *     by using the FCSSEL field - if the FCS check is enabled.
5073 *
5074 * (3) Assume that the preamble/sfd is always at the start of the frame - even
5075 *     before UDD bytes.  In most cases, there will be no preamble in these
5076 *     cases since it will be packet interface in direct communication to
5077 *     another packet interface (MAC to MAC) without a PHY involved.
5078 *
 * (4) We can still do address filtering and control packet filtering if the
 *     user desires.
5081 *
5082 * (5) UDD_SKP must be 0 in half-duplex operation unless
5083 *     GMX_RX_FRM_CTL[PRE_CHK] is clear.  If GMX_RX_FRM_CTL[PRE_CHK] is clear,
5084 *     then UDD_SKP will normally be 8.
5085 *
5086 * (6) In all cases, the UDD bytes will be sent down the packet interface as
5087 *     part of the packet.  The UDD bytes are never stripped from the actual
5088 *     packet.
5089 *
5090 * (7) If LEN != 0, then GMX_RX_FRM_CHK[LENERR] will be disabled and GMX_RX_INT_REG[LENERR] will be zero
5091 */
union cvmx_gmxx_rxx_udd_skp
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rxx_udd_skp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t fcssel                       : 1;  /**< Include the skip bytes in the FCS calculation
                                                         0 = all skip bytes are included in FCS
                                                         1 = the skip bytes are not included in FCS
                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, FCSSEL must
                                                         be zero. */
	uint64_t reserved_7_7                 : 1;
	uint64_t len                          : 7;  /**< Amount of User-defined data before the start of
                                                         the L2 data.  Zero means L2 comes first.
                                                         Max value is 64.
                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, LEN must be
                                                         set to 12 or 16 (depending on HiGig header size)
                                                         to account for the HiGig header. LEN=12 selects
                                                         HiGig/HiGig+, and LEN=16 selects HiGig2. */
#else
	uint64_t len                          : 7;
	uint64_t reserved_7_7                 : 1;
	uint64_t fcssel                       : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rxx_udd_skp_s        cn30xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn31xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn38xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn38xxp2;
	struct cvmx_gmxx_rxx_udd_skp_s        cn50xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn52xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn52xxp1;
	struct cvmx_gmxx_rxx_udd_skp_s        cn56xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn56xxp1;
	struct cvmx_gmxx_rxx_udd_skp_s        cn58xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn58xxp1;
	struct cvmx_gmxx_rxx_udd_skp_s        cn63xx;
	struct cvmx_gmxx_rxx_udd_skp_s        cn63xxp1;
};
typedef union cvmx_gmxx_rxx_udd_skp cvmx_gmxx_rxx_udd_skp_t;
5134
5135/**
5136 * cvmx_gmx#_rx_bp_drop#
5137 *
5138 * GMX_RX_BP_DROP = FIFO mark for packet drop
5139 *
5140 *
5141 * Notes:
5142 * The actual watermark is dynamic with respect to the GMX_RX_PRTS
5143 * register.  The GMX_RX_PRTS controls the depth of the port's
5144 * FIFO so as ports are added or removed, the drop point may change.
5145 *
5146 * In XAUI mode prt0 is used for checking.
5147 */
union cvmx_gmxx_rx_bp_dropx
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_bp_dropx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63                : 58;
	uint64_t mark                         : 6;  /**< Number of 8B ticks to reserve in the RX FIFO.
                                                         When the FIFO exceeds this count, packets will
                                                         be dropped and not buffered.
                                                         MARK should typically be programmed to ports+1.
                                                         Failure to program correctly can lead to system
                                                         instability. */
#else
	uint64_t mark                         : 6;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rx_bp_dropx_s        cn30xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn31xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn38xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn38xxp2;
	struct cvmx_gmxx_rx_bp_dropx_s        cn50xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn52xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn52xxp1;
	struct cvmx_gmxx_rx_bp_dropx_s        cn56xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn56xxp1;
	struct cvmx_gmxx_rx_bp_dropx_s        cn58xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn58xxp1;
	struct cvmx_gmxx_rx_bp_dropx_s        cn63xx;
	struct cvmx_gmxx_rx_bp_dropx_s        cn63xxp1;
};
typedef union cvmx_gmxx_rx_bp_dropx cvmx_gmxx_rx_bp_dropx_t;
5181
5182/**
5183 * cvmx_gmx#_rx_bp_off#
5184 *
5185 * GMX_RX_BP_OFF = Lowater mark for packet drop
5186 *
5187 *
5188 * Notes:
5189 * In XAUI mode, prt0 is used for checking.
5190 *
5191 */
union cvmx_gmxx_rx_bp_offx
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_bp_offx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63                : 58;
	uint64_t mark                         : 6;  /**< Water mark (8B ticks) to deassert backpressure */
#else
	uint64_t mark                         : 6;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rx_bp_offx_s         cn30xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn31xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn38xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn38xxp2;
	struct cvmx_gmxx_rx_bp_offx_s         cn50xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn52xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn52xxp1;
	struct cvmx_gmxx_rx_bp_offx_s         cn56xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn56xxp1;
	struct cvmx_gmxx_rx_bp_offx_s         cn58xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn58xxp1;
	struct cvmx_gmxx_rx_bp_offx_s         cn63xx;
	struct cvmx_gmxx_rx_bp_offx_s         cn63xxp1;
};
typedef union cvmx_gmxx_rx_bp_offx cvmx_gmxx_rx_bp_offx_t;
5220
5221/**
5222 * cvmx_gmx#_rx_bp_on#
5223 *
5224 * GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
5225 *
5226 *
5227 * Notes:
5228 * In XAUI mode, prt0 is used for checking.
5229 *
5230 */
union cvmx_gmxx_rx_bp_onx
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_bp_onx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t mark                         : 9;  /**< Hiwater mark (8B ticks) for backpressure.
                                                         Each register is for an individual port.  In XAUI
                                                         mode, prt0 is used for the unified RX FIFO
                                                         GMX_RX_BP_ON must satisfy
                                                         BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP)
                                                         A value of zero will immediately assert back
                                                         pressure. */
#else
	uint64_t mark                         : 9;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rx_bp_onx_s          cn30xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn31xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn38xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn38xxp2;
	struct cvmx_gmxx_rx_bp_onx_s          cn50xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn52xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn52xxp1;
	struct cvmx_gmxx_rx_bp_onx_s          cn56xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn56xxp1;
	struct cvmx_gmxx_rx_bp_onx_s          cn58xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn58xxp1;
	struct cvmx_gmxx_rx_bp_onx_s          cn63xx;
	struct cvmx_gmxx_rx_bp_onx_s          cn63xxp1;
};
typedef union cvmx_gmxx_rx_bp_onx cvmx_gmxx_rx_bp_onx_t;
5265
5266/**
5267 * cvmx_gmx#_rx_hg2_status
5268 *
5269 * ** HG2 message CSRs
5270 *
5271 */
union cvmx_gmxx_rx_hg2_status
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_hg2_status_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t phtim2go                     : 16; /**< Physical time to go for removal of physical link
                                                         pause. Initial value from received HiGig2 msg pkt
                                                         Non-zero only when physical back pressure active */
	uint64_t xof                          : 16; /**< 16 bit xof back pressure vector from HiGig2 msg pkt
                                                         or from CBFC packets.
                                                         Non-zero only when logical back pressure is active
                                                         All bits will be 0 when LGTIM2GO=0 */
	uint64_t lgtim2go                     : 16; /**< Logical packet flow back pressure time remaining
                                                         Initial value set from xof time field of HiGig2
                                                         message packet received or a function of the
                                                         enabled and current timers for CBFC packets.
                                                         Non-zero only when logical back pressure is active */
#else
	uint64_t lgtim2go                     : 16;
	uint64_t xof                          : 16;
	uint64_t phtim2go                     : 16;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* Present only on the chip models listed below; all use the common layout. */
	struct cvmx_gmxx_rx_hg2_status_s      cn52xx;
	struct cvmx_gmxx_rx_hg2_status_s      cn52xxp1;
	struct cvmx_gmxx_rx_hg2_status_s      cn56xx;
	struct cvmx_gmxx_rx_hg2_status_s      cn63xx;
	struct cvmx_gmxx_rx_hg2_status_s      cn63xxp1;
};
typedef union cvmx_gmxx_rx_hg2_status cvmx_gmxx_rx_hg2_status_t;
5305
5306/**
5307 * cvmx_gmx#_rx_pass_en
5308 *
5309 * GMX_RX_PASS_EN = Packet pass through mode enable
5310 *
5311 * When both Octane ports are running in Spi4 mode, packets can be directly
5312 * passed from one SPX interface to the other without being processed by the
5313 * core or PP's.  The register has one bit for each port to enable the pass
5314 * through feature.
5315 *
5316 * Notes:
5317 * (1) Can only be used in dual Spi4 configs
5318 *
5319 * (2) The mapped pass through output port cannot be the destination port for
5320 *     any Octane core traffic.
5321 */
union cvmx_gmxx_rx_pass_en
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_pass_en_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t en                           : 16; /**< Which ports to configure in pass through mode */
#else
	uint64_t en                           : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* Present only on the chip models listed below; all use the common layout. */
	struct cvmx_gmxx_rx_pass_en_s         cn38xx;
	struct cvmx_gmxx_rx_pass_en_s         cn38xxp2;
	struct cvmx_gmxx_rx_pass_en_s         cn58xx;
	struct cvmx_gmxx_rx_pass_en_s         cn58xxp1;
};
typedef union cvmx_gmxx_rx_pass_en cvmx_gmxx_rx_pass_en_t;
5341
5342/**
5343 * cvmx_gmx#_rx_pass_map#
5344 *
5345 * GMX_RX_PASS_MAP = Packet pass through port map
5346 *
5347 */
union cvmx_gmxx_rx_pass_mapx
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_pass_mapx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t dprt                         : 4;  /**< Destination port to map Spi pass through traffic */
#else
	uint64_t dprt                         : 4;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	/* Present only on the chip models listed below; all use the common layout. */
	struct cvmx_gmxx_rx_pass_mapx_s       cn38xx;
	struct cvmx_gmxx_rx_pass_mapx_s       cn38xxp2;
	struct cvmx_gmxx_rx_pass_mapx_s       cn58xx;
	struct cvmx_gmxx_rx_pass_mapx_s       cn58xxp1;
};
typedef union cvmx_gmxx_rx_pass_mapx cvmx_gmxx_rx_pass_mapx_t;
5367
5368/**
5369 * cvmx_gmx#_rx_prt_info
5370 *
5371 * GMX_RX_PRT_INFO = Report the RX status for port
5372 *
5373 *
5374 * Notes:
5375 * In XAUI mode, only the lsb (corresponding to port0) of DROP and COMMIT are used.
5376 *
5377 */
union cvmx_gmxx_rx_prt_info
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_prt_info_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t drop                         : 16; /**< Per port indication that data was dropped */
	uint64_t commit                       : 16; /**< Per port indication that SOP was accepted */
#else
	uint64_t commit                       : 16;
	uint64_t drop                         : 16;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* cn30xx/cn31xx/cn50xx expose only 3 ports (3-bit DROP/COMMIT fields). */
	struct cvmx_gmxx_rx_prt_info_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_19_63               : 45;
	uint64_t drop                         : 3;  /**< Per port indication that data was dropped */
	uint64_t reserved_3_15                : 13;
	uint64_t commit                       : 3;  /**< Per port indication that SOP was accepted */
#else
	uint64_t commit                       : 3;
	uint64_t reserved_3_15                : 13;
	uint64_t drop                         : 3;
	uint64_t reserved_19_63               : 45;
#endif
	} cn30xx;
	struct cvmx_gmxx_rx_prt_info_cn30xx   cn31xx;
	struct cvmx_gmxx_rx_prt_info_s        cn38xx;
	struct cvmx_gmxx_rx_prt_info_cn30xx   cn50xx;
	/* cn52xx-family and cn63xx expose 4 ports (4-bit DROP/COMMIT fields). */
	struct cvmx_gmxx_rx_prt_info_cn52xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t drop                         : 4;  /**< Per port indication that data was dropped */
	uint64_t reserved_4_15                : 12;
	uint64_t commit                       : 4;  /**< Per port indication that SOP was accepted */
#else
	uint64_t commit                       : 4;
	uint64_t reserved_4_15                : 12;
	uint64_t drop                         : 4;
	uint64_t reserved_20_63               : 44;
#endif
	} cn52xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn52xxp1;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn56xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn56xxp1;
	struct cvmx_gmxx_rx_prt_info_s        cn58xx;
	struct cvmx_gmxx_rx_prt_info_s        cn58xxp1;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn63xx;
	struct cvmx_gmxx_rx_prt_info_cn52xx   cn63xxp1;
};
typedef union cvmx_gmxx_rx_prt_info cvmx_gmxx_rx_prt_info_t;
5433
5434/**
5435 * cvmx_gmx#_rx_prts
5436 *
5437 * GMX_RX_PRTS = Number of FIFOs to carve the RX buffer into
5438 *
5439 *
5440 * Notes:
5441 * GMX_RX_PRTS[PRTS] must be set to '1' in XAUI mode.
5442 *
5443 */
union cvmx_gmxx_rx_prts
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_prts_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_3_63                : 61;
	uint64_t prts                         : 3;  /**< In SGMII/1000Base-X mode, the RX buffer can be
                                                         carved into several logical buffers depending on
                                                         the number or implemented ports.
                                                         0 or 1 port  = 512ticks / 4096bytes
                                                         2 ports      = 256ticks / 2048bytes
                                                         3 or 4 ports = 128ticks / 1024bytes */
#else
	uint64_t prts                         : 3;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_rx_prts_s            cn30xx;
	struct cvmx_gmxx_rx_prts_s            cn31xx;
	struct cvmx_gmxx_rx_prts_s            cn38xx;
	struct cvmx_gmxx_rx_prts_s            cn38xxp2;
	struct cvmx_gmxx_rx_prts_s            cn50xx;
	struct cvmx_gmxx_rx_prts_s            cn52xx;
	struct cvmx_gmxx_rx_prts_s            cn52xxp1;
	struct cvmx_gmxx_rx_prts_s            cn56xx;
	struct cvmx_gmxx_rx_prts_s            cn56xxp1;
	struct cvmx_gmxx_rx_prts_s            cn58xx;
	struct cvmx_gmxx_rx_prts_s            cn58xxp1;
	struct cvmx_gmxx_rx_prts_s            cn63xx;
	struct cvmx_gmxx_rx_prts_s            cn63xxp1;
};
typedef union cvmx_gmxx_rx_prts cvmx_gmxx_rx_prts_t;
5477
5478/**
5479 * cvmx_gmx#_rx_tx_status
5480 *
5481 * GMX_RX_TX_STATUS = GMX RX/TX Status
5482 *
5483 */
union cvmx_gmxx_rx_tx_status
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_tx_status_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_7_63                : 57;
	uint64_t tx                           : 3;  /**< Transmit data since last read */
	uint64_t reserved_3_3                 : 1;
	uint64_t rx                           : 3;  /**< Receive data since last read */
#else
	uint64_t rx                           : 3;
	uint64_t reserved_3_3                 : 1;
	uint64_t tx                           : 3;
	uint64_t reserved_7_63                : 57;
#endif
	} s;
	/* Present only on the chip models listed below; all use the common layout. */
	struct cvmx_gmxx_rx_tx_status_s       cn30xx;
	struct cvmx_gmxx_rx_tx_status_s       cn31xx;
	struct cvmx_gmxx_rx_tx_status_s       cn50xx;
};
typedef union cvmx_gmxx_rx_tx_status cvmx_gmxx_rx_tx_status_t;
5506
5507/**
5508 * cvmx_gmx#_rx_xaui_bad_col
5509 */
union cvmx_gmxx_rx_xaui_bad_col
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_xaui_bad_col_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_40_63               : 24;
	uint64_t val                          : 1;  /**< Set when GMX_RX_INT_REG[PCTERR] is set.
                                                         (XAUI mode only) */
	uint64_t state                        : 3;  /**< When GMX_RX_INT_REG[PCTERR] is set, STATE will
                                                         contain the receive state at the time of the
                                                         error.
                                                         (XAUI mode only) */
	uint64_t lane_rxc                     : 4;  /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXC will
                                                         contain the XAUI column at the time of the error.
                                                         (XAUI mode only) */
	uint64_t lane_rxd                     : 32; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXD will
                                                         contain the XAUI column at the time of the error.
                                                         (XAUI mode only) */
#else
	uint64_t lane_rxd                     : 32;
	uint64_t lane_rxc                     : 4;
	uint64_t state                        : 3;
	uint64_t val                          : 1;
	uint64_t reserved_40_63               : 24;
#endif
	} s;
	/* Present only on the chip models listed below; all use the common layout. */
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn52xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn52xxp1;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn56xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn56xxp1;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn63xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s    cn63xxp1;
};
typedef union cvmx_gmxx_rx_xaui_bad_col cvmx_gmxx_rx_xaui_bad_col_t;
5545
5546/**
5547 * cvmx_gmx#_rx_xaui_ctl
5548 */
union cvmx_gmxx_rx_xaui_ctl
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_rx_xaui_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t status                       : 2;  /**< Link Status
                                                         0=Link OK
                                                         1=Local Fault
                                                         2=Remote Fault
                                                         3=Reserved
                                                         (XAUI mode only) */
#else
	uint64_t status                       : 2;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	/* Present only on the chip models listed below; all use the common layout. */
	struct cvmx_gmxx_rx_xaui_ctl_s        cn52xx;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn52xxp1;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn56xx;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn56xxp1;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn63xx;
	struct cvmx_gmxx_rx_xaui_ctl_s        cn63xxp1;
};
typedef union cvmx_gmxx_rx_xaui_ctl cvmx_gmxx_rx_xaui_ctl_t;
5575
5576/**
5577 * cvmx_gmx#_smac#
5578 *
5579 * GMX_SMAC = Packet SMAC
5580 *
5581 */
union cvmx_gmxx_smacx
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_smacx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t smac                         : 48; /**< The SMAC field is used for generating and
                                                         accepting Control Pause packets */
#else
	uint64_t smac                         : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_smacx_s              cn30xx;
	struct cvmx_gmxx_smacx_s              cn31xx;
	struct cvmx_gmxx_smacx_s              cn38xx;
	struct cvmx_gmxx_smacx_s              cn38xxp2;
	struct cvmx_gmxx_smacx_s              cn50xx;
	struct cvmx_gmxx_smacx_s              cn52xx;
	struct cvmx_gmxx_smacx_s              cn52xxp1;
	struct cvmx_gmxx_smacx_s              cn56xx;
	struct cvmx_gmxx_smacx_s              cn56xxp1;
	struct cvmx_gmxx_smacx_s              cn58xx;
	struct cvmx_gmxx_smacx_s              cn58xxp1;
	struct cvmx_gmxx_smacx_s              cn63xx;
	struct cvmx_gmxx_smacx_s              cn63xxp1;
};
typedef union cvmx_gmxx_smacx cvmx_gmxx_smacx_t;
5611
5612/**
5613 * cvmx_gmx#_soft_bist
5614 *
5615 * GMX_SOFT_BIST = Software BIST Control
5616 *
5617 */
union cvmx_gmxx_soft_bist
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_soft_bist_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t start_bist                   : 1;  /**< Run BIST on all memories in the XAUI CLK domain */
	uint64_t clear_bist                   : 1;  /**< Choose between full BIST and CLEAR bist
                                                         0=Run full BIST
                                                         1=Only run clear BIST */
#else
	uint64_t clear_bist                   : 1;
	uint64_t start_bist                   : 1;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	/* Present only on cn63xx-family chips; both use the common layout. */
	struct cvmx_gmxx_soft_bist_s          cn63xx;
	struct cvmx_gmxx_soft_bist_s          cn63xxp1;
};
typedef union cvmx_gmxx_soft_bist cvmx_gmxx_soft_bist_t;
5639
/**
 * cvmx_gmx#_stat_bp
 *
 * GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
 *
 */
union cvmx_gmxx_stat_bp
{
	uint64_t u64;                         /* Entire 64-bit register value */
	struct cvmx_gmxx_stat_bp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_17_63               : 47;
	uint64_t bp                           : 1;  /**< Current BP state */
	uint64_t cnt                          : 16; /**< Number of cycles that BP has been asserted
                                                         Saturating counter */
#else
	uint64_t cnt                          : 16;
	uint64_t bp                           : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} s;
	/* All listed chip models share the common layout above. */
	struct cvmx_gmxx_stat_bp_s            cn30xx;
	struct cvmx_gmxx_stat_bp_s            cn31xx;
	struct cvmx_gmxx_stat_bp_s            cn38xx;
	struct cvmx_gmxx_stat_bp_s            cn38xxp2;
	struct cvmx_gmxx_stat_bp_s            cn50xx;
	struct cvmx_gmxx_stat_bp_s            cn52xx;
	struct cvmx_gmxx_stat_bp_s            cn52xxp1;
	struct cvmx_gmxx_stat_bp_s            cn56xx;
	struct cvmx_gmxx_stat_bp_s            cn56xxp1;
	struct cvmx_gmxx_stat_bp_s            cn58xx;
	struct cvmx_gmxx_stat_bp_s            cn58xxp1;
	struct cvmx_gmxx_stat_bp_s            cn63xx;
	struct cvmx_gmxx_stat_bp_s            cn63xxp1;
};
typedef union cvmx_gmxx_stat_bp cvmx_gmxx_stat_bp_t;
5677
5678/**
5679 * cvmx_gmx#_tx#_append
5680 *
5681 * GMX_TX_APPEND = Packet TX Append Control
5682 *
5683 */
union cvmx_gmxx_txx_append
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_append_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t force_fcs                    : 1;  /**< Append the Ethernet FCS on each pause packet
                                                         when FCS is clear.  Pause packets are normally
                                                         padded to 60 bytes.  If GMX_TX_MIN_PKT[MIN_SIZE]
                                                         exceeds 59, then FORCE_FCS will not be used. */
	uint64_t fcs                          : 1;  /**< Append the Ethernet FCS on each packet */
	uint64_t pad                          : 1;  /**< Append PAD bytes such that min sized */
	uint64_t preamble                     : 1;  /**< Prepend the Ethernet preamble on each transfer
                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, PREAMBLE
                                                         must be zero. */
#else
	uint64_t preamble                     : 1;
	uint64_t pad                          : 1;
	uint64_t fcs                          : 1;
	uint64_t force_fcs                    : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_gmxx_txx_append_s         cn30xx;
	struct cvmx_gmxx_txx_append_s         cn31xx;
	struct cvmx_gmxx_txx_append_s         cn38xx;
	struct cvmx_gmxx_txx_append_s         cn38xxp2;
	struct cvmx_gmxx_txx_append_s         cn50xx;
	struct cvmx_gmxx_txx_append_s         cn52xx;
	struct cvmx_gmxx_txx_append_s         cn52xxp1;
	struct cvmx_gmxx_txx_append_s         cn56xx;
	struct cvmx_gmxx_txx_append_s         cn56xxp1;
	struct cvmx_gmxx_txx_append_s         cn58xx;
	struct cvmx_gmxx_txx_append_s         cn58xxp1;
	struct cvmx_gmxx_txx_append_s         cn63xx;
	struct cvmx_gmxx_txx_append_s         cn63xxp1;
};
typedef union cvmx_gmxx_txx_append cvmx_gmxx_txx_append_t;
5723
5724/**
5725 * cvmx_gmx#_tx#_burst
5726 *
5727 * GMX_TX_BURST = Packet TX Burst Counter
5728 *
5729 */
union cvmx_gmxx_txx_burst
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_burst_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t burst                        : 16; /**< Burst (refer to 802.3 to set correctly)
                                                         Only valid for 1000Mbs half-duplex operation
                                                          halfdup / 1000Mbs: 0x2000
                                                          all other modes:   0x0
                                                         (SGMII/1000Base-X only) */
#else
	uint64_t burst                        : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_burst_s          cn30xx;
	struct cvmx_gmxx_txx_burst_s          cn31xx;
	struct cvmx_gmxx_txx_burst_s          cn38xx;
	struct cvmx_gmxx_txx_burst_s          cn38xxp2;
	struct cvmx_gmxx_txx_burst_s          cn50xx;
	struct cvmx_gmxx_txx_burst_s          cn52xx;
	struct cvmx_gmxx_txx_burst_s          cn52xxp1;
	struct cvmx_gmxx_txx_burst_s          cn56xx;
	struct cvmx_gmxx_txx_burst_s          cn56xxp1;
	struct cvmx_gmxx_txx_burst_s          cn58xx;
	struct cvmx_gmxx_txx_burst_s          cn58xxp1;
	struct cvmx_gmxx_txx_burst_s          cn63xx;
	struct cvmx_gmxx_txx_burst_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_burst cvmx_gmxx_txx_burst_t;
5762
5763/**
5764 * cvmx_gmx#_tx#_cbfc_xoff
5765 */
union cvmx_gmxx_txx_cbfc_xoff
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_cbfc_xoff_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t xoff                         : 16; /**< Which ports to backpressure
                                                         Do not write in HiGig2 mode i.e. when
                                                         GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                         GMX_RX_UDD_SKP[SKIP]=16. */
#else
	uint64_t xoff                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_cbfc_xoff_s      cn52xx;
	struct cvmx_gmxx_txx_cbfc_xoff_s      cn56xx;
	struct cvmx_gmxx_txx_cbfc_xoff_s      cn63xx;
	struct cvmx_gmxx_txx_cbfc_xoff_s      cn63xxp1;
};
typedef union cvmx_gmxx_txx_cbfc_xoff cvmx_gmxx_txx_cbfc_xoff_t;
5788
5789/**
5790 * cvmx_gmx#_tx#_cbfc_xon
5791 */
union cvmx_gmxx_txx_cbfc_xon
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_cbfc_xon_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t xon                          : 16; /**< Which ports to stop backpressure
                                                         Do not write in HiGig2 mode i.e. when
                                                         GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                         GMX_RX_UDD_SKP[SKIP]=16. */
#else
	uint64_t xon                          : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_cbfc_xon_s       cn52xx;
	struct cvmx_gmxx_txx_cbfc_xon_s       cn56xx;
	struct cvmx_gmxx_txx_cbfc_xon_s       cn63xx;
	struct cvmx_gmxx_txx_cbfc_xon_s       cn63xxp1;
};
typedef union cvmx_gmxx_txx_cbfc_xon cvmx_gmxx_txx_cbfc_xon_t;
5814
5815/**
5816 * cvmx_gmx#_tx#_clk
5817 *
5818 * Per Port
5819 *
5820 *
5821 * GMX_TX_CLK = RGMII TX Clock Generation Register
5822 *
5823 * Notes:
5824 * Programming Restrictions:
5825 *  (1) In RGMII mode, if GMX_PRT_CFG[SPEED]==0, then CLK_CNT must be > 1.
5826 *  (2) In MII mode, CLK_CNT == 1
5827 *  (3) In RGMII or GMII mode, if CLK_CNT==0, Octeon will not generate a tx clock.
5828 *
5829 * RGMII Example:
5830 *  Given a 125MHz PLL reference clock...
5831 *   CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns* 1)
5832 *   CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns* 5)
5833 *   CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns*50)
5834 */
union cvmx_gmxx_txx_clk
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_clk_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63                : 58;
	uint64_t clk_cnt                      : 6;  /**< Controls the RGMII TXC frequency
                                                         When PLL is used, TXC(phase) =
                                                          spi4_tx_pll_ref_clk(period)/2*CLK_CNT
                                                         When PLL bypass is used, TXC(phase) =
                                                          spi4_tx_pll_ref_clk(period)*2*CLK_CNT
                                                         NOTE: CLK_CNT==0 will not generate any clock
                                                         if CLK_CNT > 1 if GMX_PRT_CFG[SPEED]==0 */
#else
	uint64_t clk_cnt                      : 6;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	struct cvmx_gmxx_txx_clk_s            cn30xx;
	struct cvmx_gmxx_txx_clk_s            cn31xx;
	struct cvmx_gmxx_txx_clk_s            cn38xx;
	struct cvmx_gmxx_txx_clk_s            cn38xxp2;
	struct cvmx_gmxx_txx_clk_s            cn50xx;
	struct cvmx_gmxx_txx_clk_s            cn58xx;
	struct cvmx_gmxx_txx_clk_s            cn58xxp1;
};
typedef union cvmx_gmxx_txx_clk cvmx_gmxx_txx_clk_t;
5863
5864/**
5865 * cvmx_gmx#_tx#_ctl
5866 *
5867 * GMX_TX_CTL = TX Control register
5868 *
5869 */
union cvmx_gmxx_txx_ctl
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t xsdef_en                     : 1;  /**< Enables the excessive deferral check for stats
                                                         and interrupts
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol_en                     : 1;  /**< Enables the excessive collision check for stats
                                                         and interrupts
                                                         (SGMII/1000Base-X half-duplex only) */
#else
	uint64_t xscol_en                     : 1;
	uint64_t xsdef_en                     : 1;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	struct cvmx_gmxx_txx_ctl_s            cn30xx;
	struct cvmx_gmxx_txx_ctl_s            cn31xx;
	struct cvmx_gmxx_txx_ctl_s            cn38xx;
	struct cvmx_gmxx_txx_ctl_s            cn38xxp2;
	struct cvmx_gmxx_txx_ctl_s            cn50xx;
	struct cvmx_gmxx_txx_ctl_s            cn52xx;
	struct cvmx_gmxx_txx_ctl_s            cn52xxp1;
	struct cvmx_gmxx_txx_ctl_s            cn56xx;
	struct cvmx_gmxx_txx_ctl_s            cn56xxp1;
	struct cvmx_gmxx_txx_ctl_s            cn58xx;
	struct cvmx_gmxx_txx_ctl_s            cn58xxp1;
	struct cvmx_gmxx_txx_ctl_s            cn63xx;
	struct cvmx_gmxx_txx_ctl_s            cn63xxp1;
};
typedef union cvmx_gmxx_txx_ctl cvmx_gmxx_txx_ctl_t;
5904
5905/**
5906 * cvmx_gmx#_tx#_min_pkt
5907 *
 * GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD up to min size)
5909 *
5910 */
union cvmx_gmxx_txx_min_pkt
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_min_pkt_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t min_size                     : 8;  /**< Min frame in bytes before the FCS is applied
                                                         Padding is only appended when GMX_TX_APPEND[PAD]
                                                         for the corresponding port is set.
                                                         In SGMII mode, packets will be padded to
                                                          MIN_SIZE+1. The reset value will pad to 60 bytes.
                                                         In XAUI mode, packets will be padded to
                                                          MIN(252,(MIN_SIZE+1 & ~0x3))
                                                         When GMX_TX_XAUI_CTL[HG_EN] is set, the HiGig
                                                          header (12B or 16B) is normally added to the
                                                          packet, so MIN_SIZE should be 59+12=71B for
                                                          HiGig or 59+16=75B for HiGig2. */
#else
	uint64_t min_size                     : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_gmxx_txx_min_pkt_s        cn30xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn31xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn38xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn38xxp2;
	struct cvmx_gmxx_txx_min_pkt_s        cn50xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn52xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn52xxp1;
	struct cvmx_gmxx_txx_min_pkt_s        cn56xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn56xxp1;
	struct cvmx_gmxx_txx_min_pkt_s        cn58xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn58xxp1;
	struct cvmx_gmxx_txx_min_pkt_s        cn63xx;
	struct cvmx_gmxx_txx_min_pkt_s        cn63xxp1;
};
typedef union cvmx_gmxx_txx_min_pkt cvmx_gmxx_txx_min_pkt_t;
5949
5950/**
5951 * cvmx_gmx#_tx#_pause_pkt_interval
5952 *
5953 * GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
5954 *
5955 *
5956 * Notes:
5957 * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
5958 * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
5959 * designer.  It is suggested that TIME be much greater than INTERVAL and
5960 * GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
5961 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octeon is ready for additional
5963 * data.
5964 *
5965 * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
5967 * following rule...
5968 *
5969 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
5970 *
 * where largest_pkt_size is the largest packet that the system can send
5972 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
5973 * of the PAUSE packet (normally 64B).
5974 */
union cvmx_gmxx_txx_pause_pkt_interval
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_pause_pkt_interval_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t interval                     : 16; /**< Arbitrate for a 802.3 pause packet, HiGig2 message,
                                                         or CBFC pause packet every (INTERVAL*512)
                                                         bit-times.
                                                         Normally, 0 < INTERVAL < GMX_TX_PAUSE_PKT_TIME
                                                         INTERVAL=0, will only send a single PAUSE packet
                                                         for each backpressure event */
#else
	uint64_t interval                     : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_pkt_interval cvmx_gmxx_txx_pause_pkt_interval_t;
6008
6009/**
6010 * cvmx_gmx#_tx#_pause_pkt_time
6011 *
6012 * GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
6013 *
6014 *
6015 * Notes:
6016 * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
6017 * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
6018 * designer.  It is suggested that TIME be much greater than INTERVAL and
6019 * GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
6020 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octeon is ready for additional
6022 * data.
6023 *
6024 * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
6026 * following rule...
6027 *
6028 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
6029 *
 * where largest_pkt_size is the largest packet that the system can send
6031 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
6032 * of the PAUSE packet (normally 64B).
6033 */
union cvmx_gmxx_txx_pause_pkt_time
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_pause_pkt_time_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t time                         : 16; /**< The pause_time field placed in outbound 802.3 pause
                                                         packets, HiGig2 messages, or CBFC pause packets.
                                                         pause_time is in 512 bit-times
                                                         Normally, TIME > GMX_TX_PAUSE_PKT_INTERVAL */
#else
	uint64_t time                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_pkt_time cvmx_gmxx_txx_pause_pkt_time_t;
6065
6066/**
6067 * cvmx_gmx#_tx#_pause_togo
6068 *
6069 * GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
6070 *
6071 */
union cvmx_gmxx_txx_pause_togo
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_pause_togo_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t msg_time                     : 16; /**< Amount of time remaining to backpressure
                                                         From the higig2 physical message pause timer
                                                         (only valid on port0) */
	uint64_t time                         : 16; /**< Amount of time remaining to backpressure
                                                         From the standard 802.3 pause timer */
#else
	uint64_t time                         : 16;
	uint64_t msg_time                     : 16;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	/* Older chips lack the HiGig2 MSG_TIME field; only TIME is present. */
	struct cvmx_gmxx_txx_pause_togo_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t time                         : 16; /**< Amount of time remaining to backpressure */
#else
	uint64_t time                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} cn30xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
	struct cvmx_gmxx_txx_pause_togo_s     cn52xx;
	struct cvmx_gmxx_txx_pause_togo_s     cn52xxp1;
	struct cvmx_gmxx_txx_pause_togo_s     cn56xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
	struct cvmx_gmxx_txx_pause_togo_s     cn63xx;
	struct cvmx_gmxx_txx_pause_togo_s     cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_togo cvmx_gmxx_txx_pause_togo_t;
6114
6115/**
6116 * cvmx_gmx#_tx#_pause_zero
6117 *
6118 * GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure
6119 *
6120 */
union cvmx_gmxx_txx_pause_zero
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_pause_zero_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t send                         : 1;  /**< When backpressure condition clear, send PAUSE
                                                         packet with pause_time of zero to enable the
                                                         channel */
#else
	uint64_t send                         : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_zero_s     cn30xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn31xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn38xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn38xxp2;
	struct cvmx_gmxx_txx_pause_zero_s     cn50xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn52xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn52xxp1;
	struct cvmx_gmxx_txx_pause_zero_s     cn56xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn56xxp1;
	struct cvmx_gmxx_txx_pause_zero_s     cn58xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn58xxp1;
	struct cvmx_gmxx_txx_pause_zero_s     cn63xx;
	struct cvmx_gmxx_txx_pause_zero_s     cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_zero cvmx_gmxx_txx_pause_zero_t;
6151
6152/**
6153 * cvmx_gmx#_tx#_sgmii_ctl
6154 */
union cvmx_gmxx_txx_sgmii_ctl
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_sgmii_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t align                        : 1;  /**< Align the transmission to even cycles
                                                         0 = Data can be sent on any cycle
                                                             Possible for the TX PCS machine to drop
                                                             first byte of preamble
                                                         1 = Data will only be sent on even cycles
                                                             There will be no loss of data
                                                         (SGMII/1000Base-X only) */
#else
	uint64_t align                        : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	struct cvmx_gmxx_txx_sgmii_ctl_s      cn52xx;
	struct cvmx_gmxx_txx_sgmii_ctl_s      cn52xxp1;
	struct cvmx_gmxx_txx_sgmii_ctl_s      cn56xx;
	struct cvmx_gmxx_txx_sgmii_ctl_s      cn56xxp1;
	struct cvmx_gmxx_txx_sgmii_ctl_s      cn63xx;
	struct cvmx_gmxx_txx_sgmii_ctl_s      cn63xxp1;
};
typedef union cvmx_gmxx_txx_sgmii_ctl cvmx_gmxx_txx_sgmii_ctl_t;
6182
6183/**
6184 * cvmx_gmx#_tx#_slot
6185 *
6186 * GMX_TX_SLOT = Packet TX Slottime Counter
6187 *
6188 */
union cvmx_gmxx_txx_slot
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_slot_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_10_63               : 54;
	uint64_t slot                         : 10; /**< Slottime (refer to 802.3 to set correctly)
                                                         10/100Mbs: 0x40
                                                         1000Mbs:   0x200
                                                         (SGMII/1000Base-X only) */
#else
	uint64_t slot                         : 10;
	uint64_t reserved_10_63               : 54;
#endif
	} s;
	struct cvmx_gmxx_txx_slot_s           cn30xx;
	struct cvmx_gmxx_txx_slot_s           cn31xx;
	struct cvmx_gmxx_txx_slot_s           cn38xx;
	struct cvmx_gmxx_txx_slot_s           cn38xxp2;
	struct cvmx_gmxx_txx_slot_s           cn50xx;
	struct cvmx_gmxx_txx_slot_s           cn52xx;
	struct cvmx_gmxx_txx_slot_s           cn52xxp1;
	struct cvmx_gmxx_txx_slot_s           cn56xx;
	struct cvmx_gmxx_txx_slot_s           cn56xxp1;
	struct cvmx_gmxx_txx_slot_s           cn58xx;
	struct cvmx_gmxx_txx_slot_s           cn58xxp1;
	struct cvmx_gmxx_txx_slot_s           cn63xx;
	struct cvmx_gmxx_txx_slot_s           cn63xxp1;
};
typedef union cvmx_gmxx_txx_slot cvmx_gmxx_txx_slot_t;
6220
6221/**
6222 * cvmx_gmx#_tx#_soft_pause
6223 *
6224 * GMX_TX_SOFT_PAUSE = Packet TX Software Pause
6225 *
6226 */
union cvmx_gmxx_txx_soft_pause
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_soft_pause_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t time                         : 16; /**< Back off the TX bus for (TIME*512) bit-times */
#else
	uint64_t time                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_soft_pause_s     cn30xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn31xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn38xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn38xxp2;
	struct cvmx_gmxx_txx_soft_pause_s     cn50xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn52xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn52xxp1;
	struct cvmx_gmxx_txx_soft_pause_s     cn56xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn56xxp1;
	struct cvmx_gmxx_txx_soft_pause_s     cn58xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn58xxp1;
	struct cvmx_gmxx_txx_soft_pause_s     cn63xx;
	struct cvmx_gmxx_txx_soft_pause_s     cn63xxp1;
};
typedef union cvmx_gmxx_txx_soft_pause cvmx_gmxx_txx_soft_pause_t;
6255
6256/**
6257 * cvmx_gmx#_tx#_stat0
6258 *
6259 * GMX_TX_STAT0 = GMX_TX_STATS_XSDEF / GMX_TX_STATS_XSCOL
6260 *
6261 *
6262 * Notes:
6263 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6264 * - Counters will wrap
6265 */
union cvmx_gmxx_txx_stat0
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_stat0_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t xsdef                        : 32; /**< Number of packets dropped (never successfully
                                                         sent) due to excessive deferral
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol                        : 32; /**< Number of packets dropped (never successfully
                                                         sent) due to excessive collision.  Defined by
                                                         GMX_TX_COL_ATTEMPT[LIMIT].
                                                         (SGMII/1000Base-X half-duplex only) */
#else
	uint64_t xscol                        : 32;
	uint64_t xsdef                        : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat0_s          cn30xx;
	struct cvmx_gmxx_txx_stat0_s          cn31xx;
	struct cvmx_gmxx_txx_stat0_s          cn38xx;
	struct cvmx_gmxx_txx_stat0_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat0_s          cn50xx;
	struct cvmx_gmxx_txx_stat0_s          cn52xx;
	struct cvmx_gmxx_txx_stat0_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat0_s          cn56xx;
	struct cvmx_gmxx_txx_stat0_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat0_s          cn58xx;
	struct cvmx_gmxx_txx_stat0_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat0_s          cn63xx;
	struct cvmx_gmxx_txx_stat0_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat0 cvmx_gmxx_txx_stat0_t;
6299
6300/**
6301 * cvmx_gmx#_tx#_stat1
6302 *
6303 * GMX_TX_STAT1 = GMX_TX_STATS_SCOL  / GMX_TX_STATS_MCOL
6304 *
6305 *
6306 * Notes:
6307 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6308 * - Counters will wrap
6309 */
union cvmx_gmxx_txx_stat1
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_stat1_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t scol                         : 32; /**< Number of packets sent with a single collision
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t mcol                         : 32; /**< Number of packets sent with multiple collisions
                                                         but < GMX_TX_COL_ATTEMPT[LIMIT].
                                                         (SGMII/1000Base-X half-duplex only) */
#else
	uint64_t mcol                         : 32;
	uint64_t scol                         : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat1_s          cn30xx;
	struct cvmx_gmxx_txx_stat1_s          cn31xx;
	struct cvmx_gmxx_txx_stat1_s          cn38xx;
	struct cvmx_gmxx_txx_stat1_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat1_s          cn50xx;
	struct cvmx_gmxx_txx_stat1_s          cn52xx;
	struct cvmx_gmxx_txx_stat1_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat1_s          cn56xx;
	struct cvmx_gmxx_txx_stat1_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat1_s          cn58xx;
	struct cvmx_gmxx_txx_stat1_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat1_s          cn63xx;
	struct cvmx_gmxx_txx_stat1_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat1 cvmx_gmxx_txx_stat1_t;
6341
6342/**
6343 * cvmx_gmx#_tx#_stat2
6344 *
6345 * GMX_TX_STAT2 = GMX_TX_STATS_OCTS
6346 *
6347 *
6348 * Notes:
 * - Octet counts are the sum of all data transmitted on the wire including
 *   packet data, pad bytes, fcs bytes, pause bytes, and jam bytes.  The octet
 *   counts do not include PREAMBLE byte or EXTEND cycles.
6352 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6353 * - Counters will wrap
6354 */
union cvmx_gmxx_txx_stat2
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_stat2_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t octs                         : 48; /**< Number of total octets sent on the interface.
                                                         Does not count octets from frames that were
                                                         truncated due to collisions in halfdup mode. */
#else
	uint64_t octs                         : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_gmxx_txx_stat2_s          cn30xx;
	struct cvmx_gmxx_txx_stat2_s          cn31xx;
	struct cvmx_gmxx_txx_stat2_s          cn38xx;
	struct cvmx_gmxx_txx_stat2_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat2_s          cn50xx;
	struct cvmx_gmxx_txx_stat2_s          cn52xx;
	struct cvmx_gmxx_txx_stat2_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat2_s          cn56xx;
	struct cvmx_gmxx_txx_stat2_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat2_s          cn58xx;
	struct cvmx_gmxx_txx_stat2_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat2_s          cn63xx;
	struct cvmx_gmxx_txx_stat2_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat2 cvmx_gmxx_txx_stat2_t;
6385
6386/**
6387 * cvmx_gmx#_tx#_stat3
6388 *
6389 * GMX_TX_STAT3 = GMX_TX_STATS_PKTS
6390 *
6391 *
6392 * Notes:
6393 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6394 * - Counters will wrap
6395 */
union cvmx_gmxx_txx_stat3
{
	uint64_t u64; /**< Entire register as a raw 64-bit value */
	struct cvmx_gmxx_txx_stat3_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t pkts                         : 32; /**< Number of total frames sent on the interface.
                                                         Does not count frames that were truncated due to
                                                          collisions in halfdup mode. */
#else
	uint64_t pkts                         : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat3_s          cn30xx;
	struct cvmx_gmxx_txx_stat3_s          cn31xx;
	struct cvmx_gmxx_txx_stat3_s          cn38xx;
	struct cvmx_gmxx_txx_stat3_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat3_s          cn50xx;
	struct cvmx_gmxx_txx_stat3_s          cn52xx;
	struct cvmx_gmxx_txx_stat3_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat3_s          cn56xx;
	struct cvmx_gmxx_txx_stat3_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat3_s          cn58xx;
	struct cvmx_gmxx_txx_stat3_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat3_s          cn63xx;
	struct cvmx_gmxx_txx_stat3_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat3 cvmx_gmxx_txx_stat3_t;
6426
6427/**
6428 * cvmx_gmx#_tx#_stat4
6429 *
6430 * GMX_TX_STAT4 = GMX_TX_STATS_HIST1 (64) / GMX_TX_STATS_HIST0 (<64)
6431 *
6432 *
6433 * Notes:
6434 * - Packet length is the sum of all data transmitted on the wire for the given
6435 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
6437 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6438 * - Counters will wrap
6439 */
union cvmx_gmxx_txx_stat4
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stat4_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t hist1                        : 32; /**< Number of packets sent with an octet count of 64. */
	uint64_t hist0                        : 32; /**< Number of packets sent with an octet count
                                                         of < 64. */
#else
	uint64_t hist0                        : 32;
	uint64_t hist1                        : 32;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stat4_s          cn30xx;
	struct cvmx_gmxx_txx_stat4_s          cn31xx;
	struct cvmx_gmxx_txx_stat4_s          cn38xx;
	struct cvmx_gmxx_txx_stat4_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat4_s          cn50xx;
	struct cvmx_gmxx_txx_stat4_s          cn52xx;
	struct cvmx_gmxx_txx_stat4_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat4_s          cn56xx;
	struct cvmx_gmxx_txx_stat4_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat4_s          cn58xx;
	struct cvmx_gmxx_txx_stat4_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat4_s          cn63xx;
	struct cvmx_gmxx_txx_stat4_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat4 cvmx_gmxx_txx_stat4_t;
6469
6470/**
6471 * cvmx_gmx#_tx#_stat5
6472 *
6473 * GMX_TX_STAT5 = GMX_TX_STATS_HIST3 (128- 255) / GMX_TX_STATS_HIST2 (65- 127)
6474 *
6475 *
6476 * Notes:
6477 * - Packet length is the sum of all data transmitted on the wire for the given
6478 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
6480 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6481 * - Counters will wrap
6482 */
union cvmx_gmxx_txx_stat5
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stat5_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t hist3                        : 32; /**< Number of packets sent with an octet count of
                                                         128 - 255. */
	uint64_t hist2                        : 32; /**< Number of packets sent with an octet count of
                                                         65 - 127. */
#else
	uint64_t hist2                        : 32;
	uint64_t hist3                        : 32;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stat5_s          cn30xx;
	struct cvmx_gmxx_txx_stat5_s          cn31xx;
	struct cvmx_gmxx_txx_stat5_s          cn38xx;
	struct cvmx_gmxx_txx_stat5_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat5_s          cn50xx;
	struct cvmx_gmxx_txx_stat5_s          cn52xx;
	struct cvmx_gmxx_txx_stat5_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat5_s          cn56xx;
	struct cvmx_gmxx_txx_stat5_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat5_s          cn58xx;
	struct cvmx_gmxx_txx_stat5_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat5_s          cn63xx;
	struct cvmx_gmxx_txx_stat5_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat5 cvmx_gmxx_txx_stat5_t;
6513
6514/**
6515 * cvmx_gmx#_tx#_stat6
6516 *
6517 * GMX_TX_STAT6 = GMX_TX_STATS_HIST5 (512-1023) / GMX_TX_STATS_HIST4 (256-511)
6518 *
6519 *
6520 * Notes:
6521 * - Packet length is the sum of all data transmitted on the wire for the given
6522 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
6524 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6525 * - Counters will wrap
6526 */
union cvmx_gmxx_txx_stat6
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stat6_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t hist5                        : 32; /**< Number of packets sent with an octet count of
                                                         512 - 1023. */
	uint64_t hist4                        : 32; /**< Number of packets sent with an octet count of
                                                         256 - 511. */
#else
	uint64_t hist4                        : 32;
	uint64_t hist5                        : 32;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stat6_s          cn30xx;
	struct cvmx_gmxx_txx_stat6_s          cn31xx;
	struct cvmx_gmxx_txx_stat6_s          cn38xx;
	struct cvmx_gmxx_txx_stat6_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat6_s          cn50xx;
	struct cvmx_gmxx_txx_stat6_s          cn52xx;
	struct cvmx_gmxx_txx_stat6_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat6_s          cn56xx;
	struct cvmx_gmxx_txx_stat6_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat6_s          cn58xx;
	struct cvmx_gmxx_txx_stat6_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat6_s          cn63xx;
	struct cvmx_gmxx_txx_stat6_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat6 cvmx_gmxx_txx_stat6_t;
6557
6558/**
6559 * cvmx_gmx#_tx#_stat7
6560 *
 * GMX_TX_STAT7 = GMX_TX_STATS_HIST7 (>1518) / GMX_TX_STATS_HIST6 (1024-1518)
6562 *
6563 *
6564 * Notes:
6565 * - Packet length is the sum of all data transmitted on the wire for the given
6566 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
6568 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6569 * - Counters will wrap
6570 */
union cvmx_gmxx_txx_stat7
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stat7_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t hist7                        : 32; /**< Number of packets sent with an octet count
                                                         of > 1518. */
	uint64_t hist6                        : 32; /**< Number of packets sent with an octet count of
                                                         1024 - 1518. */
#else
	uint64_t hist6                        : 32;
	uint64_t hist7                        : 32;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stat7_s          cn30xx;
	struct cvmx_gmxx_txx_stat7_s          cn31xx;
	struct cvmx_gmxx_txx_stat7_s          cn38xx;
	struct cvmx_gmxx_txx_stat7_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat7_s          cn50xx;
	struct cvmx_gmxx_txx_stat7_s          cn52xx;
	struct cvmx_gmxx_txx_stat7_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat7_s          cn56xx;
	struct cvmx_gmxx_txx_stat7_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat7_s          cn58xx;
	struct cvmx_gmxx_txx_stat7_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat7_s          cn63xx;
	struct cvmx_gmxx_txx_stat7_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat7 cvmx_gmxx_txx_stat7_t;
6601
6602/**
6603 * cvmx_gmx#_tx#_stat8
6604 *
6605 * GMX_TX_STAT8 = GMX_TX_STATS_MCST  / GMX_TX_STATS_BCST
6606 *
6607 *
6608 * Notes:
6609 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6610 * - Counters will wrap
6611 * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
6612 *   packet.  GMX assumes that the DMAC lies in the first 6 bytes of the packet
6613 *   as per the 802.3 frame definition.  If the system requires additional data
6614 *   before the L2 header, then the MCST and BCST counters may not reflect
6615 *   reality and should be ignored by software.
6616 */
union cvmx_gmxx_txx_stat8
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stat8_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t mcst                         : 32; /**< Number of packets sent to multicast DMAC.
                                                         Does not include BCST packets. */
	uint64_t bcst                         : 32; /**< Number of packets sent to broadcast DMAC.
                                                         Does not include MCST packets. */
#else
	uint64_t bcst                         : 32;
	uint64_t mcst                         : 32;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stat8_s          cn30xx;
	struct cvmx_gmxx_txx_stat8_s          cn31xx;
	struct cvmx_gmxx_txx_stat8_s          cn38xx;
	struct cvmx_gmxx_txx_stat8_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat8_s          cn50xx;
	struct cvmx_gmxx_txx_stat8_s          cn52xx;
	struct cvmx_gmxx_txx_stat8_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat8_s          cn56xx;
	struct cvmx_gmxx_txx_stat8_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat8_s          cn58xx;
	struct cvmx_gmxx_txx_stat8_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat8_s          cn63xx;
	struct cvmx_gmxx_txx_stat8_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat8 cvmx_gmxx_txx_stat8_t;
6647
6648/**
6649 * cvmx_gmx#_tx#_stat9
6650 *
6651 * GMX_TX_STAT9 = GMX_TX_STATS_UNDFLW / GMX_TX_STATS_CTL
6652 *
6653 *
6654 * Notes:
6655 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6656 * - Counters will wrap
6657 */
union cvmx_gmxx_txx_stat9
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stat9_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t undflw                       : 32; /**< Number of underflow packets */
	uint64_t ctl                          : 32; /**< Number of Control packets (PAUSE flow control)
                                                         generated by GMX.  It does not include control
                                                         packets forwarded or generated by the PP's. */
#else
	uint64_t ctl                          : 32;
	uint64_t undflw                       : 32;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stat9_s          cn30xx;
	struct cvmx_gmxx_txx_stat9_s          cn31xx;
	struct cvmx_gmxx_txx_stat9_s          cn38xx;
	struct cvmx_gmxx_txx_stat9_s          cn38xxp2;
	struct cvmx_gmxx_txx_stat9_s          cn50xx;
	struct cvmx_gmxx_txx_stat9_s          cn52xx;
	struct cvmx_gmxx_txx_stat9_s          cn52xxp1;
	struct cvmx_gmxx_txx_stat9_s          cn56xx;
	struct cvmx_gmxx_txx_stat9_s          cn56xxp1;
	struct cvmx_gmxx_txx_stat9_s          cn58xx;
	struct cvmx_gmxx_txx_stat9_s          cn58xxp1;
	struct cvmx_gmxx_txx_stat9_s          cn63xx;
	struct cvmx_gmxx_txx_stat9_s          cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat9 cvmx_gmxx_txx_stat9_t;
6688
6689/**
6690 * cvmx_gmx#_tx#_stats_ctl
6691 *
6692 * GMX_TX_STATS_CTL = TX Stats Control register
6693 *
6694 */
union cvmx_gmxx_txx_stats_ctl
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_stats_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t rd_clr                       : 1;  /**< Stats registers will clear on reads */
#else
	uint64_t rd_clr                       : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_txx_stats_ctl_s      cn30xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn31xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn38xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn38xxp2;
	struct cvmx_gmxx_txx_stats_ctl_s      cn50xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn52xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn52xxp1;
	struct cvmx_gmxx_txx_stats_ctl_s      cn56xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn56xxp1;
	struct cvmx_gmxx_txx_stats_ctl_s      cn58xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn58xxp1;
	struct cvmx_gmxx_txx_stats_ctl_s      cn63xx;
	struct cvmx_gmxx_txx_stats_ctl_s      cn63xxp1;
};
typedef union cvmx_gmxx_txx_stats_ctl cvmx_gmxx_txx_stats_ctl_t;
6723
6724/**
6725 * cvmx_gmx#_tx#_thresh
6726 *
6727 * Per Port
6728 *
6729 *
6730 * GMX_TX_THRESH = Packet TX Threshold
6731 *
6732 * Notes:
6733 * In XAUI mode, prt0 is used for checking.  Since XAUI mode uses a single TX FIFO and is higher data rate, recommended value is 0x100.
6734 *
6735 */
union cvmx_gmxx_txx_thresh
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_txx_thresh_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t cnt                          : 9;  /**< Number of 16B ticks to accumulate in the TX FIFO
                                                         before sending on the packet interface
                                                         This register should be large enough to prevent
                                                         underflow on the packet interface and must never
                                                         be set to zero.  This register cannot exceed
                                                         the TX FIFO depth which is...
                                                          GMX_TX_PRTS==0,1:  CNT MAX = 0x100
                                                          GMX_TX_PRTS==2  :  CNT MAX = 0x080
                                                          GMX_TX_PRTS==3,4:  CNT MAX = 0x040 */
#else
	uint64_t cnt                          : 9;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	/* cn30xx/cn31xx/cn50xx use a narrower 7-bit CNT field. */
	struct cvmx_gmxx_txx_thresh_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_7_63                : 57;
	uint64_t cnt                          : 7;  /**< Number of 16B ticks to accumulate in the TX FIFO
                                                         before sending on the RGMII interface
                                                         This register should be large enough to prevent
                                                         underflow on the RGMII interface and must never
                                                         be set below 4.  This register cannot exceed
                                                         the TX FIFO depth which is 64 16B entries. */
#else
	uint64_t cnt                          : 7;
	uint64_t reserved_7_63                : 57;
#endif
	} cn30xx;
	struct cvmx_gmxx_txx_thresh_cn30xx    cn31xx;
	struct cvmx_gmxx_txx_thresh_s         cn38xx;
	struct cvmx_gmxx_txx_thresh_s         cn38xxp2;
	struct cvmx_gmxx_txx_thresh_cn30xx    cn50xx;
	struct cvmx_gmxx_txx_thresh_s         cn52xx;
	struct cvmx_gmxx_txx_thresh_s         cn52xxp1;
	struct cvmx_gmxx_txx_thresh_s         cn56xx;
	struct cvmx_gmxx_txx_thresh_s         cn56xxp1;
	struct cvmx_gmxx_txx_thresh_s         cn58xx;
	struct cvmx_gmxx_txx_thresh_s         cn58xxp1;
	struct cvmx_gmxx_txx_thresh_s         cn63xx;
	struct cvmx_gmxx_txx_thresh_s         cn63xxp1;
};
typedef union cvmx_gmxx_txx_thresh cvmx_gmxx_txx_thresh_t;
6786
6787/**
6788 * cvmx_gmx#_tx_bp
6789 *
6790 * GMX_TX_BP = Packet Interface TX BackPressure Register
6791 *
6792 *
6793 * Notes:
6794 * In XAUI mode, only the lsb (corresponding to port0) of BP is used.
6795 *
6796 */
union cvmx_gmxx_tx_bp
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_bp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t bp                           : 4;  /**< Per port BackPressure status
                                                         0=Port is available
                                                         1=Port should be back pressured */
#else
	uint64_t bp                           : 4;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	/* cn30xx/cn31xx/cn50xx expose only 3 ports, so BP is 3 bits wide. */
	struct cvmx_gmxx_tx_bp_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_3_63                : 61;
	uint64_t bp                           : 3;  /**< Per port BackPressure status
                                                         0=Port is available
                                                         1=Port should be back pressured */
#else
	uint64_t bp                           : 3;
	uint64_t reserved_3_63                : 61;
#endif
	} cn30xx;
	struct cvmx_gmxx_tx_bp_cn30xx         cn31xx;
	struct cvmx_gmxx_tx_bp_s              cn38xx;
	struct cvmx_gmxx_tx_bp_s              cn38xxp2;
	struct cvmx_gmxx_tx_bp_cn30xx         cn50xx;
	struct cvmx_gmxx_tx_bp_s              cn52xx;
	struct cvmx_gmxx_tx_bp_s              cn52xxp1;
	struct cvmx_gmxx_tx_bp_s              cn56xx;
	struct cvmx_gmxx_tx_bp_s              cn56xxp1;
	struct cvmx_gmxx_tx_bp_s              cn58xx;
	struct cvmx_gmxx_tx_bp_s              cn58xxp1;
	struct cvmx_gmxx_tx_bp_s              cn63xx;
	struct cvmx_gmxx_tx_bp_s              cn63xxp1;
};
typedef union cvmx_gmxx_tx_bp cvmx_gmxx_tx_bp_t;
6838
6839/**
6840 * cvmx_gmx#_tx_clk_msk#
6841 *
6842 * GMX_TX_CLK_MSK = GMX Clock Select
6843 *
6844 */
union cvmx_gmxx_tx_clk_mskx
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_clk_mskx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t msk                          : 1;  /**< Write this bit to a 1 when switching clks */
#else
	uint64_t msk                          : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	/* Only cn30xx and cn50xx variants are defined for this register. */
	struct cvmx_gmxx_tx_clk_mskx_s        cn30xx;
	struct cvmx_gmxx_tx_clk_mskx_s        cn50xx;
};
typedef union cvmx_gmxx_tx_clk_mskx cvmx_gmxx_tx_clk_mskx_t;
6862
6863/**
6864 * cvmx_gmx#_tx_col_attempt
6865 *
6866 * GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
6867 *
6868 */
union cvmx_gmxx_tx_col_attempt
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_col_attempt_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t limit                        : 5;  /**< Collision Attempts
                                                         (SGMII/1000Base-X half-duplex only) */
#else
	uint64_t limit                        : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_tx_col_attempt_s     cn30xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn31xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn38xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn38xxp2;
	struct cvmx_gmxx_tx_col_attempt_s     cn50xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn52xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn52xxp1;
	struct cvmx_gmxx_tx_col_attempt_s     cn56xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn56xxp1;
	struct cvmx_gmxx_tx_col_attempt_s     cn58xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn58xxp1;
	struct cvmx_gmxx_tx_col_attempt_s     cn63xx;
	struct cvmx_gmxx_tx_col_attempt_s     cn63xxp1;
};
typedef union cvmx_gmxx_tx_col_attempt cvmx_gmxx_tx_col_attempt_t;
6898
6899/**
6900 * cvmx_gmx#_tx_corrupt
6901 *
6902 * GMX_TX_CORRUPT = TX - Corrupt TX packets with the ERR bit set
6903 *
6904 *
6905 * Notes:
6906 * Packets sent from PKO with the ERR wire asserted will be corrupted by
6907 * the transmitter if CORRUPT[prt] is set (XAUI uses prt==0).
6908 *
6909 * Corruption means that GMX will send a bad FCS value.  If GMX_TX_APPEND[FCS]
6910 * is clear then no FCS is sent and the GMX cannot corrupt it.  The corrupt FCS
6911 * value is 0xeeeeeeee for SGMII/1000Base-X and 4 bytes of the error
6912 * propagation code in XAUI mode.
6913 */
union cvmx_gmxx_tx_corrupt
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_corrupt_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t corrupt                      : 4;  /**< Per port error propagation
                                                         0=Never corrupt packets
                                                         1=Corrupt packets with ERR */
#else
	uint64_t corrupt                      : 4;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	/* cn30xx/cn31xx/cn50xx expose only 3 ports, so CORRUPT is 3 bits wide. */
	struct cvmx_gmxx_tx_corrupt_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_3_63                : 61;
	uint64_t corrupt                      : 3;  /**< Per port error propagation
                                                         0=Never corrupt packets
                                                         1=Corrupt packets with ERR */
#else
	uint64_t corrupt                      : 3;
	uint64_t reserved_3_63                : 61;
#endif
	} cn30xx;
	struct cvmx_gmxx_tx_corrupt_cn30xx    cn31xx;
	struct cvmx_gmxx_tx_corrupt_s         cn38xx;
	struct cvmx_gmxx_tx_corrupt_s         cn38xxp2;
	struct cvmx_gmxx_tx_corrupt_cn30xx    cn50xx;
	struct cvmx_gmxx_tx_corrupt_s         cn52xx;
	struct cvmx_gmxx_tx_corrupt_s         cn52xxp1;
	struct cvmx_gmxx_tx_corrupt_s         cn56xx;
	struct cvmx_gmxx_tx_corrupt_s         cn56xxp1;
	struct cvmx_gmxx_tx_corrupt_s         cn58xx;
	struct cvmx_gmxx_tx_corrupt_s         cn58xxp1;
	struct cvmx_gmxx_tx_corrupt_s         cn63xx;
	struct cvmx_gmxx_tx_corrupt_s         cn63xxp1;
};
typedef union cvmx_gmxx_tx_corrupt cvmx_gmxx_tx_corrupt_t;
6955
6956/**
6957 * cvmx_gmx#_tx_hg2_reg1
6958 *
6959 * Notes:
6960 * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
6961 * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
6962 * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
6963 * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
6964 * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
6965 */
union cvmx_gmxx_tx_hg2_reg1
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_hg2_reg1_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t tx_xof                       : 16; /**< TX HiGig2 message for logical link pause when any
                                                         bit value changes
                                                          Only write in HiGig2 mode i.e. when
                                                          GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                          GMX_RX_UDD_SKP[SKIP]=16. */
#else
	uint64_t tx_xof                       : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* HiGig2 is only available on these chip models. */
	struct cvmx_gmxx_tx_hg2_reg1_s        cn52xx;
	struct cvmx_gmxx_tx_hg2_reg1_s        cn52xxp1;
	struct cvmx_gmxx_tx_hg2_reg1_s        cn56xx;
	struct cvmx_gmxx_tx_hg2_reg1_s        cn63xx;
	struct cvmx_gmxx_tx_hg2_reg1_s        cn63xxp1;
};
typedef union cvmx_gmxx_tx_hg2_reg1 cvmx_gmxx_tx_hg2_reg1_t;
6990
6991/**
6992 * cvmx_gmx#_tx_hg2_reg2
6993 *
6994 * Notes:
6995 * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
6996 * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address  of
6997 * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
6998 * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
6999 * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
7000 */
union cvmx_gmxx_tx_hg2_reg2
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_hg2_reg2_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t tx_xon                       : 16; /**< TX HiGig2 message for logical link pause when any
                                                         bit value changes
                                                          Only write in HiGig2 mode i.e. when
                                                          GMX_TX_XAUI_CTL[HG_EN]=1 and
                                                          GMX_RX_UDD_SKP[SKIP]=16. */
#else
	uint64_t tx_xon                       : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* HiGig2 is only available on these chip models. */
	struct cvmx_gmxx_tx_hg2_reg2_s        cn52xx;
	struct cvmx_gmxx_tx_hg2_reg2_s        cn52xxp1;
	struct cvmx_gmxx_tx_hg2_reg2_s        cn56xx;
	struct cvmx_gmxx_tx_hg2_reg2_s        cn63xx;
	struct cvmx_gmxx_tx_hg2_reg2_s        cn63xxp1;
};
typedef union cvmx_gmxx_tx_hg2_reg2 cvmx_gmxx_tx_hg2_reg2_t;
7025
7026/**
7027 * cvmx_gmx#_tx_ifg
7028 *
7029 * GMX_TX_IFG = Packet TX Interframe Gap
7030 *
7031 *
7032 * Notes:
7033 * * Programming IFG1 and IFG2.
7034 *
7035 * For 10/100/1000Mbs half-duplex systems that require IEEE 802.3
7036 * compatibility, IFG1 must be in the range of 1-8, IFG2 must be in the range
7037 * of 4-12, and the IFG1+IFG2 sum must be 12.
7038 *
7039 * For 10/100/1000Mbs full-duplex systems that require IEEE 802.3
7040 * compatibility, IFG1 must be in the range of 1-11, IFG2 must be in the range
7041 * of 1-11, and the IFG1+IFG2 sum must be 12.
7042 *
7043 * For XAUI/10Gbs systems that require IEEE 802.3 compatibility, the
7044 * IFG1+IFG2 sum must be 12.  IFG1[1:0] and IFG2[1:0] must be zero.
7045 *
7046 * For all other systems, IFG1 and IFG2 can be any value in the range of
7047 * 1-15.  Allowing for a total possible IFG sum of 2-30.
7048 */
union cvmx_gmxx_tx_ifg
{
	uint64_t u64;                               /**< Entire register as a single 64-bit value */
	struct cvmx_gmxx_tx_ifg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t ifg2                         : 4;  /**< 1/3 of the interframe gap timing (in IFG2*8 bits)
                                                         If CRS is detected during IFG2, then the
                                                         interFrameSpacing timer is not reset and a frame
                                                         is transmitted once the timer expires. */
	uint64_t ifg1                         : 4;  /**< 2/3 of the interframe gap timing (in IFG1*8 bits)
                                                         If CRS is detected during IFG1, then the
                                                         interFrameSpacing timer is reset and a frame is
                                                         not transmitted. */
#else
	uint64_t ifg1                         : 4;
	uint64_t ifg2                         : 4;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	/* All supported chip models share the common layout above. */
	struct cvmx_gmxx_tx_ifg_s             cn30xx;
	struct cvmx_gmxx_tx_ifg_s             cn31xx;
	struct cvmx_gmxx_tx_ifg_s             cn38xx;
	struct cvmx_gmxx_tx_ifg_s             cn38xxp2;
	struct cvmx_gmxx_tx_ifg_s             cn50xx;
	struct cvmx_gmxx_tx_ifg_s             cn52xx;
	struct cvmx_gmxx_tx_ifg_s             cn52xxp1;
	struct cvmx_gmxx_tx_ifg_s             cn56xx;
	struct cvmx_gmxx_tx_ifg_s             cn56xxp1;
	struct cvmx_gmxx_tx_ifg_s             cn58xx;
	struct cvmx_gmxx_tx_ifg_s             cn58xxp1;
	struct cvmx_gmxx_tx_ifg_s             cn63xx;
	struct cvmx_gmxx_tx_ifg_s             cn63xxp1;
};
typedef union cvmx_gmxx_tx_ifg cvmx_gmxx_tx_ifg_t;
7085
7086/**
7087 * cvmx_gmx#_tx_int_en
7088 *
7089 * GMX_TX_INT_EN = Interrupt Enable
7090 *
7091 *
7092 * Notes:
7093 * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
7094 *
7095 */
7096union cvmx_gmxx_tx_int_en
7097{
7098	uint64_t u64;
7099	struct cvmx_gmxx_tx_int_en_s
7100	{
7101#if __BYTE_ORDER == __BIG_ENDIAN
7102	uint64_t reserved_24_63               : 40;
7103	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
7104                                                         sent due to XSCOL */
7105	uint64_t late_col                     : 4;  /**< TX Late Collision
7106                                                         (SGMII/1000Base-X half-duplex only) */
7107	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
7108                                                         (SGMII/1000Base-X half-duplex only) */
7109	uint64_t xscol                        : 4;  /**< TX Excessive collisions
7110                                                         (SGMII/1000Base-X half-duplex only) */
7111	uint64_t reserved_6_7                 : 2;
7112	uint64_t undflw                       : 4;  /**< TX Underflow */
7113	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
7114	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
7115#else
7116	uint64_t pko_nxa                      : 1;
7117	uint64_t ncb_nxa                      : 1;
7118	uint64_t undflw                       : 4;
7119	uint64_t reserved_6_7                 : 2;
7120	uint64_t xscol                        : 4;
7121	uint64_t xsdef                        : 4;
7122	uint64_t late_col                     : 4;
7123	uint64_t ptp_lost                     : 4;
7124	uint64_t reserved_24_63               : 40;
7125#endif
7126	} s;
	/* CN30XX layout: per-port enable fields are only 3 bits wide here
	 * (vs. 4 bits on CN38XX), leaving bits 3, 7, 11 and 15 reserved;
	 * there is no NCB_NXA or PTP_LOST field on this model. */
	struct cvmx_gmxx_tx_int_en_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_19_63               : 45;
	uint64_t late_col                     : 3;  /**< TX Late Collision */
	uint64_t reserved_15_15               : 1;
	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t reserved_11_11               : 1;
	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_5_7                 : 3;
	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 3;
	uint64_t reserved_5_7                 : 3;
	uint64_t xscol                        : 3;
	uint64_t reserved_11_11               : 1;
	uint64_t xsdef                        : 3;
	uint64_t reserved_15_15               : 1;
	uint64_t late_col                     : 3;
	uint64_t reserved_19_63               : 45;
#endif
	} cn30xx;
	/* CN31XX layout: identical to CN30XX except the LATE_COL field
	 * (bits 16-18) does not exist; everything above bit 14 is reserved. */
	struct cvmx_gmxx_tx_int_en_cn31xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_15_63               : 49;
	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t reserved_11_11               : 1;
	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_5_7                 : 3;
	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 3;
	uint64_t reserved_5_7                 : 3;
	uint64_t xscol                        : 3;
	uint64_t reserved_11_11               : 1;
	uint64_t xsdef                        : 3;
	uint64_t reserved_15_63               : 49;
#endif
	} cn31xx;
	/* CN38XX layout: per-port fields widen to 4 bits and an NCB_NXA bit
	 * appears at bit 1; LATE_COL exists on PASS3 silicon only (see field
	 * note below). */
	struct cvmx_gmxx_tx_int_en_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (PASS3 only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t ncb_nxa                      : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t reserved_20_63               : 44;
#endif
	} cn38xx;
	/* CN38XX pass-2 layout: same as CN38XX but without LATE_COL
	 * (bits 16 and up are reserved). */
	struct cvmx_gmxx_tx_int_en_cn38xxp2
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t ncb_nxa                      : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t reserved_16_63               : 48;
#endif
	} cn38xxp2;
	struct cvmx_gmxx_tx_int_en_cn30xx     cn50xx; /* CN50XX reuses the CN30XX layout */
	/* CN52XX layout: 4-bit per-port fields like CN38XX, but no NCB_NXA
	 * (bit 1 is reserved) and no PTP_LOST.  Shared by CN52XX pass 1,
	 * CN56XX and CN56XX pass 1 below. */
	struct cvmx_gmxx_tx_int_en_cn52xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t reserved_20_63               : 44;
#endif
	} cn52xx;
	struct cvmx_gmxx_tx_int_en_cn52xx     cn52xxp1;
	struct cvmx_gmxx_tx_int_en_cn52xx     cn56xx;
	struct cvmx_gmxx_tx_int_en_cn52xx     cn56xxp1;
	struct cvmx_gmxx_tx_int_en_cn38xx     cn58xx;   /* CN58XX reuses the CN38XX layout */
	struct cvmx_gmxx_tx_int_en_cn38xx     cn58xxp1;
	/* CN63XX layout: the CN52XX layout plus PTP_LOST in bits 20-23.
	 * Shared by CN63XX pass 1 below. */
	struct cvmx_gmxx_tx_int_en_cn63xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_24_63               : 40;
	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                         sent due to XSCOL */
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t ptp_lost                     : 4;
	uint64_t reserved_24_63               : 40;
#endif
	} cn63xx;
	struct cvmx_gmxx_tx_int_en_cn63xx     cn63xxp1;
7278};
7279typedef union cvmx_gmxx_tx_int_en cvmx_gmxx_tx_int_en_t;
7280
7281/**
7282 * cvmx_gmx#_tx_int_reg
7283 *
7284 * GMX_TX_INT_REG = Interrupt Register
7285 *
7286 *
7287 * Notes:
7288 * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
7289 *
7290 */
union cvmx_gmxx_tx_int_reg
{
	uint64_t u64;                       /* whole-register access */
	/* Superset layout: every field implemented by any supported model.
	 * Same bit assignments as GMX_TX_INT_EN; write 1 to a bit to clear
	 * it per the usual W1C interrupt convention — TODO(review) confirm
	 * against the OCTEON HRM, not visible from this header. */
	struct cvmx_gmxx_tx_int_reg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_24_63               : 40;
	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                         sent due to XSCOL */
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow */
	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else /* little-endian: same fields, declared in ascending bit order */
	uint64_t pko_nxa                      : 1;
	uint64_t ncb_nxa                      : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t ptp_lost                     : 4;
	uint64_t reserved_24_63               : 40;
#endif
	} s;
	/* CN30XX: 3-bit per-port fields with reserved gaps; no NCB_NXA. */
	struct cvmx_gmxx_tx_int_reg_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_19_63               : 45;
	uint64_t late_col                     : 3;  /**< TX Late Collision */
	uint64_t reserved_15_15               : 1;
	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t reserved_11_11               : 1;
	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_5_7                 : 3;
	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 3;
	uint64_t reserved_5_7                 : 3;
	uint64_t xscol                        : 3;
	uint64_t reserved_11_11               : 1;
	uint64_t xsdef                        : 3;
	uint64_t reserved_15_15               : 1;
	uint64_t late_col                     : 3;
	uint64_t reserved_19_63               : 45;
#endif
	} cn30xx;
	/* CN31XX: CN30XX layout without LATE_COL. */
	struct cvmx_gmxx_tx_int_reg_cn31xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_15_63               : 49;
	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t reserved_11_11               : 1;
	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_5_7                 : 3;
	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 3;
	uint64_t reserved_5_7                 : 3;
	uint64_t xscol                        : 3;
	uint64_t reserved_11_11               : 1;
	uint64_t xsdef                        : 3;
	uint64_t reserved_15_63               : 49;
#endif
	} cn31xx;
	/* CN38XX: 4-bit per-port fields plus NCB_NXA; LATE_COL PASS3 only. */
	struct cvmx_gmxx_tx_int_reg_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (PASS3 only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa                      : 1;
	uint64_t ncb_nxa                      : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t reserved_20_63               : 44;
#endif
	} cn38xx;
	/* CN38XX pass 2: CN38XX layout without LATE_COL. */
	struct cvmx_gmxx_tx_int_reg_cn38xxp2
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa                      : 1;
	uint64_t ncb_nxa                      : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t reserved_16_63               : 48;
#endif
	} cn38xxp2;
	struct cvmx_gmxx_tx_int_reg_cn30xx    cn50xx; /* CN50XX reuses the CN30XX layout */
	/* CN52XX: 4-bit per-port fields, no NCB_NXA (bit 1 reserved). */
	struct cvmx_gmxx_tx_int_reg_cn52xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_20_63               : 44;
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t reserved_20_63               : 44;
#endif
	} cn52xx;
	struct cvmx_gmxx_tx_int_reg_cn52xx    cn52xxp1;
	struct cvmx_gmxx_tx_int_reg_cn52xx    cn56xx;
	struct cvmx_gmxx_tx_int_reg_cn52xx    cn56xxp1;
	struct cvmx_gmxx_tx_int_reg_cn38xx    cn58xx;   /* CN58XX reuses the CN38XX layout */
	struct cvmx_gmxx_tx_int_reg_cn38xx    cn58xxp1;
	/* CN63XX: CN52XX layout plus PTP_LOST in bits 20-23. */
	struct cvmx_gmxx_tx_int_reg_cn63xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_24_63               : 40;
	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
                                                         sent due to XSCOL */
	uint64_t late_col                     : 4;  /**< TX Late Collision
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t xscol                        : 4;  /**< TX Excessive collisions
                                                         (SGMII/1000Base-X half-duplex only) */
	uint64_t reserved_6_7                 : 2;
	uint64_t undflw                       : 4;  /**< TX Underflow */
	uint64_t reserved_1_1                 : 1;
	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
#else
	uint64_t pko_nxa                      : 1;
	uint64_t reserved_1_1                 : 1;
	uint64_t undflw                       : 4;
	uint64_t reserved_6_7                 : 2;
	uint64_t xscol                        : 4;
	uint64_t xsdef                        : 4;
	uint64_t late_col                     : 4;
	uint64_t ptp_lost                     : 4;
	uint64_t reserved_24_63               : 40;
#endif
	} cn63xx;
	struct cvmx_gmxx_tx_int_reg_cn63xx    cn63xxp1;
};
typedef union cvmx_gmxx_tx_int_reg cvmx_gmxx_tx_int_reg_t;
7475
7476/**
7477 * cvmx_gmx#_tx_jam
7478 *
7479 * GMX_TX_JAM = Packet TX Jam Pattern
7480 *
7481 */
union cvmx_gmxx_tx_jam
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_jam_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t jam                          : 8;  /**< Jam pattern
                                                         (SGMII/1000Base-X half-duplex only) */
#else /* little-endian: same fields, ascending bit order */
	uint64_t jam                          : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	/* All supported models share the common layout. */
	struct cvmx_gmxx_tx_jam_s             cn30xx;
	struct cvmx_gmxx_tx_jam_s             cn31xx;
	struct cvmx_gmxx_tx_jam_s             cn38xx;
	struct cvmx_gmxx_tx_jam_s             cn38xxp2;
	struct cvmx_gmxx_tx_jam_s             cn50xx;
	struct cvmx_gmxx_tx_jam_s             cn52xx;
	struct cvmx_gmxx_tx_jam_s             cn52xxp1;
	struct cvmx_gmxx_tx_jam_s             cn56xx;
	struct cvmx_gmxx_tx_jam_s             cn56xxp1;
	struct cvmx_gmxx_tx_jam_s             cn58xx;
	struct cvmx_gmxx_tx_jam_s             cn58xxp1;
	struct cvmx_gmxx_tx_jam_s             cn63xx;
	struct cvmx_gmxx_tx_jam_s             cn63xxp1;
};
typedef union cvmx_gmxx_tx_jam cvmx_gmxx_tx_jam_t;
7511
7512/**
7513 * cvmx_gmx#_tx_lfsr
7514 *
7515 * GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
7516 *
7517 */
union cvmx_gmxx_tx_lfsr
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_lfsr_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t lfsr                         : 16; /**< The current state of the LFSR used to feed random
                                                         numbers to compute truncated binary exponential
                                                         backoff.
                                                         (SGMII/1000Base-X half-duplex only) */
#else /* little-endian: same fields, ascending bit order */
	uint64_t lfsr                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* All supported models share the common layout. */
	struct cvmx_gmxx_tx_lfsr_s            cn30xx;
	struct cvmx_gmxx_tx_lfsr_s            cn31xx;
	struct cvmx_gmxx_tx_lfsr_s            cn38xx;
	struct cvmx_gmxx_tx_lfsr_s            cn38xxp2;
	struct cvmx_gmxx_tx_lfsr_s            cn50xx;
	struct cvmx_gmxx_tx_lfsr_s            cn52xx;
	struct cvmx_gmxx_tx_lfsr_s            cn52xxp1;
	struct cvmx_gmxx_tx_lfsr_s            cn56xx;
	struct cvmx_gmxx_tx_lfsr_s            cn56xxp1;
	struct cvmx_gmxx_tx_lfsr_s            cn58xx;
	struct cvmx_gmxx_tx_lfsr_s            cn58xxp1;
	struct cvmx_gmxx_tx_lfsr_s            cn63xx;
	struct cvmx_gmxx_tx_lfsr_s            cn63xxp1;
};
typedef union cvmx_gmxx_tx_lfsr cvmx_gmxx_tx_lfsr_t;
7549
7550/**
7551 * cvmx_gmx#_tx_ovr_bp
7552 *
7553 * GMX_TX_OVR_BP = Packet Interface TX Override BackPressure
7554 *
7555 *
7556 * Notes:
7557 * In XAUI mode, only the lsb (corresponding to port0) of EN, BP, and IGN_FULL are used.
7558 *
7559 * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
7560 * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol
7561 * when GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by
7562 * GMX*_TX_XAUI_CTL[HG_EN]=1 and GMX*_RX0_UDD_SKP[LEN]=16.) HW can only auto-generate backpressure
7563 * through HiGig2 messages (optionally, when GMX*_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
7564 * protocol.
7565 */
union cvmx_gmxx_tx_ovr_bp
{
	uint64_t u64;                       /* whole-register access */
	/* Superset layout: adds the 16-bit TX_PRT_BP field (bits 32-47)
	 * present only on the newer models; bits 12-31 are reserved. */
	struct cvmx_gmxx_tx_ovr_bp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t tx_prt_bp                    : 16; /**< Per port BP sent to PKO
                                                         0=Port is available
                                                         1=Port should be back pressured
                                                         TX_PRT_BP should not be set until
                                                         GMX_INF_MODE[EN] has been enabled */
	uint64_t reserved_12_31               : 20;
	uint64_t en                           : 4;  /**< Per port Enable back pressure override */
	uint64_t bp                           : 4;  /**< Per port BackPressure status to use
                                                         0=Port is available
                                                         1=Port should be back pressured */
	uint64_t ign_full                     : 4;  /**< Ignore the RX FIFO full when computing BP */
#else /* little-endian: same fields, ascending bit order */
	uint64_t ign_full                     : 4;
	uint64_t bp                           : 4;
	uint64_t en                           : 4;
	uint64_t reserved_12_31               : 20;
	uint64_t tx_prt_bp                    : 16;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* CN30XX: 3-bit per-port fields with reserved gaps; no TX_PRT_BP. */
	struct cvmx_gmxx_tx_ovr_bp_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_11_63               : 53;
	uint64_t en                           : 3;  /**< Per port Enable back pressure override */
	uint64_t reserved_7_7                 : 1;
	uint64_t bp                           : 3;  /**< Per port BackPressure status to use
                                                         0=Port is available
                                                         1=Port should be back pressured */
	uint64_t reserved_3_3                 : 1;
	uint64_t ign_full                     : 3;  /**< Ignore the RX FIFO full when computing BP */
#else
	uint64_t ign_full                     : 3;
	uint64_t reserved_3_3                 : 1;
	uint64_t bp                           : 3;
	uint64_t reserved_7_7                 : 1;
	uint64_t en                           : 3;
	uint64_t reserved_11_63               : 53;
#endif
	} cn30xx;
	struct cvmx_gmxx_tx_ovr_bp_cn30xx     cn31xx;
	/* CN38XX: contiguous 4-bit per-port fields; no TX_PRT_BP. */
	struct cvmx_gmxx_tx_ovr_bp_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_12_63               : 52;
	uint64_t en                           : 4;  /**< Per port Enable back pressure override */
	uint64_t bp                           : 4;  /**< Per port BackPressure status to use
                                                         0=Port is available
                                                         1=Port should be back pressured */
	uint64_t ign_full                     : 4;  /**< Ignore the RX FIFO full when computing BP */
#else
	uint64_t ign_full                     : 4;
	uint64_t bp                           : 4;
	uint64_t en                           : 4;
	uint64_t reserved_12_63               : 52;
#endif
	} cn38xx;
	struct cvmx_gmxx_tx_ovr_bp_cn38xx     cn38xxp2;
	struct cvmx_gmxx_tx_ovr_bp_cn30xx     cn50xx;
	struct cvmx_gmxx_tx_ovr_bp_s          cn52xx;   /* newer models use the full layout */
	struct cvmx_gmxx_tx_ovr_bp_s          cn52xxp1;
	struct cvmx_gmxx_tx_ovr_bp_s          cn56xx;
	struct cvmx_gmxx_tx_ovr_bp_s          cn56xxp1;
	struct cvmx_gmxx_tx_ovr_bp_cn38xx     cn58xx;
	struct cvmx_gmxx_tx_ovr_bp_cn38xx     cn58xxp1;
	struct cvmx_gmxx_tx_ovr_bp_s          cn63xx;
	struct cvmx_gmxx_tx_ovr_bp_s          cn63xxp1;
};
typedef union cvmx_gmxx_tx_ovr_bp cvmx_gmxx_tx_ovr_bp_t;
7642
7643/**
7644 * cvmx_gmx#_tx_pause_pkt_dmac
7645 *
7646 * GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
7647 *
7648 */
union cvmx_gmxx_tx_pause_pkt_dmac
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_pause_pkt_dmac_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t dmac                         : 48; /**< The DMAC field placed in outbound pause pkts */
#else /* little-endian: same fields, ascending bit order */
	uint64_t dmac                         : 48;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	/* All supported models share the common layout. */
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn30xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn31xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn38xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn38xxp2;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn50xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn52xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn52xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn56xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn56xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn58xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn58xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn63xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn63xxp1;
};
typedef union cvmx_gmxx_tx_pause_pkt_dmac cvmx_gmxx_tx_pause_pkt_dmac_t;
7677
7678/**
7679 * cvmx_gmx#_tx_pause_pkt_type
7680 *
7681 * GMX_TX_PAUSE_PKT_TYPE = Packet Interface TX Pause Packet TYPE field
7682 *
7683 */
union cvmx_gmxx_tx_pause_pkt_type
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_pause_pkt_type_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t type                         : 16; /**< The TYPE field placed in outbound pause pkts */
#else /* little-endian: same fields, ascending bit order */
	uint64_t type                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* All supported models share the common layout. */
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn30xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn31xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn38xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn38xxp2;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn50xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn52xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn52xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn56xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn56xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn58xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn58xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn63xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s  cn63xxp1;
};
typedef union cvmx_gmxx_tx_pause_pkt_type cvmx_gmxx_tx_pause_pkt_type_t;
7712
7713/**
7714 * cvmx_gmx#_tx_prts
7715 *
7716 * Common
7717 *
7718 *
7719 * GMX_TX_PRTS = TX Ports
7720 *
7721 * Notes:
7722 * * The value programmed for PRTS is the number of the highest architected
7723 * port number on the interface, plus 1.  For example, if port 2 is the
7724 * highest architected port, then the programmed value should be 3 since
7725 * there are 3 ports in the system - 0, 1, and 2.
7726 */
union cvmx_gmxx_tx_prts
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_prts_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t prts                         : 5;  /**< Number of ports allowed on the interface
                                                         (SGMII/1000Base-X only) */
#else /* little-endian: same fields, ascending bit order */
	uint64_t prts                         : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	/* All supported models share the common layout. */
	struct cvmx_gmxx_tx_prts_s            cn30xx;
	struct cvmx_gmxx_tx_prts_s            cn31xx;
	struct cvmx_gmxx_tx_prts_s            cn38xx;
	struct cvmx_gmxx_tx_prts_s            cn38xxp2;
	struct cvmx_gmxx_tx_prts_s            cn50xx;
	struct cvmx_gmxx_tx_prts_s            cn52xx;
	struct cvmx_gmxx_tx_prts_s            cn52xxp1;
	struct cvmx_gmxx_tx_prts_s            cn56xx;
	struct cvmx_gmxx_tx_prts_s            cn56xxp1;
	struct cvmx_gmxx_tx_prts_s            cn58xx;
	struct cvmx_gmxx_tx_prts_s            cn58xxp1;
	struct cvmx_gmxx_tx_prts_s            cn63xx;
	struct cvmx_gmxx_tx_prts_s            cn63xxp1;
};
typedef union cvmx_gmxx_tx_prts cvmx_gmxx_tx_prts_t;
7756
7757/**
7758 * cvmx_gmx#_tx_spi_ctl
7759 *
 * GMX_TX_SPI_CTL = Spi4 TX Modes
7761 *
7762 */
union cvmx_gmxx_tx_spi_ctl
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_spi_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63                : 62;
	uint64_t tpa_clr                      : 1;  /**< TPA Clear Mode
                                                         Clear credit counter when satisfied status */
	uint64_t cont_pkt                     : 1;  /**< Contiguous Packet Mode
                                                         Finish one packet before switching to another
                                                         Cannot be set in Spi4 pass-through mode */
#else /* little-endian: same fields, ascending bit order */
	uint64_t cont_pkt                     : 1;
	uint64_t tpa_clr                      : 1;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	/* Present only on the Spi4-capable models. */
	struct cvmx_gmxx_tx_spi_ctl_s         cn38xx;
	struct cvmx_gmxx_tx_spi_ctl_s         cn38xxp2;
	struct cvmx_gmxx_tx_spi_ctl_s         cn58xx;
	struct cvmx_gmxx_tx_spi_ctl_s         cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_ctl cvmx_gmxx_tx_spi_ctl_t;
7787
7788/**
7789 * cvmx_gmx#_tx_spi_drain
7790 *
7791 * GMX_TX_SPI_DRAIN = Drain out Spi TX FIFO
7792 *
7793 */
union cvmx_gmxx_tx_spi_drain
{
	uint64_t u64;                       /* whole-register access */
	struct cvmx_gmxx_tx_spi_drain_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t drain                        : 16; /**< Per port drain control
                                                         0=Normal operation
                                                         1=GMX TX will be popped, but no valid data will
                                                           be sent to SPX.  Credits are correctly returned
                                                           to PKO.  STX_IGN_CAL should be set to ignore
                                                           TPA and not stall due to back-pressure.
                                                         (PASS3 only) */
#else /* little-endian: same fields, ascending bit order */
	uint64_t drain                        : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	/* Present only on the Spi4-capable models (no CN38XX pass-2 entry). */
	struct cvmx_gmxx_tx_spi_drain_s       cn38xx;
	struct cvmx_gmxx_tx_spi_drain_s       cn58xx;
	struct cvmx_gmxx_tx_spi_drain_s       cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_drain cvmx_gmxx_tx_spi_drain_t;
7818
7819/**
7820 * cvmx_gmx#_tx_spi_max
7821 *
7822 * GMX_TX_SPI_MAX = RGMII TX Spi4 MAX
7823 *
 * Union overlaying the raw 64-bit register value (u64) with bit-field
 * views of the individual fields.  The #if/#else swaps the declaration
 * order of the fields so the bit positions match the hardware on both
 * big- and little-endian compiles.  The cn38xx layout omits the SLICE
 * field (bits 22:16 are reserved there); cn58xx parts use the full
 * layout "s".
7824 */
7825union cvmx_gmxx_tx_spi_max
7826{
7827	uint64_t u64;
7828	struct cvmx_gmxx_tx_spi_max_s
7829	{
7830#if __BYTE_ORDER == __BIG_ENDIAN
7831	uint64_t reserved_23_63               : 41;
7832	uint64_t slice                        : 7;  /**< Number of 16B blocks to transmit in a burst before
7833                                                         switching to the next port. SLICE does not always
7834                                                         limit the burst length transmitted by OCTEON.
7835                                                         Depending on the traffic pattern and
7836                                                         GMX_TX_SPI_ROUND programming, the next port could
7837                                                         be the same as the current port. In this case,
7838                                                         OCTEON may merge multiple sub-SLICE bursts into
7839                                                         one contiguous burst that is longer than SLICE
7840                                                         (as long as the burst does not cross a packet
7841                                                         boundary).
7842                                                         SLICE must be programmed to be >=
7843                                                           GMX_TX_SPI_THRESH[THRESH]
7844                                                         If SLICE==0, then the transmitter will tend to
7845                                                         send the complete packet. The port will only
7846                                                         switch if credits are exhausted or PKO cannot
7847                                                         keep up.
7848                                                         (90nm ONLY) */
7849	uint64_t max2                         : 8;  /**< MAX2 (per Spi4.2 spec) */
7850	uint64_t max1                         : 8;  /**< MAX1 (per Spi4.2 spec)
7851                                                         MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
7852#else
7853	uint64_t max1                         : 8;
7854	uint64_t max2                         : 8;
7855	uint64_t slice                        : 7;
7856	uint64_t reserved_23_63               : 41;
7857#endif
7858	} s;
7859	struct cvmx_gmxx_tx_spi_max_cn38xx
7860	{
7861#if __BYTE_ORDER == __BIG_ENDIAN
7862	uint64_t reserved_16_63               : 48;
7863	uint64_t max2                         : 8;  /**< MAX2 (per Spi4.2 spec) */
7864	uint64_t max1                         : 8;  /**< MAX1 (per Spi4.2 spec)
7865                                                         MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
7866#else
7867	uint64_t max1                         : 8;
7868	uint64_t max2                         : 8;
7869	uint64_t reserved_16_63               : 48;
7870#endif
7871	} cn38xx;
7872	struct cvmx_gmxx_tx_spi_max_cn38xx    cn38xxp2;
7873	struct cvmx_gmxx_tx_spi_max_s         cn58xx;
7874	struct cvmx_gmxx_tx_spi_max_s         cn58xxp1;
7875};
7876typedef union cvmx_gmxx_tx_spi_max cvmx_gmxx_tx_spi_max_t;
7877
7878/**
7879 * cvmx_gmx#_tx_spi_round#
7880 *
7881 * GMX_TX_SPI_ROUND = Controls SPI4 TX Arbitration
7882 *
 * Union overlaying the raw 64-bit register value (u64) with a bit-field
 * view; field order is swapped between big- and little-endian compiles
 * so bit positions match the hardware either way.  Only cn58xx-family
 * layouts are defined for this register.
7883 */
7884union cvmx_gmxx_tx_spi_roundx
7885{
7886	uint64_t u64;
7887	struct cvmx_gmxx_tx_spi_roundx_s
7888	{
7889#if __BYTE_ORDER == __BIG_ENDIAN
7890	uint64_t reserved_16_63               : 48;
7891	uint64_t round                        : 16; /**< Which Spi ports participate in each arbitration
7892                                                          round.  Each bit corresponds to a spi port
7893                                                         - 0: this port will arb in this round
7894                                                         - 1: this port will not arb in this round
7895                                                          (90nm ONLY) */
7896#else
7897	uint64_t round                        : 16;
7898	uint64_t reserved_16_63               : 48;
7899#endif
7900	} s;
7901	struct cvmx_gmxx_tx_spi_roundx_s      cn58xx;
7902	struct cvmx_gmxx_tx_spi_roundx_s      cn58xxp1;
7903};
7904typedef union cvmx_gmxx_tx_spi_roundx cvmx_gmxx_tx_spi_roundx_t;
7905
7906/**
7907 * cvmx_gmx#_tx_spi_thresh
7908 *
7909 * GMX_TX_SPI_THRESH = RGMII TX Spi4 Transmit Threshold
7910 *
 * Union overlaying the raw 64-bit register value (u64) with a bit-field
 * view; field order is swapped between big- and little-endian compiles
 * so bit positions match the hardware either way.  All cn38xx/cn58xx
 * variants share the same layout.
7911 *
7912 * Notes:
7913 * Note: zero will map to 0x20
7914 *
7915 * This will normally create Spi4 traffic bursts at least THRESH in length.
7916 * If dclk > eclk, then this rule may not always hold and Octeon may split
7917 * transfers into smaller bursts - some of which could be as short as 16B.
7918 * Octeon will never violate the Spi4.2 spec and send a non-EOP burst that is
7919 * not a multiple of 16B.
7920 */
7921union cvmx_gmxx_tx_spi_thresh
7922{
7923	uint64_t u64;
7924	struct cvmx_gmxx_tx_spi_thresh_s
7925	{
7926#if __BYTE_ORDER == __BIG_ENDIAN
7927	uint64_t reserved_6_63                : 58;
7928	uint64_t thresh                       : 6;  /**< Transmit threshold in 16B blocks - cannot be zero
7929                                                         THRESH <= TX_FIFO size   (in non-passthrough mode)
7930                                                         THRESH <= TX_FIFO size-2 (in passthrough mode)
7931                                                         THRESH <= GMX_TX_SPI_MAX[MAX1]
7932                                                         THRESH <= GMX_TX_SPI_MAX[MAX2], if not then it is
7933                                                          possible for Octeon to send a Spi4 data burst of
7934                                                          MAX2 <= burst <= THRESH 16B ticks
7935                                                         GMX_TX_SPI_MAX[SLICE] must be programmed to be >=
7936                                                           THRESH */
7937#else
7938	uint64_t thresh                       : 6;
7939	uint64_t reserved_6_63                : 58;
7940#endif
7941	} s;
7942	struct cvmx_gmxx_tx_spi_thresh_s      cn38xx;
7943	struct cvmx_gmxx_tx_spi_thresh_s      cn38xxp2;
7944	struct cvmx_gmxx_tx_spi_thresh_s      cn58xx;
7945	struct cvmx_gmxx_tx_spi_thresh_s      cn58xxp1;
7946};
7947typedef union cvmx_gmxx_tx_spi_thresh cvmx_gmxx_tx_spi_thresh_t;
7948
7949/**
7950 * cvmx_gmx#_tx_xaui_ctl
 *
 * GMX TX XAUI control register (XAUI mode only, per the field notes):
 * HiGig/HiGig2 enable, link-status override, unidirectional mode, and
 * deficit-idle-counter enable.  Union overlays the raw 64-bit register
 * value (u64) with a bit-field view; field order is swapped between
 * big- and little-endian compiles so bit positions match the hardware
 * either way.
7951 */
7952union cvmx_gmxx_tx_xaui_ctl
7953{
7954	uint64_t u64;
7955	struct cvmx_gmxx_tx_xaui_ctl_s
7956	{
7957#if __BYTE_ORDER == __BIG_ENDIAN
7958	uint64_t reserved_11_63               : 53;
7959	uint64_t hg_pause_hgi                 : 2;  /**< HGI Field for HW generated HiGig pause packets
7960                                                         (XAUI mode only) */
7961	uint64_t hg_en                        : 1;  /**< Enable HiGig Mode
7962                                                         When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=12
7963                                                          the interface is in HiGig/HiGig+ mode and the
7964                                                          following must be set:
7965                                                          GMX_RX_FRM_CTL[PRE_CHK] == 0
7966                                                          GMX_RX_UDD_SKP[FCSSEL] == 0
7967                                                          GMX_RX_UDD_SKP[SKIP] == 12
7968                                                          GMX_TX_APPEND[PREAMBLE] == 0
7969                                                         When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=16
7970                                                          the interface is in HiGig2 mode and the
7971                                                          following must be set:
7972                                                          GMX_RX_FRM_CTL[PRE_CHK] == 0
7973                                                          GMX_RX_UDD_SKP[FCSSEL] == 0
7974                                                          GMX_RX_UDD_SKP[SKIP] == 16
7975                                                          GMX_TX_APPEND[PREAMBLE] == 0
7976                                                          GMX_PRT0_CBFC_CTL[RX_EN] == 0
7977                                                          GMX_PRT0_CBFC_CTL[TX_EN] == 0
7978                                                         (XAUI mode only) */
7979	uint64_t reserved_7_7                 : 1;
7980	uint64_t ls_byp                       : 1;  /**< Bypass the link status as determined by the XGMII
7981                                                         receiver and set the link status of the
7982                                                         transmitter to LS.
7983                                                         (XAUI mode only) */
7984	uint64_t ls                           : 2;  /**< Link Status
7985                                                         0 = Link Ok
7986                                                             Link runs normally. RS passes MAC data to PCS
7987                                                         1 = Local Fault
7988                                                             RS layer sends continuous remote fault
7989                                                              sequences.
7990                                                         2 = Remote Fault
7991                                                             RS layer sends continuous idles sequences
7992                                                         3 = Link Drain
7993                                                             RS layer drops full packets to allow GMX and
7994                                                              PKO to drain their FIFOs
7995                                                         (XAUI mode only) */
7996	uint64_t reserved_2_3                 : 2;
7997	uint64_t uni_en                       : 1;  /**< Enable Unidirectional Mode (IEEE Clause 66)
7998                                                         (XAUI mode only) */
7999	uint64_t dic_en                       : 1;  /**< Enable the deficit idle counter for IFG averaging
8000                                                         (XAUI mode only) */
8001#else
8002	uint64_t dic_en                       : 1;
8003	uint64_t uni_en                       : 1;
8004	uint64_t reserved_2_3                 : 2;
8005	uint64_t ls                           : 2;
8006	uint64_t ls_byp                       : 1;
8007	uint64_t reserved_7_7                 : 1;
8008	uint64_t hg_en                        : 1;
8009	uint64_t hg_pause_hgi                 : 2;
8010	uint64_t reserved_11_63               : 53;
8011#endif
8012	} s;
8013	struct cvmx_gmxx_tx_xaui_ctl_s        cn52xx;
8014	struct cvmx_gmxx_tx_xaui_ctl_s        cn52xxp1;
8015	struct cvmx_gmxx_tx_xaui_ctl_s        cn56xx;
8016	struct cvmx_gmxx_tx_xaui_ctl_s        cn56xxp1;
8017	struct cvmx_gmxx_tx_xaui_ctl_s        cn63xx;
8018	struct cvmx_gmxx_tx_xaui_ctl_s        cn63xxp1;
8019};
8020typedef union cvmx_gmxx_tx_xaui_ctl cvmx_gmxx_tx_xaui_ctl_t;
8021
8022/**
8023 * cvmx_gmx#_xaui_ext_loopback
 *
 * GMX XAUI external-loopback control (XAUI mode only, per the field
 * notes): EN reflects the RX lines back onto the TX lines; THRESH is a
 * TX FIFO threshold that must be left at its typical value.  Union
 * overlays the raw 64-bit register value (u64) with a bit-field view;
 * field order is swapped between big- and little-endian compiles so
 * bit positions match the hardware either way.
8024 */
8025union cvmx_gmxx_xaui_ext_loopback
8026{
8027	uint64_t u64;
8028	struct cvmx_gmxx_xaui_ext_loopback_s
8029	{
8030#if __BYTE_ORDER == __BIG_ENDIAN
8031	uint64_t reserved_5_63                : 59;
8032	uint64_t en                           : 1;  /**< Loopback enable
8033                                                         Puts the packet interface in external loopback
8034                                                         mode on the XAUI bus in which the RX lines are
8035                                                         reflected on the TX lines.
8036                                                         (XAUI mode only) */
8037	uint64_t thresh                       : 4;  /**< Threshold on the TX FIFO
8038                                                         SW must only write the typical value.  Any other
8039                                                         value will cause loopback mode not to function
8040                                                         correctly.
8041                                                         (XAUI mode only) */
8042#else
8043	uint64_t thresh                       : 4;
8044	uint64_t en                           : 1;
8045	uint64_t reserved_5_63                : 59;
8046#endif
8047	} s;
8048	struct cvmx_gmxx_xaui_ext_loopback_s  cn52xx;
8049	struct cvmx_gmxx_xaui_ext_loopback_s  cn52xxp1;
8050	struct cvmx_gmxx_xaui_ext_loopback_s  cn56xx;
8051	struct cvmx_gmxx_xaui_ext_loopback_s  cn56xxp1;
8052	struct cvmx_gmxx_xaui_ext_loopback_s  cn63xx;
8053	struct cvmx_gmxx_xaui_ext_loopback_s  cn63xxp1;
8054};
8055typedef union cvmx_gmxx_xaui_ext_loopback cvmx_gmxx_xaui_ext_loopback_t;
8056
8057#endif
8058