cvmx-agl-defs.h revision 215976
1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41/**
42 * cvmx-agl-defs.h
43 *
44 * Configuration and status register (CSR) type definitions for
45 * Octeon agl.
46 *
47 * This file is auto-generated. Do not edit.
48 *
49 * <hr>$Revision$<hr>
50 *
51 */
52#ifndef __CVMX_AGL_TYPEDEFS_H__
53#define __CVMX_AGL_TYPEDEFS_H__
54
55#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
56#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC()
57static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void)
58{
59	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
60		cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n");
61	return CVMX_ADD_IO_SEG(0x00011800E0000518ull);
62}
63#else
64#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
65#endif
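/* Editorial usage sketch, not part of the auto-generated definitions: with
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING set, each address macro expands to a
 * function call that warns when the CSR does not exist on the running chip;
 * otherwise it is a plain constant. Either way the macro yields a 64-bit
 * I/O address suitable for the SDK accessors (cvmx_read_csr() from cvmx.h is
 * assumed to be in scope; the helper name below is illustrative only).
 */
static inline uint64_t cvmx_agl_example_read_bad_reg(void)
{
	/* Works identically whether the macro is the checked function or the
	 * raw CVMX_ADD_IO_SEG() constant. */
	return cvmx_read_csr(CVMX_AGL_GMX_BAD_REG);
}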
66#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
67#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC()
68static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void)
69{
70	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
71		cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n");
72	return CVMX_ADD_IO_SEG(0x00011800E0000400ull);
73}
74#else
75#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
76#endif
77#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
78#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC()
79static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void)
80{
81	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
82		cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n");
83	return CVMX_ADD_IO_SEG(0x00011800E00007F0ull);
84}
85#else
86#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
87#endif
88#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
89#define CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC()
90static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void)
91{
92	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
93		cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n");
94	return CVMX_ADD_IO_SEG(0x00011800E00007F8ull);
95}
96#else
97#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
98#endif
99#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
100static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset)
101{
102	if (!(
103	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
104	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
105	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
106		cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset);
107	return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048;
108}
109#else
110#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
111#endif
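/* Editorial usage sketch (illustrative, not generated): the per-port CSRs
 * below take a port index. The address is formed by masking the index to one
 * bit and stepping in 2048-byte (0x800) strides, so ports 0 and 1 of the
 * management interface map to base and base + 0x800. Assumes cvmx_read_csr()
 * from cvmx.h; the helper name is made up for this example.
 */
static inline uint64_t cvmx_agl_example_read_prt_cfg(int port)
{
	/* 'port' should be 0 or 1; the macro masks it with '& 1'. */
	return cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
}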
112#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
113static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset)
114{
115	if (!(
116	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
117	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
118	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
119		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset);
120	return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048;
121}
122#else
123#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
124#endif
125#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
126static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset)
127{
128	if (!(
129	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
130	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
131	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
132		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset);
133	return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048;
134}
135#else
136#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
137#endif
138#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
139static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset)
140{
141	if (!(
142	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
143	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
144	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
145		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset);
146	return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048;
147}
148#else
149#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
150#endif
151#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
152static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset)
153{
154	if (!(
155	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
156	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
157	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
158		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset);
159	return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048;
160}
161#else
162#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
163#endif
164#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
165static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset)
166{
167	if (!(
168	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
169	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
170	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
171		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset);
172	return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048;
173}
174#else
175#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
176#endif
177#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
178static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset)
179{
180	if (!(
181	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
182	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
183	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
184		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset);
185	return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048;
186}
187#else
188#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
189#endif
190#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
191static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset)
192{
193	if (!(
194	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
195	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
196	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
197		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset);
198	return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048;
199}
200#else
201#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
202#endif
203#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
204static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset)
205{
206	if (!(
207	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
208	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
209	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
210		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset);
211	return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048;
212}
213#else
214#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
215#endif
216#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
217static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset)
218{
219	if (!(
220	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
221	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
222	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
223		cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset);
224	return CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048;
225}
226#else
227#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
228#endif
229#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
230static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset)
231{
232	if (!(
233	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
234	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
235	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
236		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset);
237	return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048;
238}
239#else
240#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
241#endif
242#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
243static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset)
244{
245	if (!(
246	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
247	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
248	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
249		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset);
250	return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048;
251}
252#else
253#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
254#endif
255#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
256static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset)
257{
258	if (!(
259	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
260	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
261	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
262		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset);
263	return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048;
264}
265#else
266#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
267#endif
268#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
269static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset)
270{
271	if (!(
272	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
273	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
274	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
275		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset);
276	return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048;
277}
278#else
279#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
280#endif
281#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
282static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset)
283{
284	if (!(
285	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
286	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
287	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
288		cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset);
289	return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048;
290}
291#else
292#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
293#endif
294#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
295static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset)
296{
297	if (!(
298	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
299	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
300	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
301		cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
302	return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048;
303}
304#else
305#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
306#endif
307#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
308static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset)
309{
310	if (!(
311	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
312	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
313	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
314		cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset);
315	return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048;
316}
317#else
318#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
319#endif
320#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
321static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset)
322{
323	if (!(
324	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
325	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
326	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
327		cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset);
328	return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048;
329}
330#else
331#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
332#endif
333#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
334static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset)
335{
336	if (!(
337	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
338	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
339	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
340		cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset);
341	return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048;
342}
343#else
344#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
345#endif
346#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
347static inline uint64_t CVMX_AGL_GMX_RXX_RX_INBND(unsigned long offset)
348{
349	if (!(
350	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
351		cvmx_warn("CVMX_AGL_GMX_RXX_RX_INBND(%lu) is invalid on this chip\n", offset);
352	return CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048;
353}
354#else
355#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
356#endif
357#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
358static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset)
359{
360	if (!(
361	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
362	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
363	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
364		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
365	return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048;
366}
367#else
368#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
369#endif
370#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
371static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset)
372{
373	if (!(
374	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
375	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
376	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
377		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset);
378	return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048;
379}
380#else
381#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
382#endif
383#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
384static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset)
385{
386	if (!(
387	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
388	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
389	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
390		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", offset);
391	return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048;
392}
393#else
394#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
395#endif
396#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
397static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset)
398{
399	if (!(
400	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
401	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
402	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
403		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset);
404	return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048;
405}
406#else
407#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
408#endif
409#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
410static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset)
411{
412	if (!(
413	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
414	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
415	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
416		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset);
417	return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048;
418}
419#else
420#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
421#endif
422#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
423static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset)
424{
425	if (!(
426	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
427	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
428	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
429		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset);
430	return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048;
431}
432#else
433#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
434#endif
435#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
436static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset)
437{
438	if (!(
439	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
440	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
441	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
442		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset);
443	return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048;
444}
445#else
446#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
447#endif
448#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
449static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset)
450{
451	if (!(
452	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
453	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
454	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
455		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset);
456	return CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048;
457}
458#else
459#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
460#endif
461#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
462static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset)
463{
464	if (!(
465	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
466	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
467	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
468		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset);
469	return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048;
470}
471#else
472#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
473#endif
474#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
475static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset)
476{
477	if (!(
478	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
479	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
480	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
481		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset);
482	return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048;
483}
484#else
485#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
486#endif
487#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
488static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset)
489{
490	if (!(
491	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
492	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
493	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
494		cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset);
495	return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048;
496}
497#else
498#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
499#endif
500#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
501static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset)
502{
503	if (!(
504	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
505	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
506	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
507		cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset);
508	return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8;
509}
510#else
511#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
512#endif
513#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
514static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset)
515{
516	if (!(
517	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
518	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
519	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
520		cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset);
521	return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8;
522}
523#else
524#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
525#endif
526#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
527static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset)
528{
529	if (!(
530	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
531	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
532	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
533		cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset);
534	return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8;
535}
536#else
537#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
538#endif
539#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
540#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC()
541static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void)
542{
543	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
544		cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n");
545	return CVMX_ADD_IO_SEG(0x00011800E00004E8ull);
546}
547#else
548#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
549#endif
550#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
551#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC()
552static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void)
553{
554	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
555		cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n");
556	return CVMX_ADD_IO_SEG(0x00011800E00007E8ull);
557}
558#else
559#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
560#endif
561#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
562static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset)
563{
564	if (!(
565	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
566	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
567	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
568		cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset);
569	return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048;
570}
571#else
572#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
573#endif
574#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
575#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC()
576static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void)
577{
578	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
579		cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n");
580	return CVMX_ADD_IO_SEG(0x00011800E0000520ull);
581}
582#else
583#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
584#endif
585#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
586static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset)
587{
588	if (!(
589	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
590	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
591	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
592		cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset);
593	return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048;
594}
595#else
596#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
597#endif
598#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
599static inline uint64_t CVMX_AGL_GMX_TXX_CLK(unsigned long offset)
600{
601	if (!(
602	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
603		cvmx_warn("CVMX_AGL_GMX_TXX_CLK(%lu) is invalid on this chip\n", offset);
604	return CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048;
605}
606#else
607#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
608#endif
609#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
610static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset)
611{
612	if (!(
613	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
614	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
615	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
616		cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset);
617	return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048;
618}
619#else
620#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
621#endif
622#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
623static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset)
624{
625	if (!(
626	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
627	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
628	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
629		cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset);
630	return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048;
631}
632#else
633#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
634#endif
635#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
636static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset)
637{
638	if (!(
639	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
640	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
641	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
642		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset);
643	return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048;
644}
645#else
646#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
647#endif
648#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
649static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset)
650{
651	if (!(
652	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
653	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
654	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
655		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset);
656	return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048;
657}
658#else
659#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
660#endif
661#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
662static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset)
663{
664	if (!(
665	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
666	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
667	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
668		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset);
669	return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048;
670}
671#else
672#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
673#endif
674#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
675static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset)
676{
677	if (!(
678	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
679	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
680	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
681		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset);
682	return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048;
683}
684#else
685#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
686#endif
687#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
688static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset)
689{
690	if (!(
691	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
692	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
693	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
694		cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset);
695	return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048;
696}
697#else
698#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
699#endif
700#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
701static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset)
702{
703	if (!(
704	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
705	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
706	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
707		cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset);
708	return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048;
709}
710#else
711#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
712#endif
713#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
714static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset)
715{
716	if (!(
717	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
718	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
719	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
720		cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset);
721	return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048;
722}
723#else
724#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
725#endif
726#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
727static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset)
728{
729	if (!(
730	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
731	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
732	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
733		cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset);
734	return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048;
735}
736#else
737#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
738#endif
739#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
740static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset)
741{
742	if (!(
743	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
744	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
745	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
746		cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset);
747	return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048;
748}
749#else
750#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
751#endif
752#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
753static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset)
754{
755	if (!(
756	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
757	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
758	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
759		cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset);
760	return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048;
761}
762#else
763#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
764#endif
765#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
766static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset)
767{
768	if (!(
769	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
770	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
771	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
772		cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset);
773	return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048;
774}
775#else
776#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
777#endif
778#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
779static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset)
780{
781	if (!(
782	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
783	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
784	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
785		cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset);
786	return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048;
787}
788#else
789#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
790#endif
791#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
792static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset)
793{
794	if (!(
795	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
796	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
797	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
798		cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset);
799	return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048;
800}
801#else
802#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
803#endif
804#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
805static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset)
806{
807	if (!(
808	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
809	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
810	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
811		cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset);
812	return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048;
813}
814#else
815#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
816#endif
817#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
818static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset)
819{
820	if (!(
821	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
822	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
823	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
824		cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset);
825	return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048;
826}
827#else
828#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
829#endif
830#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
831static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset)
832{
833	if (!(
834	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
835	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
836	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
837		cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
838	return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048;
839}
840#else
841#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
842#endif
843#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
844static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset)
845{
846	if (!(
847	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
848	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
849	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
850		cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset);
851	return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048;
852}
853#else
854#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
855#endif
856#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
857#define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC()
858static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void)
859{
860	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
861		cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n");
862	return CVMX_ADD_IO_SEG(0x00011800E00004D0ull);
863}
864#else
865#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
866#endif
867#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
868#define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC()
869static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void)
870{
871	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
872		cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n");
873	return CVMX_ADD_IO_SEG(0x00011800E0000498ull);
874}
875#else
876#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
877#endif
878#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
879#define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC()
880static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void)
881{
882	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
883		cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n");
884	return CVMX_ADD_IO_SEG(0x00011800E0000488ull);
885}
886#else
887#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
888#endif
889#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
890#define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC()
891static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void)
892{
893	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
894		cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n");
895	return CVMX_ADD_IO_SEG(0x00011800E0000508ull);
896}
897#else
898#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
899#endif
900#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
901#define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC()
902static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void)
903{
904	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
905		cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n");
906	return CVMX_ADD_IO_SEG(0x00011800E0000500ull);
907}
908#else
909#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
910#endif
911#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
912#define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC()
913static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void)
914{
915	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
916		cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n");
917	return CVMX_ADD_IO_SEG(0x00011800E0000490ull);
918}
919#else
920#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
921#endif
922#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
923#define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC()
924static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void)
925{
926	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
927		cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n");
928	return CVMX_ADD_IO_SEG(0x00011800E00004F8ull);
929}
930#else
931#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
932#endif
933#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
934#define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC()
935static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void)
936{
937	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
938		cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n");
939	return CVMX_ADD_IO_SEG(0x00011800E00004C8ull);
940}
941#else
942#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
943#endif
944#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
945#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC()
946static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void)
947{
948	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
949		cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n");
950	return CVMX_ADD_IO_SEG(0x00011800E00004A0ull);
951}
952#else
953#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
954#endif
955#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
956#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC()
957static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void)
958{
959	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
960		cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n");
961	return CVMX_ADD_IO_SEG(0x00011800E00004A8ull);
962}
963#else
964#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
965#endif
966#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
967static inline uint64_t CVMX_AGL_PRTX_CTL(unsigned long offset)
968{
969	if (!(
970	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
971		cvmx_warn("CVMX_AGL_PRTX_CTL(%lu) is invalid on this chip\n", offset);
972	return CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8;
973}
974#else
975#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
976#endif
977
978/**
979 * cvmx_agl_gmx_bad_reg
980 *
981 * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
982 *
983 *
984 * Notes:
985 * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH    will be reset when MIX0_CTL[RESET] is set to 1.
986 * OUT_OVR[1], LOSTSTAT[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
987 * STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
988 */
989union cvmx_agl_gmx_bad_reg
990{
991	uint64_t u64;
992	struct cvmx_agl_gmx_bad_reg_s
993	{
994#if __BYTE_ORDER == __BIG_ENDIAN
995	uint64_t reserved_38_63               : 26;
996	uint64_t txpsh1                       : 1;  /**< TX FIFO overflow (MII1) */
997	uint64_t txpop1                       : 1;  /**< TX FIFO underflow (MII1) */
998	uint64_t ovrflw1                      : 1;  /**< RX FIFO overflow (MII1) */
999	uint64_t txpsh                        : 1;  /**< TX FIFO overflow (MII0) */
1000	uint64_t txpop                        : 1;  /**< TX FIFO underflow (MII0) */
1001	uint64_t ovrflw                       : 1;  /**< RX FIFO overflow (MII0) */
1002	uint64_t reserved_27_31               : 5;
1003	uint64_t statovr                      : 1;  /**< TX Statistics overflow */
1004	uint64_t reserved_24_25               : 2;
1005	uint64_t loststat                     : 2;  /**< TX Statistics data was over-written
1006                                                         In MII/RGMII, one bit per port
1007                                                         TX Stats are corrupted */
1008	uint64_t reserved_4_21                : 18;
1009	uint64_t out_ovr                      : 2;  /**< Outbound data FIFO overflow */
1010	uint64_t reserved_0_1                 : 2;
1011#else
1012	uint64_t reserved_0_1                 : 2;
1013	uint64_t out_ovr                      : 2;
1014	uint64_t reserved_4_21                : 18;
1015	uint64_t loststat                     : 2;
1016	uint64_t reserved_24_25               : 2;
1017	uint64_t statovr                      : 1;
1018	uint64_t reserved_27_31               : 5;
1019	uint64_t ovrflw                       : 1;
1020	uint64_t txpop                        : 1;
1021	uint64_t txpsh                        : 1;
1022	uint64_t ovrflw1                      : 1;
1023	uint64_t txpop1                       : 1;
1024	uint64_t txpsh1                       : 1;
1025	uint64_t reserved_38_63               : 26;
1026#endif
1027	} s;
1028	struct cvmx_agl_gmx_bad_reg_cn52xx
1029	{
1030#if __BYTE_ORDER == __BIG_ENDIAN
1031	uint64_t reserved_38_63               : 26;
1032	uint64_t txpsh1                       : 1;  /**< TX FIFO overflow (MII1) */
1033	uint64_t txpop1                       : 1;  /**< TX FIFO underflow (MII1) */
1034	uint64_t ovrflw1                      : 1;  /**< RX FIFO overflow (MII1) */
1035	uint64_t txpsh                        : 1;  /**< TX FIFO overflow (MII0) */
1036	uint64_t txpop                        : 1;  /**< TX FIFO underflow (MII0) */
1037	uint64_t ovrflw                       : 1;  /**< RX FIFO overflow (MII0) */
1038	uint64_t reserved_27_31               : 5;
1039	uint64_t statovr                      : 1;  /**< TX Statistics overflow */
1040	uint64_t reserved_23_25               : 3;
1041	uint64_t loststat                     : 1;  /**< TX Statistics data was over-written
1042                                                         TX Stats are corrupted */
1043	uint64_t reserved_4_21                : 18;
1044	uint64_t out_ovr                      : 2;  /**< Outbound data FIFO overflow */
1045	uint64_t reserved_0_1                 : 2;
1046#else
1047	uint64_t reserved_0_1                 : 2;
1048	uint64_t out_ovr                      : 2;
1049	uint64_t reserved_4_21                : 18;
1050	uint64_t loststat                     : 1;
1051	uint64_t reserved_23_25               : 3;
1052	uint64_t statovr                      : 1;
1053	uint64_t reserved_27_31               : 5;
1054	uint64_t ovrflw                       : 1;
1055	uint64_t txpop                        : 1;
1056	uint64_t txpsh                        : 1;
1057	uint64_t ovrflw1                      : 1;
1058	uint64_t txpop1                       : 1;
1059	uint64_t txpsh1                       : 1;
1060	uint64_t reserved_38_63               : 26;
1061#endif
1062	} cn52xx;
1063	struct cvmx_agl_gmx_bad_reg_cn52xx    cn52xxp1;
1064	struct cvmx_agl_gmx_bad_reg_cn56xx
1065	{
1066#if __BYTE_ORDER == __BIG_ENDIAN
1067	uint64_t reserved_35_63               : 29;
1068	uint64_t txpsh                        : 1;  /**< TX FIFO overflow */
1069	uint64_t txpop                        : 1;  /**< TX FIFO underflow */
1070	uint64_t ovrflw                       : 1;  /**< RX FIFO overflow */
1071	uint64_t reserved_27_31               : 5;
1072	uint64_t statovr                      : 1;  /**< TX Statistics overflow */
1073	uint64_t reserved_23_25               : 3;
1074	uint64_t loststat                     : 1;  /**< TX Statistics data was over-written
1075                                                         TX Stats are corrupted */
1076	uint64_t reserved_3_21                : 19;
1077	uint64_t out_ovr                      : 1;  /**< Outbound data FIFO overflow */
1078	uint64_t reserved_0_1                 : 2;
1079#else
1080	uint64_t reserved_0_1                 : 2;
1081	uint64_t out_ovr                      : 1;
1082	uint64_t reserved_3_21                : 19;
1083	uint64_t loststat                     : 1;
1084	uint64_t reserved_23_25               : 3;
1085	uint64_t statovr                      : 1;
1086	uint64_t reserved_27_31               : 5;
1087	uint64_t ovrflw                       : 1;
1088	uint64_t txpop                        : 1;
1089	uint64_t txpsh                        : 1;
1090	uint64_t reserved_35_63               : 29;
1091#endif
1092	} cn56xx;
1093	struct cvmx_agl_gmx_bad_reg_cn56xx    cn56xxp1;
1094	struct cvmx_agl_gmx_bad_reg_s         cn63xx;
1095	struct cvmx_agl_gmx_bad_reg_s         cn63xxp1;
1096};
1097typedef union cvmx_agl_gmx_bad_reg cvmx_agl_gmx_bad_reg_t;
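/* Editorial usage sketch (assumes cvmx_read_csr() from cvmx.h; the helper
 * name is illustrative): the union above lets software read the raw 64-bit
 * value once and then inspect the individual error indications by field name.
 */
static inline int cvmx_agl_example_bad_reg_has_error(void)
{
	cvmx_agl_gmx_bad_reg_t bad;

	bad.u64 = cvmx_read_csr(CVMX_AGL_GMX_BAD_REG);
	/* Any latched bit means data was lost or TX statistics corrupted. */
	return bad.s.out_ovr || bad.s.loststat || bad.s.statovr ||
	       bad.s.ovrflw || bad.s.txpop || bad.s.txpsh ||
	       bad.s.ovrflw1 || bad.s.txpop1 || bad.s.txpsh1;
}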
1098
1099/**
1100 * cvmx_agl_gmx_bist
1101 *
1102 * AGL_GMX_BIST = GMX BIST Results
1103 *
1104 *
1105 * Notes:
1106 * Not reset when MIX*_CTL[RESET] is set to 1.
1107 *
1108 */
1109union cvmx_agl_gmx_bist
1110{
1111	uint64_t u64;
1112	struct cvmx_agl_gmx_bist_s
1113	{
1114#if __BYTE_ORDER == __BIG_ENDIAN
1115	uint64_t reserved_25_63               : 39;
1116	uint64_t status                       : 25; /**< BIST Results.
1117                                                         HW sets a bit in BIST for memory that fails
1118                                                         - 0: gmx#.inb.fif_bnk0
1119                                                         - 1: gmx#.inb.fif_bnk1
1120                                                         - 2: gmx#.inb.fif_bnk2
1121                                                         - 3: gmx#.inb.fif_bnk3
1122                                                         - 4: gmx#.inb.fif_bnk_ext0
1123                                                         - 5: gmx#.inb.fif_bnk_ext1
1124                                                         - 6: gmx#.inb.fif_bnk_ext2
1125                                                         - 7: gmx#.inb.fif_bnk_ext3
1126                                                         - 8: gmx#.outb.fif.fif_bnk0
1127                                                         - 9: gmx#.outb.fif.fif_bnk1
1128                                                         - 10: RAZ
1129                                                         - 11: RAZ
1130                                                         - 12: gmx#.outb.fif.fif_bnk_ext0
1131                                                         - 13: gmx#.outb.fif.fif_bnk_ext1
1132                                                         - 14: RAZ
1133                                                         - 15: RAZ
1134                                                         - 16: gmx#.csr.gmi0.srf8x64m1_bist
1135                                                         - 17: gmx#.csr.gmi1.srf8x64m1_bist
1136                                                         - 18: RAZ
1137                                                         - 19: RAZ
1138                                                         - 20: gmx#.csr.drf20x32m2_bist
1139                                                         - 21: gmx#.csr.drf20x48m2_bist
1140                                                         - 22: gmx#.outb.stat.drf16x27m1_bist
1141                                                         - 23: gmx#.outb.stat.drf40x64m1_bist
1142                                                         - 24: RAZ */
1143#else
1144	uint64_t status                       : 25;
1145	uint64_t reserved_25_63               : 39;
1146#endif
1147	} s;
1148	struct cvmx_agl_gmx_bist_cn52xx
1149	{
1150#if __BYTE_ORDER == __BIG_ENDIAN
1151	uint64_t reserved_10_63               : 54;
1152	uint64_t status                       : 10; /**< BIST Results.
1153                                                          HW sets a bit in BIST for memory that fails
1154                                                         - 0: gmx#.inb.drf128x78m1_bist
1155                                                         - 1: gmx#.outb.fif.drf128x71m1_bist
1156                                                         - 2: gmx#.csr.gmi0.srf8x64m1_bist
1157                                                         - 3: gmx#.csr.gmi1.srf8x64m1_bist
1158                                                         - 4: 0
1159                                                         - 5: 0
1160                                                         - 6: gmx#.csr.drf20x80m1_bist
1161                                                         - 7: gmx#.outb.stat.drf16x27m1_bist
1162                                                         - 8: gmx#.outb.stat.drf40x64m1_bist
1163                                                         - 9: 0 */
1164#else
1165	uint64_t status                       : 10;
1166	uint64_t reserved_10_63               : 54;
1167#endif
1168	} cn52xx;
1169	struct cvmx_agl_gmx_bist_cn52xx       cn52xxp1;
1170	struct cvmx_agl_gmx_bist_cn52xx       cn56xx;
1171	struct cvmx_agl_gmx_bist_cn52xx       cn56xxp1;
1172	struct cvmx_agl_gmx_bist_s            cn63xx;
1173	struct cvmx_agl_gmx_bist_s            cn63xxp1;
1174};
1175typedef union cvmx_agl_gmx_bist cvmx_agl_gmx_bist_t;
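/* Editorial usage sketch (assumes cvmx_read_csr() from cvmx.h; helper name is
 * illustrative): any bit set in STATUS identifies an on-chip memory that
 * failed BIST, so a non-zero register value is enough to flag a bad part.
 */
static inline int cvmx_agl_example_bist_failed(void)
{
	cvmx_agl_gmx_bist_t bist;

	bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	return bist.s.status != 0;
}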
1176
1177/**
1178 * cvmx_agl_gmx_drv_ctl
1179 *
1180 * AGL_GMX_DRV_CTL = GMX Drive Control
1181 *
1182 *
1183 * Notes:
1184 * NCTL, PCTL, BYP_EN    will be reset when MIX0_CTL[RESET] is set to 1.
1185 * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
1186 */
1187union cvmx_agl_gmx_drv_ctl
1188{
1189	uint64_t u64;
1190	struct cvmx_agl_gmx_drv_ctl_s
1191	{
1192#if __BYTE_ORDER == __BIG_ENDIAN
1193	uint64_t reserved_49_63               : 15;
1194	uint64_t byp_en1                      : 1;  /**< Compensation Controller Bypass Enable (MII1) */
1195	uint64_t reserved_45_47               : 3;
1196	uint64_t pctl1                        : 5;  /**< AGL PCTL (MII1) */
1197	uint64_t reserved_37_39               : 3;
1198	uint64_t nctl1                        : 5;  /**< AGL NCTL (MII1) */
1199	uint64_t reserved_17_31               : 15;
1200	uint64_t byp_en                       : 1;  /**< Compensation Controller Bypass Enable */
1201	uint64_t reserved_13_15               : 3;
1202	uint64_t pctl                         : 5;  /**< AGL PCTL */
1203	uint64_t reserved_5_7                 : 3;
1204	uint64_t nctl                         : 5;  /**< AGL NCTL */
1205#else
1206	uint64_t nctl                         : 5;
1207	uint64_t reserved_5_7                 : 3;
1208	uint64_t pctl                         : 5;
1209	uint64_t reserved_13_15               : 3;
1210	uint64_t byp_en                       : 1;
1211	uint64_t reserved_17_31               : 15;
1212	uint64_t nctl1                        : 5;
1213	uint64_t reserved_37_39               : 3;
1214	uint64_t pctl1                        : 5;
1215	uint64_t reserved_45_47               : 3;
1216	uint64_t byp_en1                      : 1;
1217	uint64_t reserved_49_63               : 15;
1218#endif
1219	} s;
1220	struct cvmx_agl_gmx_drv_ctl_s         cn52xx;
1221	struct cvmx_agl_gmx_drv_ctl_s         cn52xxp1;
1222	struct cvmx_agl_gmx_drv_ctl_cn56xx
1223	{
1224#if __BYTE_ORDER == __BIG_ENDIAN
1225	uint64_t reserved_17_63               : 47;
1226	uint64_t byp_en                       : 1;  /**< Compensation Controller Bypass Enable */
1227	uint64_t reserved_13_15               : 3;
1228	uint64_t pctl                         : 5;  /**< AGL PCTL */
1229	uint64_t reserved_5_7                 : 3;
1230	uint64_t nctl                         : 5;  /**< AGL NCTL */
1231#else
1232	uint64_t nctl                         : 5;
1233	uint64_t reserved_5_7                 : 3;
1234	uint64_t pctl                         : 5;
1235	uint64_t reserved_13_15               : 3;
1236	uint64_t byp_en                       : 1;
1237	uint64_t reserved_17_63               : 47;
1238#endif
1239	} cn56xx;
1240	struct cvmx_agl_gmx_drv_ctl_cn56xx    cn56xxp1;
1241};
1242typedef union cvmx_agl_gmx_drv_ctl cvmx_agl_gmx_drv_ctl_t;
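
/* Usage sketch: read-modify-write of the drive control register, here enabling
 * the compensation-controller bypass for the first MII interface.  A minimal
 * sketch, assuming cvmx_read_csr()/cvmx_write_csr() from cvmx.h; note that
 * BYP_EN/PCTL/NCTL are reset again whenever MIX0_CTL[RESET] is set. */
static inline void cvmx_agl_gmx_drv_ctl_byp_example(void)
{
	cvmx_agl_gmx_drv_ctl_t drv;

	drv.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
	drv.s.byp_en = 1;	/* bypass the compensation controller (MII0) */
	cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv.u64);
}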
1243
1244/**
1245 * cvmx_agl_gmx_inf_mode
1246 *
1247 * AGL_GMX_INF_MODE = Interface Mode
1248 *
1249 *
1250 * Notes:
1251 * Not reset when MIX*_CTL[RESET] is set to 1.
1252 *
1253 */
1254union cvmx_agl_gmx_inf_mode
1255{
1256	uint64_t u64;
1257	struct cvmx_agl_gmx_inf_mode_s
1258	{
1259#if __BYTE_ORDER == __BIG_ENDIAN
1260	uint64_t reserved_2_63                : 62;
1261	uint64_t en                           : 1;  /**< Interface Enable */
1262	uint64_t reserved_0_0                 : 1;
1263#else
1264	uint64_t reserved_0_0                 : 1;
1265	uint64_t en                           : 1;
1266	uint64_t reserved_2_63                : 62;
1267#endif
1268	} s;
1269	struct cvmx_agl_gmx_inf_mode_s        cn52xx;
1270	struct cvmx_agl_gmx_inf_mode_s        cn52xxp1;
1271	struct cvmx_agl_gmx_inf_mode_s        cn56xx;
1272	struct cvmx_agl_gmx_inf_mode_s        cn56xxp1;
1273};
1274typedef union cvmx_agl_gmx_inf_mode cvmx_agl_gmx_inf_mode_t;
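
/* Usage sketch: set EN to bring the AGL interface up.  A minimal sketch,
 * assuming the CVMX_AGL_GMX_INF_MODE address macro defined earlier in this
 * file and cvmx_read_csr()/cvmx_write_csr() from cvmx.h. */
static inline void cvmx_agl_gmx_inf_mode_enable_example(void)
{
	cvmx_agl_gmx_inf_mode_t mode;

	mode.u64 = cvmx_read_csr(CVMX_AGL_GMX_INF_MODE);
	mode.s.en = 1;		/* Interface Enable */
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, mode.u64);
}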
1275
1276/**
1277 * cvmx_agl_gmx_prt#_cfg
1278 *
1279 * AGL_GMX_PRT_CFG = Port description
1280 *
1281 *
1282 * Notes:
1283 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1284 *
1285 */
1286union cvmx_agl_gmx_prtx_cfg
1287{
1288	uint64_t u64;
1289	struct cvmx_agl_gmx_prtx_cfg_s
1290	{
1291#if __BYTE_ORDER == __BIG_ENDIAN
1292	uint64_t reserved_14_63               : 50;
1293	uint64_t tx_idle                      : 1;  /**< TX Machine is idle */
1294	uint64_t rx_idle                      : 1;  /**< RX Machine is idle */
1295	uint64_t reserved_9_11                : 3;
1296	uint64_t speed_msb                    : 1;  /**< Link Speed MSB [SPEED_MSB:SPEED]
1297                                                         10 = 10Mbs operation
1298                                                         00 = 100Mbs operation
1299                                                         01 = 1000Mbs operation
1300                                                         11 = Reserved */
1301	uint64_t reserved_7_7                 : 1;
1302	uint64_t burst                        : 1;  /**< Half-Duplex Burst Enable
1303                                                         Only valid for 1000Mbs half-duplex operation
1304                                                          0 = burst length of 0x2000 (halfdup / 1000Mbs)
1305                                                          1 = burst length of 0x0    (all other modes) */
1306	uint64_t tx_en                        : 1;  /**< Port enable.  Must be set for Octane to send
1307                                                         RGMII traffic.   When this bit is clear on a given
1308                                                         port, then all packet cycles will appear as
1309                                                         inter-frame cycles. */
1310	uint64_t rx_en                        : 1;  /**< Port enable.  Must be set for Octane to receive
1311                                                         RGMII traffic.  When this bit is clear on a given
1312                                                         port, then all packet cycles will appear as
1313                                                         inter-frame cycles. */
1314	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
1315                                                         0 = 512 bit-times (10/100Mbs operation)
1316                                                         1 = 4096 bit-times (1000Mbs operation)
1317	uint64_t duplex                       : 1;  /**< Duplex
1318                                                         0 = Half Duplex (collisions/extensions/bursts)
1319                                                         1 = Full Duplex */
1320	uint64_t speed                        : 1;  /**< Link Speed LSB [SPEED_MSB:SPEED]
1321                                                         10 = 10Mbs operation
1322                                                         00 = 100Mbs operation
1323                                                         01 = 1000Mbs operation
1324                                                         11 = Reserved */
1325	uint64_t en                           : 1;  /**< Link Enable
1326                                                         When EN is clear, packets will not be received
1327                                                         or transmitted (including PAUSE and JAM packets).
1328                                                         If EN is cleared while a packet is currently
1329                                                         being received or transmitted, the packet will
1330                                                         be allowed to complete before the bus is idled.
1331                                                         On the RX side, subsequent packets in a burst
1332                                                         will be ignored. */
1333#else
1334	uint64_t en                           : 1;
1335	uint64_t speed                        : 1;
1336	uint64_t duplex                       : 1;
1337	uint64_t slottime                     : 1;
1338	uint64_t rx_en                        : 1;
1339	uint64_t tx_en                        : 1;
1340	uint64_t burst                        : 1;
1341	uint64_t reserved_7_7                 : 1;
1342	uint64_t speed_msb                    : 1;
1343	uint64_t reserved_9_11                : 3;
1344	uint64_t rx_idle                      : 1;
1345	uint64_t tx_idle                      : 1;
1346	uint64_t reserved_14_63               : 50;
1347#endif
1348	} s;
1349	struct cvmx_agl_gmx_prtx_cfg_cn52xx
1350	{
1351#if __BYTE_ORDER == __BIG_ENDIAN
1352	uint64_t reserved_6_63                : 58;
1353	uint64_t tx_en                        : 1;  /**< Port enable.  Must be set for Octane to send
1354                                                         MII traffic.   When this bit is clear on a given
1355                                                         port, then all MII cycles will appear as
1356                                                         inter-frame cycles. */
1357	uint64_t rx_en                        : 1;  /**< Port enable.  Must be set for Octane to receive
1358                                                         MII traffic.  When this bit is clear on a given
1359                                                         port, then all MII cycles will appear as
1360                                                         inter-frame cycles. */
1361	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
1362                                                         0 = 512 bit-times (10/100Mbs operation)
1363                                                         1 = Reserved */
1364	uint64_t duplex                       : 1;  /**< Duplex
1365                                                         0 = Half Duplex (collisions/extensions/bursts)
1366                                                         1 = Full Duplex */
1367	uint64_t speed                        : 1;  /**< Link Speed
1368                                                         0 = 10/100Mbs operation
1369                                                         1 = Reserved */
1370	uint64_t en                           : 1;  /**< Link Enable
1371                                                         When EN is clear, packets will not be received
1372                                                         or transmitted (including PAUSE and JAM packets).
1373                                                         If EN is cleared while a packet is currently
1374                                                         being received or transmitted, the packet will
1375                                                         be allowed to complete before the bus is idled.
1376                                                         On the RX side, subsequent packets in a burst
1377                                                         will be ignored. */
1378#else
1379	uint64_t en                           : 1;
1380	uint64_t speed                        : 1;
1381	uint64_t duplex                       : 1;
1382	uint64_t slottime                     : 1;
1383	uint64_t rx_en                        : 1;
1384	uint64_t tx_en                        : 1;
1385	uint64_t reserved_6_63                : 58;
1386#endif
1387	} cn52xx;
1388	struct cvmx_agl_gmx_prtx_cfg_cn52xx   cn52xxp1;
1389	struct cvmx_agl_gmx_prtx_cfg_cn52xx   cn56xx;
1390	struct cvmx_agl_gmx_prtx_cfg_cn52xx   cn56xxp1;
1391	struct cvmx_agl_gmx_prtx_cfg_s        cn63xx;
1392	struct cvmx_agl_gmx_prtx_cfg_s        cn63xxp1;
1393};
1394typedef union cvmx_agl_gmx_prtx_cfg cvmx_agl_gmx_prtx_cfg_t;
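
/* Usage sketch: program a port for 1000Mbs full-duplex operation using the
 * [SPEED_MSB:SPEED]=01 encoding described above.  A minimal sketch for the
 * CN63XX-style layout, assuming the per-port CVMX_AGL_GMX_PRTX_CFG(port)
 * address macro defined earlier in this file and cvmx_read_csr()/
 * cvmx_write_csr() from cvmx.h. */
static inline void cvmx_agl_gmx_prt_cfg_1000_fdx_example(int port)
{
	cvmx_agl_gmx_prtx_cfg_t cfg;

	cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	cfg.s.speed_msb = 0;	/* [SPEED_MSB:SPEED] = 01 -> 1000Mbs */
	cfg.s.speed     = 1;
	cfg.s.slottime  = 1;	/* 4096 bit-times for 1000Mbs */
	cfg.s.duplex    = 1;	/* full duplex */
	cfg.s.rx_en     = 1;
	cfg.s.tx_en     = 1;
	cfg.s.en        = 1;	/* link enable */
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), cfg.u64);
}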
1395
1396/**
1397 * cvmx_agl_gmx_rx#_adr_cam0
1398 *
1399 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1400 *
1401 *
1402 * Notes:
1403 * Not reset when MIX*_CTL[RESET] is set to 1.
1404 *
1405 */
1406union cvmx_agl_gmx_rxx_adr_cam0
1407{
1408	uint64_t u64;
1409	struct cvmx_agl_gmx_rxx_adr_cam0_s
1410	{
1411#if __BYTE_ORDER == __BIG_ENDIAN
1412	uint64_t adr                          : 64; /**< The DMAC address to match on
1413                                                         Each entry contributes 8 bits to one of 8 matchers
1414                                                         Write transactions to AGL_GMX_RX_ADR_CAM will not
1415                                                         change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
1416                                                         The CAM matches against unicast or multicast DMAC
1417                                                         addresses. */
1418#else
1419	uint64_t adr                          : 64;
1420#endif
1421	} s;
1422	struct cvmx_agl_gmx_rxx_adr_cam0_s    cn52xx;
1423	struct cvmx_agl_gmx_rxx_adr_cam0_s    cn52xxp1;
1424	struct cvmx_agl_gmx_rxx_adr_cam0_s    cn56xx;
1425	struct cvmx_agl_gmx_rxx_adr_cam0_s    cn56xxp1;
1426	struct cvmx_agl_gmx_rxx_adr_cam0_s    cn63xx;
1427	struct cvmx_agl_gmx_rxx_adr_cam0_s    cn63xxp1;
1428};
1429typedef union cvmx_agl_gmx_rxx_adr_cam0 cvmx_agl_gmx_rxx_adr_cam0_t;
1430
1431/**
1432 * cvmx_agl_gmx_rx#_adr_cam1
1433 *
1434 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1435 *
1436 *
1437 * Notes:
1438 * Not reset when MIX*_CTL[RESET] is set to 1.
1439 *
1440 */
1441union cvmx_agl_gmx_rxx_adr_cam1
1442{
1443	uint64_t u64;
1444	struct cvmx_agl_gmx_rxx_adr_cam1_s
1445	{
1446#if __BYTE_ORDER == __BIG_ENDIAN
1447	uint64_t adr                          : 64; /**< The DMAC address to match on
1448                                                         Each entry contributes 8 bits to one of 8 matchers
1449                                                         Write transactions to AGL_GMX_RX_ADR_CAM will not
1450                                                         change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
1451                                                         The CAM matches against unicast or multicast DMAC
1452                                                         addresses. */
1453#else
1454	uint64_t adr                          : 64;
1455#endif
1456	} s;
1457	struct cvmx_agl_gmx_rxx_adr_cam1_s    cn52xx;
1458	struct cvmx_agl_gmx_rxx_adr_cam1_s    cn52xxp1;
1459	struct cvmx_agl_gmx_rxx_adr_cam1_s    cn56xx;
1460	struct cvmx_agl_gmx_rxx_adr_cam1_s    cn56xxp1;
1461	struct cvmx_agl_gmx_rxx_adr_cam1_s    cn63xx;
1462	struct cvmx_agl_gmx_rxx_adr_cam1_s    cn63xxp1;
1463};
1464typedef union cvmx_agl_gmx_rxx_adr_cam1 cvmx_agl_gmx_rxx_adr_cam1_t;
1465
1466/**
1467 * cvmx_agl_gmx_rx#_adr_cam2
1468 *
1469 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1470 *
1471 *
1472 * Notes:
1473 * Not reset when MIX*_CTL[RESET] is set to 1.
1474 *
1475 */
1476union cvmx_agl_gmx_rxx_adr_cam2
1477{
1478	uint64_t u64;
1479	struct cvmx_agl_gmx_rxx_adr_cam2_s
1480	{
1481#if __BYTE_ORDER == __BIG_ENDIAN
1482	uint64_t adr                          : 64; /**< The DMAC address to match on
1483                                                         Each entry contributes 8 bits to one of 8 matchers
1484                                                         Write transactions to AGL_GMX_RX_ADR_CAM will not
1485                                                         change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
1486                                                         The CAM matches against unicast or multicast DMAC
1487                                                         addresses. */
1488#else
1489	uint64_t adr                          : 64;
1490#endif
1491	} s;
1492	struct cvmx_agl_gmx_rxx_adr_cam2_s    cn52xx;
1493	struct cvmx_agl_gmx_rxx_adr_cam2_s    cn52xxp1;
1494	struct cvmx_agl_gmx_rxx_adr_cam2_s    cn56xx;
1495	struct cvmx_agl_gmx_rxx_adr_cam2_s    cn56xxp1;
1496	struct cvmx_agl_gmx_rxx_adr_cam2_s    cn63xx;
1497	struct cvmx_agl_gmx_rxx_adr_cam2_s    cn63xxp1;
1498};
1499typedef union cvmx_agl_gmx_rxx_adr_cam2 cvmx_agl_gmx_rxx_adr_cam2_t;
1500
1501/**
1502 * cvmx_agl_gmx_rx#_adr_cam3
1503 *
1504 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1505 *
1506 *
1507 * Notes:
1508 * Not reset when MIX*_CTL[RESET] is set to 1.
1509 *
1510 */
1511union cvmx_agl_gmx_rxx_adr_cam3
1512{
1513	uint64_t u64;
1514	struct cvmx_agl_gmx_rxx_adr_cam3_s
1515	{
1516#if __BYTE_ORDER == __BIG_ENDIAN
1517	uint64_t adr                          : 64; /**< The DMAC address to match on
1518                                                         Each entry contributes 8 bits to one of 8 matchers
1519                                                         Write transactions to AGL_GMX_RX_ADR_CAM will not
1520                                                         change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
1521                                                         The CAM matches against unicast or multicast DMAC
1522                                                         addresses. */
1523#else
1524	uint64_t adr                          : 64;
1525#endif
1526	} s;
1527	struct cvmx_agl_gmx_rxx_adr_cam3_s    cn52xx;
1528	struct cvmx_agl_gmx_rxx_adr_cam3_s    cn52xxp1;
1529	struct cvmx_agl_gmx_rxx_adr_cam3_s    cn56xx;
1530	struct cvmx_agl_gmx_rxx_adr_cam3_s    cn56xxp1;
1531	struct cvmx_agl_gmx_rxx_adr_cam3_s    cn63xx;
1532	struct cvmx_agl_gmx_rxx_adr_cam3_s    cn63xxp1;
1533};
1534typedef union cvmx_agl_gmx_rxx_adr_cam3 cvmx_agl_gmx_rxx_adr_cam3_t;
1535
1536/**
1537 * cvmx_agl_gmx_rx#_adr_cam4
1538 *
1539 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1540 *
1541 *
1542 * Notes:
1543 * Not reset when MIX*_CTL[RESET] is set to 1.
1544 *
1545 */
1546union cvmx_agl_gmx_rxx_adr_cam4
1547{
1548	uint64_t u64;
1549	struct cvmx_agl_gmx_rxx_adr_cam4_s
1550	{
1551#if __BYTE_ORDER == __BIG_ENDIAN
1552	uint64_t adr                          : 64; /**< The DMAC address to match on
1553                                                         Each entry contributes 8 bits to one of 8 matchers
1554                                                         Write transactions to AGL_GMX_RX_ADR_CAM will not
1555                                                         change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
1556                                                         The CAM matches against unicast or multicast DMAC
1557                                                         addresses. */
1558#else
1559	uint64_t adr                          : 64;
1560#endif
1561	} s;
1562	struct cvmx_agl_gmx_rxx_adr_cam4_s    cn52xx;
1563	struct cvmx_agl_gmx_rxx_adr_cam4_s    cn52xxp1;
1564	struct cvmx_agl_gmx_rxx_adr_cam4_s    cn56xx;
1565	struct cvmx_agl_gmx_rxx_adr_cam4_s    cn56xxp1;
1566	struct cvmx_agl_gmx_rxx_adr_cam4_s    cn63xx;
1567	struct cvmx_agl_gmx_rxx_adr_cam4_s    cn63xxp1;
1568};
1569typedef union cvmx_agl_gmx_rxx_adr_cam4 cvmx_agl_gmx_rxx_adr_cam4_t;
1570
1571/**
1572 * cvmx_agl_gmx_rx#_adr_cam5
1573 *
1574 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1575 *
1576 *
1577 * Notes:
1578 * Not reset when MIX*_CTL[RESET] is set to 1.
1579 *
1580 */
1581union cvmx_agl_gmx_rxx_adr_cam5
1582{
1583	uint64_t u64;
1584	struct cvmx_agl_gmx_rxx_adr_cam5_s
1585	{
1586#if __BYTE_ORDER == __BIG_ENDIAN
1587	uint64_t adr                          : 64; /**< The DMAC address to match on
1588                                                         Each entry contributes 8 bits to one of 8 matchers
1589                                                         Write transactions to AGL_GMX_RX_ADR_CAM will not
1590                                                         change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
1591                                                         The CAM matches against unicast or multicast DMAC
1592                                                         addresses. */
1593#else
1594	uint64_t adr                          : 64;
1595#endif
1596	} s;
1597	struct cvmx_agl_gmx_rxx_adr_cam5_s    cn52xx;
1598	struct cvmx_agl_gmx_rxx_adr_cam5_s    cn52xxp1;
1599	struct cvmx_agl_gmx_rxx_adr_cam5_s    cn56xx;
1600	struct cvmx_agl_gmx_rxx_adr_cam5_s    cn56xxp1;
1601	struct cvmx_agl_gmx_rxx_adr_cam5_s    cn63xx;
1602	struct cvmx_agl_gmx_rxx_adr_cam5_s    cn63xxp1;
1603};
1604typedef union cvmx_agl_gmx_rxx_adr_cam5 cvmx_agl_gmx_rxx_adr_cam5_t;
1605
1606/**
1607 * cvmx_agl_gmx_rx#_adr_cam_en
1608 *
1609 * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
1610 *
1611 *
1612 * Notes:
1613 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1614 *
1615 */
1616union cvmx_agl_gmx_rxx_adr_cam_en
1617{
1618	uint64_t u64;
1619	struct cvmx_agl_gmx_rxx_adr_cam_en_s
1620	{
1621#if __BYTE_ORDER == __BIG_ENDIAN
1622	uint64_t reserved_8_63                : 56;
1623	uint64_t en                           : 8;  /**< CAM Entry Enables */
1624#else
1625	uint64_t en                           : 8;
1626	uint64_t reserved_8_63                : 56;
1627#endif
1628	} s;
1629	struct cvmx_agl_gmx_rxx_adr_cam_en_s  cn52xx;
1630	struct cvmx_agl_gmx_rxx_adr_cam_en_s  cn52xxp1;
1631	struct cvmx_agl_gmx_rxx_adr_cam_en_s  cn56xx;
1632	struct cvmx_agl_gmx_rxx_adr_cam_en_s  cn56xxp1;
1633	struct cvmx_agl_gmx_rxx_adr_cam_en_s  cn63xx;
1634	struct cvmx_agl_gmx_rxx_adr_cam_en_s  cn63xxp1;
1635};
1636typedef union cvmx_agl_gmx_rxx_adr_cam_en cvmx_agl_gmx_rxx_adr_cam_en_t;
1637
1638/**
1639 * cvmx_agl_gmx_rx#_adr_ctl
1640 *
1641 * AGL_GMX_RX_ADR_CTL = Address Filtering Control
1642 *
1643 *
1644 * Notes:
1645 * * ALGORITHM
1646 *   Here is some pseudo code that represents the address filter behavior.
1647 *
1648 *      @verbatim
1649 *      bool dmac_addr_filter(uint8 prt, uint48 dmac) [
1650 *        ASSERT(prt >= 0 && prt <= 3);
1651 *        if (is_bcst(dmac))                               // broadcast accept
1652 *          return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
1653 *        if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1)   // multicast reject
1654 *          return REJECT;
1655 *        if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2)   // multicast accept
1656 *          return ACCEPT;
1657 *
1658 *        cam_hit = 0;
1659 *
1660 *        for (i=0; i<8; i++) [
1661 *          if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
1662 *            continue;
1663 *          uint48 unswizzled_mac_adr = 0x0;
1664 *          for (j=5; j>=0; j--) [
1665 *             unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
1666 *          ]
1667 *          if (unswizzled_mac_adr == dmac) [
1668 *            cam_hit = 1;
1669 *            break;
1670 *          ]
1671 *        ]
1672 *
1673 *        if (cam_hit)
1674 *          return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
1675 *        else
1676 *          return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
1677 *      ]
1678 *      @endverbatim
1679 *
1680 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1681 */
1682union cvmx_agl_gmx_rxx_adr_ctl
1683{
1684	uint64_t u64;
1685	struct cvmx_agl_gmx_rxx_adr_ctl_s
1686	{
1687#if __BYTE_ORDER == __BIG_ENDIAN
1688	uint64_t reserved_4_63                : 60;
1689	uint64_t cam_mode                     : 1;  /**< Allow or deny DMAC address filter
1690                                                         0 = reject the packet on DMAC address match
1691                                                         1 = accept the packet on DMAC address match */
1692	uint64_t mcst                         : 2;  /**< Multicast Mode
1693                                                         0 = Use the Address Filter CAM
1694                                                         1 = Force reject all multicast packets
1695                                                         2 = Force accept all multicast packets
1696                                                         3 = Reserved */
1697	uint64_t bcst                         : 1;  /**< Accept All Broadcast Packets */
1698#else
1699	uint64_t bcst                         : 1;
1700	uint64_t mcst                         : 2;
1701	uint64_t cam_mode                     : 1;
1702	uint64_t reserved_4_63                : 60;
1703#endif
1704	} s;
1705	struct cvmx_agl_gmx_rxx_adr_ctl_s     cn52xx;
1706	struct cvmx_agl_gmx_rxx_adr_ctl_s     cn52xxp1;
1707	struct cvmx_agl_gmx_rxx_adr_ctl_s     cn56xx;
1708	struct cvmx_agl_gmx_rxx_adr_ctl_s     cn56xxp1;
1709	struct cvmx_agl_gmx_rxx_adr_ctl_s     cn63xx;
1710	struct cvmx_agl_gmx_rxx_adr_ctl_s     cn63xxp1;
1711};
1712typedef union cvmx_agl_gmx_rxx_adr_ctl cvmx_agl_gmx_rxx_adr_ctl_t;
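
/* Usage sketch of the CAM programming implied by the pseudo code above:
 * byte j of the DMAC (bits <8j+7:8j> of 'mac') lives in ADR_CAM<j>, and CAM
 * entry i occupies bits <8i+7:8i> of every CAM register.  A minimal sketch,
 * assuming the per-port CVMX_AGL_GMX_RXX_ADR_CAM0(port)..CAM5(port) and
 * CVMX_AGL_GMX_RXX_ADR_CAM_EN(port) address macros defined earlier in this
 * file and cvmx_read_csr()/cvmx_write_csr() from cvmx.h.  Per the note above,
 * the CAM only accepts writes while AGL_GMX_PRT_CFG[EN] is disabled, and
 * AGL_GMX_RX_ADR_CTL[CAM_MODE] selects accept- or reject-on-match. */
static inline void cvmx_agl_gmx_rx_adr_cam_set_example(int port, int entry, uint64_t mac)
{
	uint64_t cam_addr[6];
	cvmx_agl_gmx_rxx_adr_cam_en_t cam_en;
	int j;

	cam_addr[0] = CVMX_AGL_GMX_RXX_ADR_CAM0(port);
	cam_addr[1] = CVMX_AGL_GMX_RXX_ADR_CAM1(port);
	cam_addr[2] = CVMX_AGL_GMX_RXX_ADR_CAM2(port);
	cam_addr[3] = CVMX_AGL_GMX_RXX_ADR_CAM3(port);
	cam_addr[4] = CVMX_AGL_GMX_RXX_ADR_CAM4(port);
	cam_addr[5] = CVMX_AGL_GMX_RXX_ADR_CAM5(port);

	for (j = 0; j < 6; j++) {
		uint64_t adr = cvmx_read_csr(cam_addr[j]);
		adr &= ~(0xffull << (8 * entry));			/* clear this entry's byte */
		adr |= ((mac >> (8 * j)) & 0xffull) << (8 * entry);	/* insert DMAC byte j */
		cvmx_write_csr(cam_addr[j], adr);
	}

	/* Enable the matcher for this entry. */
	cam_en.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port));
	cam_en.s.en |= 1 << entry;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_en.u64);
}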
1713
1714/**
1715 * cvmx_agl_gmx_rx#_decision
1716 *
1717 * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
1718 *
1719 *
1720 * Notes:
1721 * As each byte in a packet is received by GMX, the L2 byte count is compared
1722 * against the AGL_GMX_RX_DECISION[CNT].  The L2 byte count is the number of bytes
1723 * from the beginning of the L2 header (DMAC).  In normal operation, the L2
1724 * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
1725 * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
1726 *
1727 * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
1728 * packet and would require UDD skip length to account for them.
1729 *
1730 *                                                 L2 Size
1731 * Port Mode             <=AGL_GMX_RX_DECISION bytes (default=24)  >AGL_GMX_RX_DECISION bytes (default=24)
1732 *
1733 * MII/Full Duplex       accept packet                             apply filters
1734 *                       no filtering is applied                   accept packet based on DMAC and PAUSE packet filters
1735 *
1736 * MII/Half Duplex       drop packet                               apply filters
1737 *                       packet is unconditionally dropped         accept packet based on DMAC
1738 *
1739 * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
1740 *
1741 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1742 */
1743union cvmx_agl_gmx_rxx_decision
1744{
1745	uint64_t u64;
1746	struct cvmx_agl_gmx_rxx_decision_s
1747	{
1748#if __BYTE_ORDER == __BIG_ENDIAN
1749	uint64_t reserved_5_63                : 59;
1750	uint64_t cnt                          : 5;  /**< The byte count to decide when to accept or filter
1751                                                         a packet. */
1752#else
1753	uint64_t cnt                          : 5;
1754	uint64_t reserved_5_63                : 59;
1755#endif
1756	} s;
1757	struct cvmx_agl_gmx_rxx_decision_s    cn52xx;
1758	struct cvmx_agl_gmx_rxx_decision_s    cn52xxp1;
1759	struct cvmx_agl_gmx_rxx_decision_s    cn56xx;
1760	struct cvmx_agl_gmx_rxx_decision_s    cn56xxp1;
1761	struct cvmx_agl_gmx_rxx_decision_s    cn63xx;
1762	struct cvmx_agl_gmx_rxx_decision_s    cn63xxp1;
1763};
1764typedef union cvmx_agl_gmx_rxx_decision cvmx_agl_gmx_rxx_decision_t;
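
/* Worked form of the l2_size expression above, for reference.  A minimal
 * sketch; udd_skp_len mirrors AGL_GMX_RX_UDD_SKP[LEN] and pre_chk mirrors
 * AGL_GMX_RX_FRM_CTL[PRE_CHK]. */
static inline uint64_t cvmx_agl_gmx_rx_l2_size_example(uint64_t total_packet_size,
						       uint64_t udd_skp_len, int pre_chk)
{
	uint64_t skip = udd_skp_len + (pre_chk ? 8 : 0);	/* PREAMBLE+SFD = 8 bytes */

	return (total_packet_size > skip) ? (total_packet_size - skip) : 0;
}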
1765
1766/**
1767 * cvmx_agl_gmx_rx#_frm_chk
1768 *
1769 * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
1770 *
1771 *
1772 * Notes:
1773 * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
1774 *
1775 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1776 */
1777union cvmx_agl_gmx_rxx_frm_chk
1778{
1779	uint64_t u64;
1780	struct cvmx_agl_gmx_rxx_frm_chk_s
1781	{
1782#if __BYTE_ORDER == __BIG_ENDIAN
1783	uint64_t reserved_10_63               : 54;
1784	uint64_t niberr                       : 1;  /**< Nibble error */
1785	uint64_t skperr                       : 1;  /**< Skipper error */
1786	uint64_t rcverr                       : 1;  /**< Frame was received with packet data reception error */
1787	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
1788	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
1789	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
1790	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
1791	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
1792	uint64_t carext                       : 1;  /**< Carrier extend error */
1793	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
1794#else
1795	uint64_t minerr                       : 1;
1796	uint64_t carext                       : 1;
1797	uint64_t maxerr                       : 1;
1798	uint64_t jabber                       : 1;
1799	uint64_t fcserr                       : 1;
1800	uint64_t alnerr                       : 1;
1801	uint64_t lenerr                       : 1;
1802	uint64_t rcverr                       : 1;
1803	uint64_t skperr                       : 1;
1804	uint64_t niberr                       : 1;
1805	uint64_t reserved_10_63               : 54;
1806#endif
1807	} s;
1808	struct cvmx_agl_gmx_rxx_frm_chk_cn52xx
1809	{
1810#if __BYTE_ORDER == __BIG_ENDIAN
1811	uint64_t reserved_9_63                : 55;
1812	uint64_t skperr                       : 1;  /**< Skipper error */
1813	uint64_t rcverr                       : 1;  /**< Frame was received with MII Data reception error */
1814	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
1815	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
1816	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
1817	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
1818	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
1819	uint64_t reserved_1_1                 : 1;
1820	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
1821#else
1822	uint64_t minerr                       : 1;
1823	uint64_t reserved_1_1                 : 1;
1824	uint64_t maxerr                       : 1;
1825	uint64_t jabber                       : 1;
1826	uint64_t fcserr                       : 1;
1827	uint64_t alnerr                       : 1;
1828	uint64_t lenerr                       : 1;
1829	uint64_t rcverr                       : 1;
1830	uint64_t skperr                       : 1;
1831	uint64_t reserved_9_63                : 55;
1832#endif
1833	} cn52xx;
1834	struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
1835	struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
1836	struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
1837	struct cvmx_agl_gmx_rxx_frm_chk_s     cn63xx;
1838	struct cvmx_agl_gmx_rxx_frm_chk_s     cn63xxp1;
1839};
1840typedef union cvmx_agl_gmx_rxx_frm_chk cvmx_agl_gmx_rxx_frm_chk_t;
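
/* Usage sketch: select which receive errors mark a frame as bad.  Here every
 * check except LENERR is enabled (HW forces LENERR to zero anyway whenever
 * AGL_GMX_RX_UDD_SKP[LEN] != 0, per the note above).  A minimal sketch for the
 * CN63XX-style layout, assuming the per-port CVMX_AGL_GMX_RXX_FRM_CHK(port)
 * address macro defined earlier in this file and cvmx_write_csr() from cvmx.h. */
static inline void cvmx_agl_gmx_rx_frm_chk_example(int port)
{
	cvmx_agl_gmx_rxx_frm_chk_t chk;

	chk.u64 = 0;
	chk.s.minerr = 1;
	chk.s.carext = 1;
	chk.s.maxerr = 1;
	chk.s.jabber = 1;
	chk.s.fcserr = 1;
	chk.s.alnerr = 1;
	chk.s.rcverr = 1;
	chk.s.skperr = 1;
	chk.s.niberr = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CHK(port), chk.u64);
}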
1841
1842/**
1843 * cvmx_agl_gmx_rx#_frm_ctl
1844 *
1845 * AGL_GMX_RX_FRM_CTL = Frame Control
1846 *
1847 *
1848 * Notes:
1849 * * PRE_STRP
1850 *   When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
1851 *   determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
1852 *   core as part of the packet.
1853 *
1854 *   In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
1855 *   size when checking against the MIN and MAX bounds.  Furthermore, the bytes
1856 *   are skipped when locating the start of the L2 header for DMAC and Control
1857 *   frame recognition.
1858 *
1859 * * CTL_BCK/CTL_DRP
1860 *   These bits control how the HW handles incoming PAUSE packets.  Here are
1861 *   the most common modes of operation:
1862 *     CTL_BCK=1,CTL_DRP=1   - HW does it all
1863 *     CTL_BCK=0,CTL_DRP=0   - SW sees all pause frames
1864 *     CTL_BCK=0,CTL_DRP=1   - all pause frames are completely ignored
1865 *
1866 *   These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
1867 *   Since PAUSE packets only apply to fulldup operation, any PAUSE packet
1868 *   would constitute an exception which should be handled by the processing
1869 *   cores.  PAUSE packets should not be forwarded.
1870 *
1871 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1872 */
1873union cvmx_agl_gmx_rxx_frm_ctl
1874{
1875	uint64_t u64;
1876	struct cvmx_agl_gmx_rxx_frm_ctl_s
1877	{
1878#if __BYTE_ORDER == __BIG_ENDIAN
1879	uint64_t reserved_13_63               : 51;
1880	uint64_t ptp_mode                     : 1;  /**< Timestamp mode
1881                                                         When PTP_MODE is set, a 64-bit timestamp will be
1882                                                         prepended to every incoming packet. The timestamp
1883                                                         bytes are added to the packet in such a way as to
1884                                                         not modify the packet's receive byte count.  This
1885                                                         implies that the AGL_GMX_RX_JABBER,
1886                                                         AGL_GMX_RX_FRM_MIN, AGL_GMX_RX_FRM_MAX,
1887                                                         AGL_GMX_RX_DECISION, AGL_GMX_RX_UDD_SKP, and the
1888                                                         AGL_GMX_RX_STATS_* do not require any adjustment
1889                                                         as they operate on the received packet size.
1890                                                         If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
1891	uint64_t reserved_11_11               : 1;
1892	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
1893                                                         due to PARTIAL packets */
1894	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the SFD byte
1895                                                         regardless of the number of previous PREAMBLE
1896                                                         nibbles.  In this mode, PRE_STRP should be set to
1897                                                         account for the variable nature of the PREAMBLE.
1898                                                         PRE_CHK must be set to enable this and all
1899                                                         PREAMBLE features. */
1900	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
1901                                                         sized pkts with padding in the client data */
1902	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
1903	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is less strict.
1904                                                         AGL will begin the frame at the first SFD.
1905                                                         PRE_FREE must be set if PRE_ALIGN is set.
1906                                                         PRE_CHK must be set to enable this and all
1907                                                         PREAMBLE features. */
1908	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
1909	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assigned
1910                                                         Multicast address */
1911	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
1912	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
1913	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
1914                                                         0=PREAMBLE+SFD is sent to core as part of frame
1915                                                         1=PREAMBLE+SFD is dropped
1916                                                         PRE_STRP must be set if PRE_ALIGN is set.
1917                                                         PRE_CHK must be set to enable this and all
1918                                                         PREAMBLE features. */
1919	uint64_t pre_chk                      : 1;  /**< This port is configured to send a valid 802.3
1920                                                         PREAMBLE to begin every frame. AGL checks that a
1921                                                         valid PREAMBLE is received (based on PRE_FREE).
1922                                                         When a problem does occur within the PREAMBLE
1923                                                         sequence, the frame is marked as bad and not sent
1924                                                         into the core.  The AGL_GMX_RX_INT_REG[PCTERR]
1925                                                         interrupt is also raised. */
1926#else
1927	uint64_t pre_chk                      : 1;
1928	uint64_t pre_strp                     : 1;
1929	uint64_t ctl_drp                      : 1;
1930	uint64_t ctl_bck                      : 1;
1931	uint64_t ctl_mcst                     : 1;
1932	uint64_t ctl_smac                     : 1;
1933	uint64_t pre_free                     : 1;
1934	uint64_t vlan_len                     : 1;
1935	uint64_t pad_len                      : 1;
1936	uint64_t pre_align                    : 1;
1937	uint64_t null_dis                     : 1;
1938	uint64_t reserved_11_11               : 1;
1939	uint64_t ptp_mode                     : 1;
1940	uint64_t reserved_13_63               : 51;
1941#endif
1942	} s;
1943	struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx
1944	{
1945#if __BYTE_ORDER == __BIG_ENDIAN
1946	uint64_t reserved_10_63               : 54;
1947	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the SFD byte
1948                                                         regardless of the number of previous PREAMBLE
1949                                                         nibbles.  In this mode, PREAMBLE can be consumed
1950                                                         by the HW so when PRE_ALIGN is set, PRE_FREE,
1951                                                         PRE_STRP must be set for correct operation.
1952                                                         PRE_CHK must be set to enable this and all
1953                                                         PREAMBLE features. */
1954	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
1955                                                         sized pkts with padding in the client data */
1956	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
1957	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is less strict.
1958                                                         0 - 254 cycles of PREAMBLE followed by SFD
1959                                                         PRE_FREE must be set if PRE_ALIGN is set.
1960                                                         PRE_CHK must be set to enable this and all
1961                                                         PREAMBLE features. */
1962	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
1963	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assigned
1964                                                         Multicast address */
1965	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
1966	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
1967	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
1968                                                         0=PREAMBLE+SFD is sent to core as part of frame
1969                                                         1=PREAMBLE+SFD is dropped
1970                                                         PRE_STRP must be set if PRE_ALIGN is set.
1971                                                         PRE_CHK must be set to enable this and all
1972                                                         PREAMBLE features. */
1973	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
1974                                                         to begin every frame.  GMX checks that the
1975                                                         PREAMBLE is sent correctly */
1976#else
1977	uint64_t pre_chk                      : 1;
1978	uint64_t pre_strp                     : 1;
1979	uint64_t ctl_drp                      : 1;
1980	uint64_t ctl_bck                      : 1;
1981	uint64_t ctl_mcst                     : 1;
1982	uint64_t ctl_smac                     : 1;
1983	uint64_t pre_free                     : 1;
1984	uint64_t vlan_len                     : 1;
1985	uint64_t pad_len                      : 1;
1986	uint64_t pre_align                    : 1;
1987	uint64_t reserved_10_63               : 54;
1988#endif
1989	} cn52xx;
1990	struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
1991	struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
1992	struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
1993	struct cvmx_agl_gmx_rxx_frm_ctl_s     cn63xx;
1994	struct cvmx_agl_gmx_rxx_frm_ctl_s     cn63xxp1;
1995};
1996typedef union cvmx_agl_gmx_rxx_frm_ctl cvmx_agl_gmx_rxx_frm_ctl_t;
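
/* Usage sketch: the "HW does it all" PAUSE handling from the notes above
 * (CTL_BCK=1, CTL_DRP=1), with PREAMBLE checking and stripping enabled.
 * A minimal sketch, assuming the per-port CVMX_AGL_GMX_RXX_FRM_CTL(port)
 * address macro defined earlier in this file and cvmx_read_csr()/
 * cvmx_write_csr() from cvmx.h. */
static inline void cvmx_agl_gmx_rx_frm_ctl_hw_pause_example(int port)
{
	cvmx_agl_gmx_rxx_frm_ctl_t ctl;

	ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port));
	ctl.s.ctl_bck  = 1;	/* forward pause information to the TX block */
	ctl.s.ctl_drp  = 1;	/* drop the PAUSE frames themselves */
	ctl.s.pre_chk  = 1;	/* expect a valid 802.3 PREAMBLE */
	ctl.s.pre_strp = 1;	/* strip PREAMBLE+SFD before the core sees the frame */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), ctl.u64);
}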
1997
1998/**
1999 * cvmx_agl_gmx_rx#_frm_max
2000 *
2001 * AGL_GMX_RX_FRM_MAX = Frame Max length
2002 *
2003 *
2004 * Notes:
2005 * When changing the LEN field, be sure that LEN does not exceed
2006 * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
2007 * are within the maximum length parameter to be rejected because they exceed
2008 * the AGL_GMX_RX_JABBER[CNT] limit.
2009 *
2012 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2013 */
2014union cvmx_agl_gmx_rxx_frm_max
2015{
2016	uint64_t u64;
2017	struct cvmx_agl_gmx_rxx_frm_max_s
2018	{
2019#if __BYTE_ORDER == __BIG_ENDIAN
2020	uint64_t reserved_16_63               : 48;
2021	uint64_t len                          : 16; /**< Byte count for Max-sized frame check
2022                                                         AGL_GMX_RXn_FRM_CHK[MAXERR] enables the check
2023                                                         for port n.
2024                                                         If enabled, failing packets set the MAXERR
2025                                                         interrupt and the MIX opcode is set to OVER_FCS
2026                                                         (0x3, if packet has bad FCS) or OVER_ERR (0x4, if
2027                                                         packet has good FCS).
2028                                                         LEN <= AGL_GMX_RX_JABBER[CNT] */
2029#else
2030	uint64_t len                          : 16;
2031	uint64_t reserved_16_63               : 48;
2032#endif
2033	} s;
2034	struct cvmx_agl_gmx_rxx_frm_max_s     cn52xx;
2035	struct cvmx_agl_gmx_rxx_frm_max_s     cn52xxp1;
2036	struct cvmx_agl_gmx_rxx_frm_max_s     cn56xx;
2037	struct cvmx_agl_gmx_rxx_frm_max_s     cn56xxp1;
2038	struct cvmx_agl_gmx_rxx_frm_max_s     cn63xx;
2039	struct cvmx_agl_gmx_rxx_frm_max_s     cn63xxp1;
2040};
2041typedef union cvmx_agl_gmx_rxx_frm_max cvmx_agl_gmx_rxx_frm_max_t;
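
/* Usage sketch: program the maximum frame length.  The note above requires
 * LEN <= AGL_GMX_RX_JABBER[CNT], so the jabber count (defined elsewhere in
 * this file) should be programmed to at least this value first.  A minimal
 * sketch, assuming the per-port CVMX_AGL_GMX_RXX_FRM_MAX(port) address macro
 * defined earlier in this file and cvmx_write_csr() from cvmx.h. */
static inline void cvmx_agl_gmx_rx_set_frm_max_example(int port, uint64_t len)
{
	cvmx_agl_gmx_rxx_frm_max_t max;

	max.u64 = 0;
	max.s.len = len & 0xffff;	/* byte count for the max-sized frame check */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), max.u64);
}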
2042
2043/**
2044 * cvmx_agl_gmx_rx#_frm_min
2045 *
2046 * AGL_GMX_RX_FRM_MIN = Frame Min length
2047 *
2048 *
2049 * Notes:
2050 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2051 *
2052 */
2053union cvmx_agl_gmx_rxx_frm_min
2054{
2055	uint64_t u64;
2056	struct cvmx_agl_gmx_rxx_frm_min_s
2057	{
2058#if __BYTE_ORDER == __BIG_ENDIAN
2059	uint64_t reserved_16_63               : 48;
2060	uint64_t len                          : 16; /**< Byte count for Min-sized frame check
2061                                                         AGL_GMX_RXn_FRM_CHK[MINERR] enables the check
2062                                                         for port n.
2063                                                         If enabled, failing packets set the MINERR
2064                                                         interrupt and the MIX opcode is set to UNDER_FCS
2065                                                         (0x6, if packet has bad FCS) or UNDER_ERR (0x8,
2066                                                         if packet has good FCS). */
2067#else
2068	uint64_t len                          : 16;
2069	uint64_t reserved_16_63               : 48;
2070#endif
2071	} s;
2072	struct cvmx_agl_gmx_rxx_frm_min_s     cn52xx;
2073	struct cvmx_agl_gmx_rxx_frm_min_s     cn52xxp1;
2074	struct cvmx_agl_gmx_rxx_frm_min_s     cn56xx;
2075	struct cvmx_agl_gmx_rxx_frm_min_s     cn56xxp1;
2076	struct cvmx_agl_gmx_rxx_frm_min_s     cn63xx;
2077	struct cvmx_agl_gmx_rxx_frm_min_s     cn63xxp1;
2078};
2079typedef union cvmx_agl_gmx_rxx_frm_min cvmx_agl_gmx_rxx_frm_min_t;
2080
2081/**
2082 * cvmx_agl_gmx_rx#_ifg
2083 *
2084 * AGL_GMX_RX_IFG = RX Min IFG
2085 *
2086 *
2087 * Notes:
2088 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2089 *
2090 */
2091union cvmx_agl_gmx_rxx_ifg
2092{
2093	uint64_t u64;
2094	struct cvmx_agl_gmx_rxx_ifg_s
2095	{
2096#if __BYTE_ORDER == __BIG_ENDIAN
2097	uint64_t reserved_4_63                : 60;
2098	uint64_t ifg                          : 4;  /**< Min IFG (in IFG*8 bits) between packets used to
2099                                                         determine IFGERR. Normally IFG is 96 bits.
2100                                                         Note in some operating modes, IFG cycles can be
2101                                                         inserted or removed in order to achieve clock rate
2102                                                         adaptation. For these reasons, the default value
2103                                                         is slightly conservative and does not check up to
2104                                                         the full 96 bits of IFG. */
2105#else
2106	uint64_t ifg                          : 4;
2107	uint64_t reserved_4_63                : 60;
2108#endif
2109	} s;
2110	struct cvmx_agl_gmx_rxx_ifg_s         cn52xx;
2111	struct cvmx_agl_gmx_rxx_ifg_s         cn52xxp1;
2112	struct cvmx_agl_gmx_rxx_ifg_s         cn56xx;
2113	struct cvmx_agl_gmx_rxx_ifg_s         cn56xxp1;
2114	struct cvmx_agl_gmx_rxx_ifg_s         cn63xx;
2115	struct cvmx_agl_gmx_rxx_ifg_s         cn63xxp1;
2116};
2117typedef union cvmx_agl_gmx_rxx_ifg cvmx_agl_gmx_rxx_ifg_t;
2118
2119/**
2120 * cvmx_agl_gmx_rx#_int_en
2121 *
2122 * AGL_GMX_RX_INT_EN = Interrupt Enable
2123 *
2124 *
2125 * Notes:
2126 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2127 *
2128 */
2129union cvmx_agl_gmx_rxx_int_en
2130{
2131	uint64_t u64;
2132	struct cvmx_agl_gmx_rxx_int_en_s
2133	{
2134#if __BYTE_ORDER == __BIG_ENDIAN
2135	uint64_t reserved_20_63               : 44;
2136	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
2137	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex             |             NS */
2138	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed              |             NS */
2139	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus             |             NS */
2140	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
2141	uint64_t coldet                       : 1;  /**< Collision Detection */
2142	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
2143	uint64_t rsverr                       : 1;  /**< Packet reserved opcodes */
2144	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
2145	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
2146	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble)              |             NS */
2147	uint64_t skperr                       : 1;  /**< Skipper error */
2148	uint64_t rcverr                       : 1;  /**< Frame was received with RGMII Data reception error */
2149	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
2150	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
2151	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
2152	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
2153	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
2154	uint64_t carext                       : 1;  /**< Carrier extend error */
2155	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
2156#else
2157	uint64_t minerr                       : 1;
2158	uint64_t carext                       : 1;
2159	uint64_t maxerr                       : 1;
2160	uint64_t jabber                       : 1;
2161	uint64_t fcserr                       : 1;
2162	uint64_t alnerr                       : 1;
2163	uint64_t lenerr                       : 1;
2164	uint64_t rcverr                       : 1;
2165	uint64_t skperr                       : 1;
2166	uint64_t niberr                       : 1;
2167	uint64_t ovrerr                       : 1;
2168	uint64_t pcterr                       : 1;
2169	uint64_t rsverr                       : 1;
2170	uint64_t falerr                       : 1;
2171	uint64_t coldet                       : 1;
2172	uint64_t ifgerr                       : 1;
2173	uint64_t phy_link                     : 1;
2174	uint64_t phy_spd                      : 1;
2175	uint64_t phy_dupx                     : 1;
2176	uint64_t pause_drp                    : 1;
2177	uint64_t reserved_20_63               : 44;
2178#endif
2179	} s;
2180	struct cvmx_agl_gmx_rxx_int_en_cn52xx
2181	{
2182#if __BYTE_ORDER == __BIG_ENDIAN
2183	uint64_t reserved_20_63               : 44;
2184	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
2185	uint64_t reserved_16_18               : 3;
2186	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
2187	uint64_t coldet                       : 1;  /**< Collision Detection */
2188	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
2189	uint64_t rsverr                       : 1;  /**< MII reserved opcodes */
2190	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
2191	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
2192	uint64_t reserved_9_9                 : 1;
2193	uint64_t skperr                       : 1;  /**< Skipper error */
2194	uint64_t rcverr                       : 1;  /**< Frame was received with MII Data reception error */
2195	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
2196	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
2197	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
2198	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
2199	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
2200	uint64_t reserved_1_1                 : 1;
2201	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
2202#else
2203	uint64_t minerr                       : 1;
2204	uint64_t reserved_1_1                 : 1;
2205	uint64_t maxerr                       : 1;
2206	uint64_t jabber                       : 1;
2207	uint64_t fcserr                       : 1;
2208	uint64_t alnerr                       : 1;
2209	uint64_t lenerr                       : 1;
2210	uint64_t rcverr                       : 1;
2211	uint64_t skperr                       : 1;
2212	uint64_t reserved_9_9                 : 1;
2213	uint64_t ovrerr                       : 1;
2214	uint64_t pcterr                       : 1;
2215	uint64_t rsverr                       : 1;
2216	uint64_t falerr                       : 1;
2217	uint64_t coldet                       : 1;
2218	uint64_t ifgerr                       : 1;
2219	uint64_t reserved_16_18               : 3;
2220	uint64_t pause_drp                    : 1;
2221	uint64_t reserved_20_63               : 44;
2222#endif
2223	} cn52xx;
2224	struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
2225	struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
2226	struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
2227	struct cvmx_agl_gmx_rxx_int_en_s      cn63xx;
2228	struct cvmx_agl_gmx_rxx_int_en_s      cn63xxp1;
2229};
2230typedef union cvmx_agl_gmx_rxx_int_en cvmx_agl_gmx_rxx_int_en_t;
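
/* Usage sketch: unmask a common set of receive error interrupts; bits left at
 * zero stay masked.  A minimal sketch for the CN63XX-style layout, assuming
 * the per-port CVMX_AGL_GMX_RXX_INT_EN(port) address macro defined earlier in
 * this file and cvmx_write_csr() from cvmx.h. */
static inline void cvmx_agl_gmx_rx_int_en_example(int port)
{
	cvmx_agl_gmx_rxx_int_en_t en;

	en.u64 = 0;
	en.s.ovrerr = 1;	/* internal aggregation overflow (should never assert) */
	en.s.pcterr = 1;	/* bad PREAMBLE / protocol */
	en.s.fcserr = 1;	/* FCS/CRC error */
	en.s.jabber = 1;	/* frame longer than the jabber count */
	en.s.maxerr = 1;	/* frame longer than AGL_GMX_RX_FRM_MAX */
	en.s.minerr = 1;	/* frame shorter than AGL_GMX_RX_FRM_MIN */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_INT_EN(port), en.u64);
}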
2231
2232/**
2233 * cvmx_agl_gmx_rx#_int_reg
2234 *
2235 * AGL_GMX_RX_INT_REG = Interrupt Register
2236 *
2237 *
2238 * Notes:
2239 * (1) exceptions will only be raised to the control processor if the
2240 *     corresponding bit in the AGL_GMX_RX_INT_EN register is set.
2241 *
2242 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
2243 *     packet's workQ entry.  The AGL_GMX_RX_FRM_CHK register provides a bit mask
2244 *     for configuring which conditions set the error.
2245 *
2246 * (3) in half duplex operation, the expectation is that collisions will appear
2247 *     as MINERRs.
2248 *
2249 * (4) JABBER - An RX Jabber error indicates that a packet was received which
2250 *              is longer than the maximum allowed packet as defined by the
2251 *              system.  GMX will truncate the packet at the JABBER count.
2252 *              Failure to do so could lead to system instability.
2253 *
2254 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
2255 *              AGL_GMX_RX_FRM_MAX.  For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
2256 *              > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
2257 *
2258 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
2259 *
2260 * (8) ALNERR - Indicates that the packet received was not an integer number of
2261 *              bytes.  If FCS checking is enabled, ALNERR will only assert if
2262 *              the FCS is bad.  If FCS checking is disabled, ALNERR will
2263 *              assert in all non-integer frame cases.
2264 *
2265 * (9) Collisions - Collisions can only occur in half-duplex mode.  A collision
2266 *                  is assumed by the receiver when the received
2267 *                  frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
2268 *
2269 * (A) LENERR - Length errors occur when the received packet does not match the
2270 *              length field.  LENERR is only checked for packets between 64
2271 *              and 1500 bytes.  For untagged frames, the length must match
2272 *              exactly.  For tagged frames, the length or length+4 must match.
2273 *
2274 * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
2275 *              Does not check the number of PREAMBLE cycles.
2276 *
2277 * (C) OVRERR - Not to be included in the HRM
2278 *
2279 *              OVRERR is an architectural assertion check internal to GMX to
2280 *              make sure no assumption was violated.  In a correctly operating
2281 *              system, this interrupt can never fire.
2282 *
2283 *              GMX has an internal arbiter which selects which of 4 ports to
2284 *              buffer in the main RX FIFO.  If we normally buffer 8 bytes,
2285 *              then each port will typically push a tick every 8 cycles - if
2286 *              the packet interface is going as fast as possible.  If there
2287 *              are four ports, they push every two cycles.  So the assumption
2288 *              is that the inbound module will always be able to
2289 *              consume the tick before another is produced.  If that doesn't
2290 *              happen - that's when OVRERR will assert.
2291 *
2292 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2293 */
2294union cvmx_agl_gmx_rxx_int_reg
2295{
2296	uint64_t u64;
2297	struct cvmx_agl_gmx_rxx_int_reg_s
2298	{
2299#if __BYTE_ORDER == __BIG_ENDIAN
2300	uint64_t reserved_20_63               : 44;
2301	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
2302	uint64_t phy_dupx                     : 1;  /**< Change in the RGMII inbound LinkDuplex             |             NS */
2303	uint64_t phy_spd                      : 1;  /**< Change in the RGMII inbound LinkSpeed              |             NS */
2304	uint64_t phy_link                     : 1;  /**< Change in the RGMII inbound LinkStatus             |             NS */
2305	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
2306                                                         Does not necessarily indicate a failure */
2307	uint64_t coldet                       : 1;  /**< Collision Detection */
2308	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
2309	uint64_t rsverr                       : 1;  /**< Packet reserved opcodes */
2310	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
2311	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
2312                                                         This interrupt should never assert */
2313	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble)              |             NS */
2314	uint64_t skperr                       : 1;  /**< Skipper error */
2315	uint64_t rcverr                       : 1;  /**< Frame was received with Packet Data reception error */
2316	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
2317	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
2318	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
2319	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
2320	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
2321	uint64_t carext                       : 1;  /**< Carrier extend error */
2322	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
2323#else
2324	uint64_t minerr                       : 1;
2325	uint64_t carext                       : 1;
2326	uint64_t maxerr                       : 1;
2327	uint64_t jabber                       : 1;
2328	uint64_t fcserr                       : 1;
2329	uint64_t alnerr                       : 1;
2330	uint64_t lenerr                       : 1;
2331	uint64_t rcverr                       : 1;
2332	uint64_t skperr                       : 1;
2333	uint64_t niberr                       : 1;
2334	uint64_t ovrerr                       : 1;
2335	uint64_t pcterr                       : 1;
2336	uint64_t rsverr                       : 1;
2337	uint64_t falerr                       : 1;
2338	uint64_t coldet                       : 1;
2339	uint64_t ifgerr                       : 1;
2340	uint64_t phy_link                     : 1;
2341	uint64_t phy_spd                      : 1;
2342	uint64_t phy_dupx                     : 1;
2343	uint64_t pause_drp                    : 1;
2344	uint64_t reserved_20_63               : 44;
2345#endif
2346	} s;
2347	struct cvmx_agl_gmx_rxx_int_reg_cn52xx
2348	{
2349#if __BYTE_ORDER == __BIG_ENDIAN
2350	uint64_t reserved_20_63               : 44;
2351	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
2352	uint64_t reserved_16_18               : 3;
2353	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
2354                                                         Does not necessarily indicate a failure */
2355	uint64_t coldet                       : 1;  /**< Collision Detection */
2356	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
2357	uint64_t rsverr                       : 1;  /**< MII reserved opcodes */
2358	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
2359	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
2360                                                         This interrupt should never assert */
2361	uint64_t reserved_9_9                 : 1;
2362	uint64_t skperr                       : 1;  /**< Skipper error */
2363	uint64_t rcverr                       : 1;  /**< Frame was received with MII Data reception error */
2364	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
2365	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
2366	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
2367	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
2368	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
2369	uint64_t reserved_1_1                 : 1;
2370	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
2371#else
2372	uint64_t minerr                       : 1;
2373	uint64_t reserved_1_1                 : 1;
2374	uint64_t maxerr                       : 1;
2375	uint64_t jabber                       : 1;
2376	uint64_t fcserr                       : 1;
2377	uint64_t alnerr                       : 1;
2378	uint64_t lenerr                       : 1;
2379	uint64_t rcverr                       : 1;
2380	uint64_t skperr                       : 1;
2381	uint64_t reserved_9_9                 : 1;
2382	uint64_t ovrerr                       : 1;
2383	uint64_t pcterr                       : 1;
2384	uint64_t rsverr                       : 1;
2385	uint64_t falerr                       : 1;
2386	uint64_t coldet                       : 1;
2387	uint64_t ifgerr                       : 1;
2388	uint64_t reserved_16_18               : 3;
2389	uint64_t pause_drp                    : 1;
2390	uint64_t reserved_20_63               : 44;
2391#endif
2392	} cn52xx;
2393	struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
2394	struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
2395	struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
2396	struct cvmx_agl_gmx_rxx_int_reg_s     cn63xx;
2397	struct cvmx_agl_gmx_rxx_int_reg_s     cn63xxp1;
2398};
2399typedef union cvmx_agl_gmx_rxx_int_reg cvmx_agl_gmx_rxx_int_reg_t;
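
/* Illustrative usage sketch (not part of the auto-generated definitions):
 * acknowledge pending RX interrupt causes for one AGL port.  Assumes the
 * per-port address macro CVMX_AGL_GMX_RXX_INT_REG(port) defined earlier in
 * this file, the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h,
 * and the usual write-1-to-clear behaviour of GMX interrupt registers.
 */
static inline void example_agl_rx_ack_interrupts(int port)
{
	cvmx_agl_gmx_rxx_int_reg_t isr;

	isr.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_INT_REG(port));
	if (isr.s.ovrerr)
		cvmx_warn("AGL RX OVRERR asserted - should never happen\n");
	/* Write back the bits we saw to clear them */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_INT_REG(port), isr.u64);
}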
2400
2401/**
2402 * cvmx_agl_gmx_rx#_jabber
2403 *
2404 * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
2405 *
2406 *
2407 * Notes:
2408 * CNT must be 8-byte aligned such that CNT[2:0] == 0
2409 *
2410 *   The packet that will be sent to the packet input logic will have an
2411 *   additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
2412 *   AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear.  The max packet that will be sent is
2413 *   defined as...
2414 *
2415 *        max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
2416 *
2417 *   Be sure the CNT field value is at least as large as the
2418 *   AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
2419 *   packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
2420 *   because they exceed the CNT limit.
2421 *
2422 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2423 */
2424union cvmx_agl_gmx_rxx_jabber
2425{
2426	uint64_t u64;
2427	struct cvmx_agl_gmx_rxx_jabber_s
2428	{
2429#if __BYTE_ORDER == __BIG_ENDIAN
2430	uint64_t reserved_16_63               : 48;
2431	uint64_t cnt                          : 16; /**< Byte count for jabber check
2432                                                         Failing packets set the JABBER interrupt and are
2433                                                         optionally sent with opcode==JABBER
2434                                                         GMX will truncate the packet to CNT bytes
2435                                                         CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
2436#else
2437	uint64_t cnt                          : 16;
2438	uint64_t reserved_16_63               : 48;
2439#endif
2440	} s;
2441	struct cvmx_agl_gmx_rxx_jabber_s      cn52xx;
2442	struct cvmx_agl_gmx_rxx_jabber_s      cn52xxp1;
2443	struct cvmx_agl_gmx_rxx_jabber_s      cn56xx;
2444	struct cvmx_agl_gmx_rxx_jabber_s      cn56xxp1;
2445	struct cvmx_agl_gmx_rxx_jabber_s      cn63xx;
2446	struct cvmx_agl_gmx_rxx_jabber_s      cn63xxp1;
2447};
2448typedef union cvmx_agl_gmx_rxx_jabber cvmx_agl_gmx_rxx_jabber_t;
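
/* Illustrative sketch (not part of the auto-generated definitions): program
 * the jabber cutoff per the notes above - CNT must be 8-byte aligned and at
 * least AGL_GMX_RX_FRM_MAX[LEN].  Assumes the per-port address macro
 * CVMX_AGL_GMX_RXX_JABBER(port) defined earlier in this file and
 * cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_rx_set_jabber(int port, uint64_t frm_max_len)
{
	cvmx_agl_gmx_rxx_jabber_t jabber;

	jabber.u64 = 0;
	/* Round up to the next multiple of 8 so that CNT[2:0] == 0 */
	jabber.s.cnt = (frm_max_len + 7) & ~(uint64_t)7;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), jabber.u64);
}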
2449
2450/**
2451 * cvmx_agl_gmx_rx#_pause_drop_time
2452 *
2453 * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
2454 *
2455 *
2456 * Notes:
2457 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2458 *
2459 */
2460union cvmx_agl_gmx_rxx_pause_drop_time
2461{
2462	uint64_t u64;
2463	struct cvmx_agl_gmx_rxx_pause_drop_time_s
2464	{
2465#if __BYTE_ORDER == __BIG_ENDIAN
2466	uint64_t reserved_16_63               : 48;
2467	uint64_t status                       : 16; /**< Time extracted from the dropped PAUSE packet */
2468#else
2469	uint64_t status                       : 16;
2470	uint64_t reserved_16_63               : 48;
2471#endif
2472	} s;
2473	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
2474	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
2475	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
2476	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
2477	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
2478	struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
2479};
2480typedef union cvmx_agl_gmx_rxx_pause_drop_time cvmx_agl_gmx_rxx_pause_drop_time_t;
2481
2482/**
2483 * cvmx_agl_gmx_rx#_rx_inbnd
2484 *
2485 * AGL_GMX_RX_INBND = RGMII InBand Link Status
2486 *
2487 *
2488 * Notes:
2489 * These fields are only valid if the attached PHY is operating in RGMII mode
2490 * and supports the optional in-band status (see section 3.4.1 of the RGMII
2491 * specification, version 1.3 for more information).
2492 *
2493 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2494 */
2495union cvmx_agl_gmx_rxx_rx_inbnd
2496{
2497	uint64_t u64;
2498	struct cvmx_agl_gmx_rxx_rx_inbnd_s
2499	{
2500#if __BYTE_ORDER == __BIG_ENDIAN
2501	uint64_t reserved_4_63                : 60;
2502	uint64_t duplex                       : 1;  /**< RGMII Inbound LinkDuplex                           |             NS
2503                                                         0=half-duplex
2504                                                         1=full-duplex */
2505	uint64_t speed                        : 2;  /**< RGMII Inbound LinkSpeed                            |             NS
2506                                                         00=2.5MHz
2507                                                         01=25MHz
2508                                                         10=125MHz
2509                                                         11=Reserved */
2510	uint64_t status                       : 1;  /**< RGMII Inbound LinkStatus                           |             NS
2511                                                         0=down
2512                                                         1=up */
2513#else
2514	uint64_t status                       : 1;
2515	uint64_t speed                        : 2;
2516	uint64_t duplex                       : 1;
2517	uint64_t reserved_4_63                : 60;
2518#endif
2519	} s;
2520	struct cvmx_agl_gmx_rxx_rx_inbnd_s    cn63xx;
2521	struct cvmx_agl_gmx_rxx_rx_inbnd_s    cn63xxp1;
2522};
2523typedef union cvmx_agl_gmx_rxx_rx_inbnd cvmx_agl_gmx_rxx_rx_inbnd_t;
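
/* Illustrative sketch (not part of the auto-generated definitions): decode
 * the RGMII in-band status into a link speed in Mbps.  Only meaningful when
 * the attached PHY provides in-band status (see the notes above).  Assumes
 * the per-port address macro CVMX_AGL_GMX_RXX_RX_INBND(port) defined earlier
 * in this file and cvmx_read_csr() from cvmx.h.
 */
static inline int example_agl_rx_inbnd_speed_mbps(int port)
{
	cvmx_agl_gmx_rxx_rx_inbnd_t inbnd;

	inbnd.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_RX_INBND(port));
	if (!inbnd.s.status)
		return 0;		/* link down */
	switch (inbnd.s.speed) {
	case 0:  return 10;		/* 2.5MHz  -> 10Mbps   */
	case 1:  return 100;		/* 25MHz   -> 100Mbps  */
	case 2:  return 1000;		/* 125MHz  -> 1000Mbps */
	default: return -1;		/* reserved encoding   */
	}
}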
2524
2525/**
2526 * cvmx_agl_gmx_rx#_stats_ctl
2527 *
2528 * AGL_GMX_RX_STATS_CTL = RX Stats Control register
2529 *
2530 *
2531 * Notes:
2532 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2533 *
2534 */
2535union cvmx_agl_gmx_rxx_stats_ctl
2536{
2537	uint64_t u64;
2538	struct cvmx_agl_gmx_rxx_stats_ctl_s
2539	{
2540#if __BYTE_ORDER == __BIG_ENDIAN
2541	uint64_t reserved_1_63                : 63;
2542	uint64_t rd_clr                       : 1;  /**< RX Stats registers will clear on reads */
2543#else
2544	uint64_t rd_clr                       : 1;
2545	uint64_t reserved_1_63                : 63;
2546#endif
2547	} s;
2548	struct cvmx_agl_gmx_rxx_stats_ctl_s   cn52xx;
2549	struct cvmx_agl_gmx_rxx_stats_ctl_s   cn52xxp1;
2550	struct cvmx_agl_gmx_rxx_stats_ctl_s   cn56xx;
2551	struct cvmx_agl_gmx_rxx_stats_ctl_s   cn56xxp1;
2552	struct cvmx_agl_gmx_rxx_stats_ctl_s   cn63xx;
2553	struct cvmx_agl_gmx_rxx_stats_ctl_s   cn63xxp1;
2554};
2555typedef union cvmx_agl_gmx_rxx_stats_ctl cvmx_agl_gmx_rxx_stats_ctl_t;
2556
2557/**
2558 * cvmx_agl_gmx_rx#_stats_octs
2559 *
2560 * Notes:
2561 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2562 * - Counters will wrap
2563 * - Not reset when MIX*_CTL[RESET] is set to 1.
2564 */
2565union cvmx_agl_gmx_rxx_stats_octs
2566{
2567	uint64_t u64;
2568	struct cvmx_agl_gmx_rxx_stats_octs_s
2569	{
2570#if __BYTE_ORDER == __BIG_ENDIAN
2571	uint64_t reserved_48_63               : 16;
2572	uint64_t cnt                          : 48; /**< Octet count of received good packets */
2573#else
2574	uint64_t cnt                          : 48;
2575	uint64_t reserved_48_63               : 16;
2576#endif
2577	} s;
2578	struct cvmx_agl_gmx_rxx_stats_octs_s  cn52xx;
2579	struct cvmx_agl_gmx_rxx_stats_octs_s  cn52xxp1;
2580	struct cvmx_agl_gmx_rxx_stats_octs_s  cn56xx;
2581	struct cvmx_agl_gmx_rxx_stats_octs_s  cn56xxp1;
2582	struct cvmx_agl_gmx_rxx_stats_octs_s  cn63xx;
2583	struct cvmx_agl_gmx_rxx_stats_octs_s  cn63xxp1;
2584};
2585typedef union cvmx_agl_gmx_rxx_stats_octs cvmx_agl_gmx_rxx_stats_octs_t;
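
/* Illustrative sketch (not part of the auto-generated definitions): sample
 * the good-octet counter using the read-to-clear behaviour described in the
 * notes above.  Assumes the per-port address macros
 * CVMX_AGL_GMX_RXX_STATS_CTL(port) and CVMX_AGL_GMX_RXX_STATS_OCTS(port)
 * defined earlier in this file and the cvmx_read_csr()/cvmx_write_csr()
 * accessors from cvmx.h.
 */
static inline uint64_t example_agl_rx_sample_octs(int port)
{
	cvmx_agl_gmx_rxx_stats_ctl_t ctl;
	cvmx_agl_gmx_rxx_stats_octs_t octs;

	/* Have the stats registers clear on read so each sample is a delta */
	ctl.u64 = 0;
	ctl.s.rd_clr = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), ctl.u64);

	octs.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_OCTS(port));
	return octs.s.cnt;	/* 48-bit counter; it will wrap */
}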
2586
2587/**
2588 * cvmx_agl_gmx_rx#_stats_octs_ctl
2589 *
2590 * Notes:
2591 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2592 * - Counters will wrap
2593 * - Not reset when MIX*_CTL[RESET] is set to 1.
2594 */
2595union cvmx_agl_gmx_rxx_stats_octs_ctl
2596{
2597	uint64_t u64;
2598	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s
2599	{
2600#if __BYTE_ORDER == __BIG_ENDIAN
2601	uint64_t reserved_48_63               : 16;
2602	uint64_t cnt                          : 48; /**< Octet count of received pause packets */
2603#else
2604	uint64_t cnt                          : 48;
2605	uint64_t reserved_48_63               : 16;
2606#endif
2607	} s;
2608	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
2609	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
2610	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
2611	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
2612	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
2613	struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
2614};
2615typedef union cvmx_agl_gmx_rxx_stats_octs_ctl cvmx_agl_gmx_rxx_stats_octs_ctl_t;
2616
2617/**
2618 * cvmx_agl_gmx_rx#_stats_octs_dmac
2619 *
2620 * Notes:
2621 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2622 * - Counters will wrap
2623 * - Not reset when MIX*_CTL[RESET] is set to 1.
2624 */
2625union cvmx_agl_gmx_rxx_stats_octs_dmac
2626{
2627	uint64_t u64;
2628	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s
2629	{
2630#if __BYTE_ORDER == __BIG_ENDIAN
2631	uint64_t reserved_48_63               : 16;
2632	uint64_t cnt                          : 48; /**< Octet count of filtered dmac packets */
2633#else
2634	uint64_t cnt                          : 48;
2635	uint64_t reserved_48_63               : 16;
2636#endif
2637	} s;
2638	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
2639	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
2640	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
2641	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
2642	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
2643	struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
2644};
2645typedef union cvmx_agl_gmx_rxx_stats_octs_dmac cvmx_agl_gmx_rxx_stats_octs_dmac_t;
2646
2647/**
2648 * cvmx_agl_gmx_rx#_stats_octs_drp
2649 *
2650 * Notes:
2651 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2652 * - Counters will wrap
2653 * - Not reset when MIX*_CTL[RESET] is set to 1.
2654 */
2655union cvmx_agl_gmx_rxx_stats_octs_drp
2656{
2657	uint64_t u64;
2658	struct cvmx_agl_gmx_rxx_stats_octs_drp_s
2659	{
2660#if __BYTE_ORDER == __BIG_ENDIAN
2661	uint64_t reserved_48_63               : 16;
2662	uint64_t cnt                          : 48; /**< Octet count of dropped packets */
2663#else
2664	uint64_t cnt                          : 48;
2665	uint64_t reserved_48_63               : 16;
2666#endif
2667	} s;
2668	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
2669	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
2670	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
2671	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
2672	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
2673	struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
2674};
2675typedef union cvmx_agl_gmx_rxx_stats_octs_drp cvmx_agl_gmx_rxx_stats_octs_drp_t;
2676
2677/**
2678 * cvmx_agl_gmx_rx#_stats_pkts
2679 *
2680 * AGL_GMX_RX_STATS_PKTS
2681 *
2682 * Count of good received packets - packets that are not recognized as PAUSE
2683 * packets, dropped due to the DMAC filter, dropped due to FIFO full status, or
2684 * have any other OPCODE (FCS, Length, etc).
2685 *
2686 * Notes:
2687 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2688 * - Counters will wrap
2689 * - Not reset when MIX*_CTL[RESET] is set to 1.
2690 */
2691union cvmx_agl_gmx_rxx_stats_pkts
2692{
2693	uint64_t u64;
2694	struct cvmx_agl_gmx_rxx_stats_pkts_s
2695	{
2696#if __BYTE_ORDER == __BIG_ENDIAN
2697	uint64_t reserved_32_63               : 32;
2698	uint64_t cnt                          : 32; /**< Count of received good packets */
2699#else
2700	uint64_t cnt                          : 32;
2701	uint64_t reserved_32_63               : 32;
2702#endif
2703	} s;
2704	struct cvmx_agl_gmx_rxx_stats_pkts_s  cn52xx;
2705	struct cvmx_agl_gmx_rxx_stats_pkts_s  cn52xxp1;
2706	struct cvmx_agl_gmx_rxx_stats_pkts_s  cn56xx;
2707	struct cvmx_agl_gmx_rxx_stats_pkts_s  cn56xxp1;
2708	struct cvmx_agl_gmx_rxx_stats_pkts_s  cn63xx;
2709	struct cvmx_agl_gmx_rxx_stats_pkts_s  cn63xxp1;
2710};
2711typedef union cvmx_agl_gmx_rxx_stats_pkts cvmx_agl_gmx_rxx_stats_pkts_t;
2712
2713/**
2714 * cvmx_agl_gmx_rx#_stats_pkts_bad
2715 *
2716 * AGL_GMX_RX_STATS_PKTS_BAD
2717 *
2718 * Count of all packets received with some error that were not dropped
2719 * either due to the dmac filter or lack of room in the receive FIFO.
2720 *
2721 * Notes:
2722 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2723 * - Counters will wrap
2724 * - Not reset when MIX*_CTL[RESET] is set to 1.
2725 */
2726union cvmx_agl_gmx_rxx_stats_pkts_bad
2727{
2728	uint64_t u64;
2729	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s
2730	{
2731#if __BYTE_ORDER == __BIG_ENDIAN
2732	uint64_t reserved_32_63               : 32;
2733	uint64_t cnt                          : 32; /**< Count of bad packets */
2734#else
2735	uint64_t cnt                          : 32;
2736	uint64_t reserved_32_63               : 32;
2737#endif
2738	} s;
2739	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
2740	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
2741	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
2742	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
2743	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
2744	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
2745};
2746typedef union cvmx_agl_gmx_rxx_stats_pkts_bad cvmx_agl_gmx_rxx_stats_pkts_bad_t;
2747
2748/**
2749 * cvmx_agl_gmx_rx#_stats_pkts_ctl
2750 *
2751 * AGL_GMX_RX_STATS_PKTS_CTL
2752 *
2753 * Count of all packets received that were recognized as Flow Control or
2754 * PAUSE packets.  PAUSE packets with any kind of error are counted in
2755 * AGL_GMX_RX_STATS_PKTS_BAD.  Pause packets can be optionally dropped or
2756 * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit.  This count
2757 * increments regardless of whether the packet is dropped.  Pause packets
2758 * will never be counted in AGL_GMX_RX_STATS_PKTS.  Packets dropped due to the dmac
2759 * filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
2760 *
2761 * Notes:
2762 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2763 * - Counters will wrap
2764 * - Not reset when MIX*_CTL[RESET] is set to 1.
2765 */
2766union cvmx_agl_gmx_rxx_stats_pkts_ctl
2767{
2768	uint64_t u64;
2769	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s
2770	{
2771#if __BYTE_ORDER == __BIG_ENDIAN
2772	uint64_t reserved_32_63               : 32;
2773	uint64_t cnt                          : 32; /**< Count of received pause packets */
2774#else
2775	uint64_t cnt                          : 32;
2776	uint64_t reserved_32_63               : 32;
2777#endif
2778	} s;
2779	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
2780	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
2781	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
2782	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
2783	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
2784	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
2785};
2786typedef union cvmx_agl_gmx_rxx_stats_pkts_ctl cvmx_agl_gmx_rxx_stats_pkts_ctl_t;
2787
2788/**
2789 * cvmx_agl_gmx_rx#_stats_pkts_dmac
2790 *
2791 * AGL_GMX_RX_STATS_PKTS_DMAC
2792 *
2793 * Count of all packets received that were dropped by the dmac filter.
2794 * Packets that match the DMAC will be dropped and counted here regardless
2795 * of whether they were bad packets.  These packets will never be counted in
2796 * AGL_GMX_RX_STATS_PKTS.
2797 *
2798 * Some packets that were not able to satisfy the DECISION_CNT may not
2799 * actually be dropped by Octeon, but they will be counted here as if they
2800 * were dropped.
2801 *
2802 * Notes:
2803 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2804 * - Counters will wrap
2805 * - Not reset when MIX*_CTL[RESET] is set to 1.
2806 */
2807union cvmx_agl_gmx_rxx_stats_pkts_dmac
2808{
2809	uint64_t u64;
2810	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s
2811	{
2812#if __BYTE_ORDER == __BIG_ENDIAN
2813	uint64_t reserved_32_63               : 32;
2814	uint64_t cnt                          : 32; /**< Count of filtered dmac packets */
2815#else
2816	uint64_t cnt                          : 32;
2817	uint64_t reserved_32_63               : 32;
2818#endif
2819	} s;
2820	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
2821	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
2822	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
2823	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
2824	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
2825	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
2826};
2827typedef union cvmx_agl_gmx_rxx_stats_pkts_dmac cvmx_agl_gmx_rxx_stats_pkts_dmac_t;
2828
2829/**
2830 * cvmx_agl_gmx_rx#_stats_pkts_drp
2831 *
2832 * AGL_GMX_RX_STATS_PKTS_DRP
2833 *
2834 * Count of all packets received that were dropped due to a full receive
2835 * FIFO.  This counts good and bad packets received - all packets dropped by
2836 * the FIFO.  It does not count packets dropped by the dmac or pause packet
2837 * filters.
2838 *
2839 * Notes:
2840 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2841 * - Counters will wrap
2842 * - Not reset when MIX*_CTL[RESET] is set to 1.
2843 */
2844union cvmx_agl_gmx_rxx_stats_pkts_drp
2845{
2846	uint64_t u64;
2847	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s
2848	{
2849#if __BYTE_ORDER == __BIG_ENDIAN
2850	uint64_t reserved_32_63               : 32;
2851	uint64_t cnt                          : 32; /**< Count of dropped packets */
2852#else
2853	uint64_t cnt                          : 32;
2854	uint64_t reserved_32_63               : 32;
2855#endif
2856	} s;
2857	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
2858	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
2859	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
2860	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
2861	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
2862	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
2863};
2864typedef union cvmx_agl_gmx_rxx_stats_pkts_drp cvmx_agl_gmx_rxx_stats_pkts_drp_t;
2865
2866/**
2867 * cvmx_agl_gmx_rx#_udd_skp
2868 *
2869 * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
2870 *
2871 *
2872 * Notes:
2873 * (1) The skip bytes are part of the packet and will be sent down the NCB
2874 *     packet interface and will be handled by PKI.
2875 *
2876 * (2) The system can determine if the UDD bytes are included in the FCS check
2877 *     by using the FCSSEL field - if the FCS check is enabled.
2878 *
2879 * (3) Assume that the preamble/sfd is always at the start of the frame - even
2880 *     before UDD bytes.  In most cases there will be no preamble, since
2881 *     this is typically MII to MII communication without a PHY
2882 *     involved.
2883 *
2884 * (4) We can still do address filtering and control packet filtering if the
2885 *     user desires.
2886 *
2887 * (5) UDD_SKP must be 0 in half-duplex operation unless
2888 *     AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear.  If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
2889 *     then UDD_SKP will normally be 8.
2890 *
2891 * (6) In all cases, the UDD bytes will be sent down the packet interface as
2892 *     part of the packet.  The UDD bytes are never stripped from the actual
2893 *     packet.
2894 *
2895 * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
2896 *
2897 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2898 */
2899union cvmx_agl_gmx_rxx_udd_skp
2900{
2901	uint64_t u64;
2902	struct cvmx_agl_gmx_rxx_udd_skp_s
2903	{
2904#if __BYTE_ORDER == __BIG_ENDIAN
2905	uint64_t reserved_9_63                : 55;
2906	uint64_t fcssel                       : 1;  /**< Include the skip bytes in the FCS calculation
2907                                                         0 = all skip bytes are included in FCS
2908                                                         1 = the skip bytes are not included in FCS */
2909	uint64_t reserved_7_7                 : 1;
2910	uint64_t len                          : 7;  /**< Amount of User-defined data before the start of
2911                                                         the L2 data.  Zero means L2 comes first.
2912                                                         Max value is 64. */
2913#else
2914	uint64_t len                          : 7;
2915	uint64_t reserved_7_7                 : 1;
2916	uint64_t fcssel                       : 1;
2917	uint64_t reserved_9_63                : 55;
2918#endif
2919	} s;
2920	struct cvmx_agl_gmx_rxx_udd_skp_s     cn52xx;
2921	struct cvmx_agl_gmx_rxx_udd_skp_s     cn52xxp1;
2922	struct cvmx_agl_gmx_rxx_udd_skp_s     cn56xx;
2923	struct cvmx_agl_gmx_rxx_udd_skp_s     cn56xxp1;
2924	struct cvmx_agl_gmx_rxx_udd_skp_s     cn63xx;
2925	struct cvmx_agl_gmx_rxx_udd_skp_s     cn63xxp1;
2926};
2927typedef union cvmx_agl_gmx_rxx_udd_skp cvmx_agl_gmx_rxx_udd_skp_t;
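
/* Illustrative sketch (not part of the auto-generated definitions): place
 * LEN bytes of user-defined data ahead of the L2 header and, per note (2),
 * choose whether those bytes participate in the FCS check.  LEN is limited
 * to 64 and must be 0 in half-duplex unless PRE_CHK is clear (note 5).
 * Assumes the per-port address macro CVMX_AGL_GMX_RXX_UDD_SKP(port) defined
 * earlier in this file and cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_rx_set_udd_skip(int port, unsigned len,
					       int include_in_fcs)
{
	cvmx_agl_gmx_rxx_udd_skp_t udd;

	udd.u64 = 0;
	udd.s.len = (len > 64) ? 64 : len;	/* max value is 64 */
	udd.s.fcssel = include_in_fcs ? 0 : 1;	/* 0 = skip bytes included in FCS */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_UDD_SKP(port), udd.u64);
}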
2928
2929/**
2930 * cvmx_agl_gmx_rx_bp_drop#
2931 *
2932 * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
2933 *
2934 *
2935 * Notes:
2936 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2937 *
2938 */
2939union cvmx_agl_gmx_rx_bp_dropx
2940{
2941	uint64_t u64;
2942	struct cvmx_agl_gmx_rx_bp_dropx_s
2943	{
2944#if __BYTE_ORDER == __BIG_ENDIAN
2945	uint64_t reserved_6_63                : 58;
2946	uint64_t mark                         : 6;  /**< Number of 8B ticks to reserve in the RX FIFO.
2947                                                         When the FIFO exceeds this count, packets will
2948                                                         be dropped and not buffered.
2949                                                         MARK should typically be programmed to 2.
2950                                                         Failure to program correctly can lead to system
2951                                                         instability. */
2952#else
2953	uint64_t mark                         : 6;
2954	uint64_t reserved_6_63                : 58;
2955#endif
2956	} s;
2957	struct cvmx_agl_gmx_rx_bp_dropx_s     cn52xx;
2958	struct cvmx_agl_gmx_rx_bp_dropx_s     cn52xxp1;
2959	struct cvmx_agl_gmx_rx_bp_dropx_s     cn56xx;
2960	struct cvmx_agl_gmx_rx_bp_dropx_s     cn56xxp1;
2961	struct cvmx_agl_gmx_rx_bp_dropx_s     cn63xx;
2962	struct cvmx_agl_gmx_rx_bp_dropx_s     cn63xxp1;
2963};
2964typedef union cvmx_agl_gmx_rx_bp_dropx cvmx_agl_gmx_rx_bp_dropx_t;
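
/* Illustrative sketch (not part of the auto-generated definitions): program
 * the drop mark to the typical value of 2 called out in the field
 * description above.  Assumes the per-port address macro
 * CVMX_AGL_GMX_RX_BP_DROPX(port) defined earlier in this file and
 * cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_rx_set_bp_drop(int port)
{
	cvmx_agl_gmx_rx_bp_dropx_t drop;

	drop.u64 = 0;
	drop.s.mark = 2;	/* reserve 2 8B ticks in the RX FIFO */
	cvmx_write_csr(CVMX_AGL_GMX_RX_BP_DROPX(port), drop.u64);
}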
2965
2966/**
2967 * cvmx_agl_gmx_rx_bp_off#
2968 *
2969 * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
2970 *
2971 *
2972 * Notes:
2973 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2974 *
2975 */
2976union cvmx_agl_gmx_rx_bp_offx
2977{
2978	uint64_t u64;
2979	struct cvmx_agl_gmx_rx_bp_offx_s
2980	{
2981#if __BYTE_ORDER == __BIG_ENDIAN
2982	uint64_t reserved_6_63                : 58;
2983	uint64_t mark                         : 6;  /**< Water mark (8B ticks) to deassert backpressure */
2984#else
2985	uint64_t mark                         : 6;
2986	uint64_t reserved_6_63                : 58;
2987#endif
2988	} s;
2989	struct cvmx_agl_gmx_rx_bp_offx_s      cn52xx;
2990	struct cvmx_agl_gmx_rx_bp_offx_s      cn52xxp1;
2991	struct cvmx_agl_gmx_rx_bp_offx_s      cn56xx;
2992	struct cvmx_agl_gmx_rx_bp_offx_s      cn56xxp1;
2993	struct cvmx_agl_gmx_rx_bp_offx_s      cn63xx;
2994	struct cvmx_agl_gmx_rx_bp_offx_s      cn63xxp1;
2995};
2996typedef union cvmx_agl_gmx_rx_bp_offx cvmx_agl_gmx_rx_bp_offx_t;
2997
2998/**
2999 * cvmx_agl_gmx_rx_bp_on#
3000 *
3001 * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
3002 *
3003 *
3004 * Notes:
3005 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3006 *
3007 */
3008union cvmx_agl_gmx_rx_bp_onx
3009{
3010	uint64_t u64;
3011	struct cvmx_agl_gmx_rx_bp_onx_s
3012	{
3013#if __BYTE_ORDER == __BIG_ENDIAN
3014	uint64_t reserved_9_63                : 55;
3015	uint64_t mark                         : 9;  /**< Hiwater mark (8B ticks) for backpressure. */
3016#else
3017	uint64_t mark                         : 9;
3018	uint64_t reserved_9_63                : 55;
3019#endif
3020	} s;
3021	struct cvmx_agl_gmx_rx_bp_onx_s       cn52xx;
3022	struct cvmx_agl_gmx_rx_bp_onx_s       cn52xxp1;
3023	struct cvmx_agl_gmx_rx_bp_onx_s       cn56xx;
3024	struct cvmx_agl_gmx_rx_bp_onx_s       cn56xxp1;
3025	struct cvmx_agl_gmx_rx_bp_onx_s       cn63xx;
3026	struct cvmx_agl_gmx_rx_bp_onx_s       cn63xxp1;
3027};
3028typedef union cvmx_agl_gmx_rx_bp_onx cvmx_agl_gmx_rx_bp_onx_t;
3029
3030/**
3031 * cvmx_agl_gmx_rx_prt_info
3032 *
3033 * AGL_GMX_RX_PRT_INFO = state information for the ports
3034 *
3035 *
3036 * Notes:
3037 * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
3038 * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
3039 */
3040union cvmx_agl_gmx_rx_prt_info
3041{
3042	uint64_t u64;
3043	struct cvmx_agl_gmx_rx_prt_info_s
3044	{
3045#if __BYTE_ORDER == __BIG_ENDIAN
3046	uint64_t reserved_18_63               : 46;
3047	uint64_t drop                         : 2;  /**< Port indication that data was dropped */
3048	uint64_t reserved_2_15                : 14;
3049	uint64_t commit                       : 2;  /**< Port indication that SOP was accepted */
3050#else
3051	uint64_t commit                       : 2;
3052	uint64_t reserved_2_15                : 14;
3053	uint64_t drop                         : 2;
3054	uint64_t reserved_18_63               : 46;
3055#endif
3056	} s;
3057	struct cvmx_agl_gmx_rx_prt_info_s     cn52xx;
3058	struct cvmx_agl_gmx_rx_prt_info_s     cn52xxp1;
3059	struct cvmx_agl_gmx_rx_prt_info_cn56xx
3060	{
3061#if __BYTE_ORDER == __BIG_ENDIAN
3062	uint64_t reserved_17_63               : 47;
3063	uint64_t drop                         : 1;  /**< Port indication that data was dropped */
3064	uint64_t reserved_1_15                : 15;
3065	uint64_t commit                       : 1;  /**< Port indication that SOP was accepted */
3066#else
3067	uint64_t commit                       : 1;
3068	uint64_t reserved_1_15                : 15;
3069	uint64_t drop                         : 1;
3070	uint64_t reserved_17_63               : 47;
3071#endif
3072	} cn56xx;
3073	struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
3074	struct cvmx_agl_gmx_rx_prt_info_s     cn63xx;
3075	struct cvmx_agl_gmx_rx_prt_info_s     cn63xxp1;
3076};
3077typedef union cvmx_agl_gmx_rx_prt_info cvmx_agl_gmx_rx_prt_info_t;
3078
3079/**
3080 * cvmx_agl_gmx_rx_tx_status
3081 *
3082 * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
3083 *
3084 *
3085 * Notes:
3086 * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
3087 * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
3088 */
3089union cvmx_agl_gmx_rx_tx_status
3090{
3091	uint64_t u64;
3092	struct cvmx_agl_gmx_rx_tx_status_s
3093	{
3094#if __BYTE_ORDER == __BIG_ENDIAN
3095	uint64_t reserved_6_63                : 58;
3096	uint64_t tx                           : 2;  /**< Transmit data since last read */
3097	uint64_t reserved_2_3                 : 2;
3098	uint64_t rx                           : 2;  /**< Receive data since last read */
3099#else
3100	uint64_t rx                           : 2;
3101	uint64_t reserved_2_3                 : 2;
3102	uint64_t tx                           : 2;
3103	uint64_t reserved_6_63                : 58;
3104#endif
3105	} s;
3106	struct cvmx_agl_gmx_rx_tx_status_s    cn52xx;
3107	struct cvmx_agl_gmx_rx_tx_status_s    cn52xxp1;
3108	struct cvmx_agl_gmx_rx_tx_status_cn56xx
3109	{
3110#if __BYTE_ORDER == __BIG_ENDIAN
3111	uint64_t reserved_5_63                : 59;
3112	uint64_t tx                           : 1;  /**< Transmit data since last read */
3113	uint64_t reserved_1_3                 : 3;
3114	uint64_t rx                           : 1;  /**< Receive data since last read */
3115#else
3116	uint64_t rx                           : 1;
3117	uint64_t reserved_1_3                 : 3;
3118	uint64_t tx                           : 1;
3119	uint64_t reserved_5_63                : 59;
3120#endif
3121	} cn56xx;
3122	struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
3123	struct cvmx_agl_gmx_rx_tx_status_s    cn63xx;
3124	struct cvmx_agl_gmx_rx_tx_status_s    cn63xxp1;
3125};
3126typedef union cvmx_agl_gmx_rx_tx_status cvmx_agl_gmx_rx_tx_status_t;
3127
3128/**
3129 * cvmx_agl_gmx_smac#
3130 *
3131 * AGL_GMX_SMAC = Packet SMAC
3132 *
3133 *
3134 * Notes:
3135 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3136 *
3137 */
3138union cvmx_agl_gmx_smacx
3139{
3140	uint64_t u64;
3141	struct cvmx_agl_gmx_smacx_s
3142	{
3143#if __BYTE_ORDER == __BIG_ENDIAN
3144	uint64_t reserved_48_63               : 16;
3145	uint64_t smac                         : 48; /**< The SMAC field is used for generating and
3146                                                         accepting Control Pause packets */
3147#else
3148	uint64_t smac                         : 48;
3149	uint64_t reserved_48_63               : 16;
3150#endif
3151	} s;
3152	struct cvmx_agl_gmx_smacx_s           cn52xx;
3153	struct cvmx_agl_gmx_smacx_s           cn52xxp1;
3154	struct cvmx_agl_gmx_smacx_s           cn56xx;
3155	struct cvmx_agl_gmx_smacx_s           cn56xxp1;
3156	struct cvmx_agl_gmx_smacx_s           cn63xx;
3157	struct cvmx_agl_gmx_smacx_s           cn63xxp1;
3158};
3159typedef union cvmx_agl_gmx_smacx cvmx_agl_gmx_smacx_t;
3160
3161/**
3162 * cvmx_agl_gmx_stat_bp
3163 *
3164 * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
3165 *
3166 *
3167 * Notes:
3168 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
3169 *
3170 */
3171union cvmx_agl_gmx_stat_bp
3172{
3173	uint64_t u64;
3174	struct cvmx_agl_gmx_stat_bp_s
3175	{
3176#if __BYTE_ORDER == __BIG_ENDIAN
3177	uint64_t reserved_17_63               : 47;
3178	uint64_t bp                           : 1;  /**< Current BP state */
3179	uint64_t cnt                          : 16; /**< Number of cycles that BP has been asserted
3180                                                         Saturating counter */
3181#else
3182	uint64_t cnt                          : 16;
3183	uint64_t bp                           : 1;
3184	uint64_t reserved_17_63               : 47;
3185#endif
3186	} s;
3187	struct cvmx_agl_gmx_stat_bp_s         cn52xx;
3188	struct cvmx_agl_gmx_stat_bp_s         cn52xxp1;
3189	struct cvmx_agl_gmx_stat_bp_s         cn56xx;
3190	struct cvmx_agl_gmx_stat_bp_s         cn56xxp1;
3191	struct cvmx_agl_gmx_stat_bp_s         cn63xx;
3192	struct cvmx_agl_gmx_stat_bp_s         cn63xxp1;
3193};
3194typedef union cvmx_agl_gmx_stat_bp cvmx_agl_gmx_stat_bp_t;
3195
3196/**
3197 * cvmx_agl_gmx_tx#_append
3198 *
3199 * AGL_GMX_TX_APPEND = Packet TX Append Control
3200 *
3201 *
3202 * Notes:
3203 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3204 *
3205 */
3206union cvmx_agl_gmx_txx_append
3207{
3208	uint64_t u64;
3209	struct cvmx_agl_gmx_txx_append_s
3210	{
3211#if __BYTE_ORDER == __BIG_ENDIAN
3212	uint64_t reserved_4_63                : 60;
3213	uint64_t force_fcs                    : 1;  /**< Append the Ethernet FCS on each pause packet
3214                                                         when FCS is clear.  Pause packets are normally
3215                                                         padded to 60 bytes.  If
3216                                                         AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
3217                                                         FORCE_FCS will not be used. */
3218	uint64_t fcs                          : 1;  /**< Append the Ethernet FCS on each packet */
3219	uint64_t pad                          : 1;  /**< Append PAD bytes so the frame is min sized */
3220	uint64_t preamble                     : 1;  /**< Prepend the Ethernet preamble on each transfer */
3221#else
3222	uint64_t preamble                     : 1;
3223	uint64_t pad                          : 1;
3224	uint64_t fcs                          : 1;
3225	uint64_t force_fcs                    : 1;
3226	uint64_t reserved_4_63                : 60;
3227#endif
3228	} s;
3229	struct cvmx_agl_gmx_txx_append_s      cn52xx;
3230	struct cvmx_agl_gmx_txx_append_s      cn52xxp1;
3231	struct cvmx_agl_gmx_txx_append_s      cn56xx;
3232	struct cvmx_agl_gmx_txx_append_s      cn56xxp1;
3233	struct cvmx_agl_gmx_txx_append_s      cn63xx;
3234	struct cvmx_agl_gmx_txx_append_s      cn63xxp1;
3235};
3236typedef union cvmx_agl_gmx_txx_append cvmx_agl_gmx_txx_append_t;
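
/* Illustrative sketch (not part of the auto-generated definitions): a
 * typical Ethernet framing setup - prepend the preamble, pad short frames,
 * and append the FCS.  Assumes the per-port address macro
 * CVMX_AGL_GMX_TXX_APPEND(port) defined earlier in this file and
 * cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_tx_set_append(int port)
{
	cvmx_agl_gmx_txx_append_t append;

	append.u64 = 0;
	append.s.preamble = 1;	/* prepend the Ethernet preamble    */
	append.s.pad = 1;	/* pad up to the minimum frame size */
	append.s.fcs = 1;	/* append the Ethernet FCS          */
	cvmx_write_csr(CVMX_AGL_GMX_TXX_APPEND(port), append.u64);
}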
3237
3238/**
3239 * cvmx_agl_gmx_tx#_clk
3240 *
3241 * AGL_GMX_TX_CLK = RGMII TX Clock Generation Register
3242 *
3243 *
3244 * Notes:
3245 * Normal Programming Values:
3246 *  (1) RGMII, 1000Mbs   (AGL_GMX_PRT_CFG[SPEED]==1), CLK_CNT == 1
3247 *  (2) RGMII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 50/5
3248 *  (3) MII,   10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 1
3249 *
3250 * RGMII Example:
3251 *  Given a 125MHz PLL reference clock...
3252 *   CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns* 1)
3253 *   CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns* 5)
3254 *   CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns*50)
3255 *
3256 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3257 */
3258union cvmx_agl_gmx_txx_clk
3259{
3260	uint64_t u64;
3261	struct cvmx_agl_gmx_txx_clk_s
3262	{
3263#if __BYTE_ORDER == __BIG_ENDIAN
3264	uint64_t reserved_6_63                : 58;
3265	uint64_t clk_cnt                      : 6;  /**< Controls the RGMII TXC frequency                   |             NS
3266                                                         TXC(period) =
3267                                                          rgm_ref_clk(period)*CLK_CNT */
3268#else
3269	uint64_t clk_cnt                      : 6;
3270	uint64_t reserved_6_63                : 58;
3271#endif
3272	} s;
3273	struct cvmx_agl_gmx_txx_clk_s         cn63xx;
3274	struct cvmx_agl_gmx_txx_clk_s         cn63xxp1;
3275};
3276typedef union cvmx_agl_gmx_txx_clk cvmx_agl_gmx_txx_clk_t;
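
/* Illustrative sketch (not part of the auto-generated definitions): pick
 * CLK_CNT from the "Normal Programming Values" table above, given a 125MHz
 * rgm_ref_clk (TXC period = 8ns * CLK_CNT).  The is_rgmii/mbps arguments
 * are hypothetical inputs for this example only.  Assumes the per-port
 * address macro CVMX_AGL_GMX_TXX_CLK(port) defined earlier in this file and
 * cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_tx_set_clk(int port, int is_rgmii, int mbps)
{
	cvmx_agl_gmx_txx_clk_t clk;

	clk.u64 = 0;
	if (!is_rgmii)
		clk.s.clk_cnt = 1;	/* MII, 10/100Mbps        */
	else if (mbps == 1000)
		clk.s.clk_cnt = 1;	/* RGMII, 125MHz TXC      */
	else if (mbps == 100)
		clk.s.clk_cnt = 5;	/* RGMII, 25MHz TXC       */
	else
		clk.s.clk_cnt = 50;	/* RGMII, 2.5MHz TXC      */
	cvmx_write_csr(CVMX_AGL_GMX_TXX_CLK(port), clk.u64);
}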
3277
3278/**
3279 * cvmx_agl_gmx_tx#_ctl
3280 *
3281 * AGL_GMX_TX_CTL = TX Control register
3282 *
3283 *
3284 * Notes:
3285 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3286 *
3287 */
3288union cvmx_agl_gmx_txx_ctl
3289{
3290	uint64_t u64;
3291	struct cvmx_agl_gmx_txx_ctl_s
3292	{
3293#if __BYTE_ORDER == __BIG_ENDIAN
3294	uint64_t reserved_2_63                : 62;
3295	uint64_t xsdef_en                     : 1;  /**< Enables the excessive deferral check for stats
3296                                                         and interrupts */
3297	uint64_t xscol_en                     : 1;  /**< Enables the excessive collision check for stats
3298                                                         and interrupts */
3299#else
3300	uint64_t xscol_en                     : 1;
3301	uint64_t xsdef_en                     : 1;
3302	uint64_t reserved_2_63                : 62;
3303#endif
3304	} s;
3305	struct cvmx_agl_gmx_txx_ctl_s         cn52xx;
3306	struct cvmx_agl_gmx_txx_ctl_s         cn52xxp1;
3307	struct cvmx_agl_gmx_txx_ctl_s         cn56xx;
3308	struct cvmx_agl_gmx_txx_ctl_s         cn56xxp1;
3309	struct cvmx_agl_gmx_txx_ctl_s         cn63xx;
3310	struct cvmx_agl_gmx_txx_ctl_s         cn63xxp1;
3311};
3312typedef union cvmx_agl_gmx_txx_ctl cvmx_agl_gmx_txx_ctl_t;
3313
3314/**
3315 * cvmx_agl_gmx_tx#_min_pkt
3316 *
3317 * AGL_GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD up to min size)
3318 *
3319 *
3320 * Notes:
3321 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3322 *
3323 */
3324union cvmx_agl_gmx_txx_min_pkt
3325{
3326	uint64_t u64;
3327	struct cvmx_agl_gmx_txx_min_pkt_s
3328	{
3329#if __BYTE_ORDER == __BIG_ENDIAN
3330	uint64_t reserved_8_63                : 56;
3331	uint64_t min_size                     : 8;  /**< Min frame in bytes before the FCS is applied
3332                                                         Padding is only appended when
3333                                                         AGL_GMX_TX_APPEND[PAD] for the corresponding packet
3334                                                         port is set. Packets will be padded to
3335                                                         MIN_SIZE+1. The reset value will pad to 60 bytes. */
3336#else
3337	uint64_t min_size                     : 8;
3338	uint64_t reserved_8_63                : 56;
3339#endif
3340	} s;
3341	struct cvmx_agl_gmx_txx_min_pkt_s     cn52xx;
3342	struct cvmx_agl_gmx_txx_min_pkt_s     cn52xxp1;
3343	struct cvmx_agl_gmx_txx_min_pkt_s     cn56xx;
3344	struct cvmx_agl_gmx_txx_min_pkt_s     cn56xxp1;
3345	struct cvmx_agl_gmx_txx_min_pkt_s     cn63xx;
3346	struct cvmx_agl_gmx_txx_min_pkt_s     cn63xxp1;
3347};
3348typedef union cvmx_agl_gmx_txx_min_pkt cvmx_agl_gmx_txx_min_pkt_t;
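
/* Illustrative sketch (not part of the auto-generated definitions): since
 * frames are padded to MIN_SIZE+1 bytes, programming 59 pads to the usual
 * 60 bytes before the FCS (the reset behaviour described above).  Assumes
 * the per-port address macro CVMX_AGL_GMX_TXX_MIN_PKT(port) defined earlier
 * in this file and cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_tx_set_min_pkt(int port)
{
	cvmx_agl_gmx_txx_min_pkt_t min_pkt;

	min_pkt.u64 = 0;
	min_pkt.s.min_size = 59;	/* pad to 59+1 = 60 bytes */
	cvmx_write_csr(CVMX_AGL_GMX_TXX_MIN_PKT(port), min_pkt.u64);
}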
3349
3350/**
3351 * cvmx_agl_gmx_tx#_pause_pkt_interval
3352 *
3353 * AGL_GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
3354 *
3355 *
3356 * Notes:
3357 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
3358 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging for the system
3359 * designer.  It is suggested that TIME be much greater than INTERVAL and
3360 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
3361 * count and then when the backpressure condition is lifted, a PAUSE packet
3362 * with TIME==0 will be sent indicating that Octane is ready for additional
3363 * data.
3364 *
3365 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
3366 * suggested that TIME and INTERVAL are programmed such that they satisfy the
3367 * following rule...
3368 *
3369 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
3370 *
3371 * where largest_pkt_size is the largest packet that the system can send
3372 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
3373 * of the PAUSE packet (normally 64B).
3374 *
3375 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3376 */
3377union cvmx_agl_gmx_txx_pause_pkt_interval
3378{
3379	uint64_t u64;
3380	struct cvmx_agl_gmx_txx_pause_pkt_interval_s
3381	{
3382#if __BYTE_ORDER == __BIG_ENDIAN
3383	uint64_t reserved_16_63               : 48;
3384	uint64_t interval                     : 16; /**< Arbitrate for a pause packet every (INTERVAL*512)
3385                                                         bit-times.
3386                                                         Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
3387                                                         INTERVAL=0, will only send a single PAUSE packet
3388                                                         for each backpressure event */
3389#else
3390	uint64_t interval                     : 16;
3391	uint64_t reserved_16_63               : 48;
3392#endif
3393	} s;
3394	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
3395	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
3396	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
3397	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
3398	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
3399	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
3400};
3401typedef union cvmx_agl_gmx_txx_pause_pkt_interval cvmx_agl_gmx_txx_pause_pkt_interval_t;
3402
3403/**
3404 * cvmx_agl_gmx_tx#_pause_pkt_time
3405 *
3406 * AGL_GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
3407 *
3408 *
3409 * Notes:
3410 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
3411 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging for the system
3412 * designer.  It is suggested that TIME be much greater than INTERVAL and
3413 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
3414 * count and then when the backpressure condition is lifted, a PAUSE packet
3415 * with TIME==0 will be sent indicating that Octane is ready for additional
3416 * data.
3417 *
3418 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
3419 * suggested that TIME and INTERVAL are programmed such that they satisfy the
3420 * following rule...
3421 *
3422 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
3423 *
3424 * where largest_pkt_size is the largest packet that the system can send
3425 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
3426 * of the PAUSE packet (normally 64B).
3427 *
3428 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3429 */
3430union cvmx_agl_gmx_txx_pause_pkt_time
3431{
3432	uint64_t u64;
3433	struct cvmx_agl_gmx_txx_pause_pkt_time_s
3434	{
3435#if __BYTE_ORDER == __BIG_ENDIAN
3436	uint64_t reserved_16_63               : 48;
3437	uint64_t time                         : 16; /**< The pause_time field placed in outbound pause pkts
3438                                                         pause_time is in 512 bit-times
3439                                                         Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
3440#else
3441	uint64_t time                         : 16;
3442	uint64_t reserved_16_63               : 48;
3443#endif
3444	} s;
3445	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
3446	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
3447	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
3448	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
3449	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
3450	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
3451};
3452typedef union cvmx_agl_gmx_txx_pause_pkt_time cvmx_agl_gmx_txx_pause_pkt_time_t;
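
/* Illustrative sketch (not part of the auto-generated definitions): program
 * TIME and INTERVAL (both in 512 bit-time units) so that
 * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size), per the
 * notes above, using the typical 1518B max frame, 12B IFG and 64B PAUSE
 * packet.  Assumes the per-port address macros
 * CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(port) and
 * CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(port) defined earlier in this file
 * and cvmx_write_csr() from cvmx.h.
 */
static inline void example_agl_tx_set_pause_timing(int port, uint64_t time)
{
	cvmx_agl_gmx_txx_pause_pkt_time_t pkt_time;
	cvmx_agl_gmx_txx_pause_pkt_interval_t pkt_interval;
	/* (1518 + 12 + 64) bytes * 8 = 12752 bit-times, about 25 units of 512 */
	const uint64_t slack = ((1518 + 12 + 64) * 8 + 511) / 512;

	pkt_time.u64 = 0;
	pkt_time.s.time = time;
	cvmx_write_csr(CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(port), pkt_time.u64);

	pkt_interval.u64 = 0;
	pkt_interval.s.interval = (time > slack) ? (time - slack) : 0;
	cvmx_write_csr(CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(port), pkt_interval.u64);
}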
3453
3454/**
3455 * cvmx_agl_gmx_tx#_pause_togo
3456 *
3457 * AGL_GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
3458 *
3459 *
3460 * Notes:
3461 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3462 *
3463 */
3464union cvmx_agl_gmx_txx_pause_togo
3465{
3466	uint64_t u64;
3467	struct cvmx_agl_gmx_txx_pause_togo_s
3468	{
3469#if __BYTE_ORDER == __BIG_ENDIAN
3470	uint64_t reserved_16_63               : 48;
3471	uint64_t time                         : 16; /**< Amount of time remaining to backpressure */
3472#else
3473	uint64_t time                         : 16;
3474	uint64_t reserved_16_63               : 48;
3475#endif
3476	} s;
3477	struct cvmx_agl_gmx_txx_pause_togo_s  cn52xx;
3478	struct cvmx_agl_gmx_txx_pause_togo_s  cn52xxp1;
3479	struct cvmx_agl_gmx_txx_pause_togo_s  cn56xx;
3480	struct cvmx_agl_gmx_txx_pause_togo_s  cn56xxp1;
3481	struct cvmx_agl_gmx_txx_pause_togo_s  cn63xx;
3482	struct cvmx_agl_gmx_txx_pause_togo_s  cn63xxp1;
3483};
3484typedef union cvmx_agl_gmx_txx_pause_togo cvmx_agl_gmx_txx_pause_togo_t;
3485
3486/**
3487 * cvmx_agl_gmx_tx#_pause_zero
3488 *
3489 * AGL_GMX_TX_PAUSE_ZERO = Packet TX Send a zero pause_time PAUSE packet when backpressure deasserts
3490 *
3491 *
3492 * Notes:
3493 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3494 *
3495 */
3496union cvmx_agl_gmx_txx_pause_zero
3497{
3498	uint64_t u64;
3499	struct cvmx_agl_gmx_txx_pause_zero_s
3500	{
3501#if __BYTE_ORDER == __BIG_ENDIAN
3502	uint64_t reserved_1_63                : 63;
3503	uint64_t send                         : 1;  /**< When backpressure condition clear, send PAUSE
3504                                                         packet with pause_time of zero to enable the
3505                                                         channel */
3506#else
3507	uint64_t send                         : 1;
3508	uint64_t reserved_1_63                : 63;
3509#endif
3510	} s;
3511	struct cvmx_agl_gmx_txx_pause_zero_s  cn52xx;
3512	struct cvmx_agl_gmx_txx_pause_zero_s  cn52xxp1;
3513	struct cvmx_agl_gmx_txx_pause_zero_s  cn56xx;
3514	struct cvmx_agl_gmx_txx_pause_zero_s  cn56xxp1;
3515	struct cvmx_agl_gmx_txx_pause_zero_s  cn63xx;
3516	struct cvmx_agl_gmx_txx_pause_zero_s  cn63xxp1;
3517};
3518typedef union cvmx_agl_gmx_txx_pause_zero cvmx_agl_gmx_txx_pause_zero_t;
3519
3520/**
3521 * cvmx_agl_gmx_tx#_soft_pause
3522 *
3523 * AGL_GMX_TX_SOFT_PAUSE = Packet TX Software Pause
3524 *
3525 *
3526 * Notes:
3527 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3528 *
3529 */
3530union cvmx_agl_gmx_txx_soft_pause
3531{
3532	uint64_t u64;
3533	struct cvmx_agl_gmx_txx_soft_pause_s
3534	{
3535#if __BYTE_ORDER == __BIG_ENDIAN
3536	uint64_t reserved_16_63               : 48;
3537	uint64_t time                         : 16; /**< Back off the TX bus for (TIME*512) bit-times
3538                                                         for full-duplex operation only */
3539#else
3540	uint64_t time                         : 16;
3541	uint64_t reserved_16_63               : 48;
3542#endif
3543	} s;
3544	struct cvmx_agl_gmx_txx_soft_pause_s  cn52xx;
3545	struct cvmx_agl_gmx_txx_soft_pause_s  cn52xxp1;
3546	struct cvmx_agl_gmx_txx_soft_pause_s  cn56xx;
3547	struct cvmx_agl_gmx_txx_soft_pause_s  cn56xxp1;
3548	struct cvmx_agl_gmx_txx_soft_pause_s  cn63xx;
3549	struct cvmx_agl_gmx_txx_soft_pause_s  cn63xxp1;
3550};
3551typedef union cvmx_agl_gmx_txx_soft_pause cvmx_agl_gmx_txx_soft_pause_t;
3552
3553/**
3554 * cvmx_agl_gmx_tx#_stat0
3555 *
3556 * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
3557 *
3558 *
3559 * Notes:
3560 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3561 * - Counters will wrap
3562 * - Not reset when MIX*_CTL[RESET] is set to 1.
3563 */
3564union cvmx_agl_gmx_txx_stat0
3565{
3566	uint64_t u64;
3567	struct cvmx_agl_gmx_txx_stat0_s
3568	{
3569#if __BYTE_ORDER == __BIG_ENDIAN
3570	uint64_t xsdef                        : 32; /**< Number of packets dropped (never successfully
3571                                                         sent) due to excessive deferral
3572	uint64_t xscol                        : 32; /**< Number of packets dropped (never successfully
3573                                                         sent) due to excessive collision.  Defined by
3574                                                         AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
3575#else
3576	uint64_t xscol                        : 32;
3577	uint64_t xsdef                        : 32;
3578#endif
3579	} s;
3580	struct cvmx_agl_gmx_txx_stat0_s       cn52xx;
3581	struct cvmx_agl_gmx_txx_stat0_s       cn52xxp1;
3582	struct cvmx_agl_gmx_txx_stat0_s       cn56xx;
3583	struct cvmx_agl_gmx_txx_stat0_s       cn56xxp1;
3584	struct cvmx_agl_gmx_txx_stat0_s       cn63xx;
3585	struct cvmx_agl_gmx_txx_stat0_s       cn63xxp1;
3586};
3587typedef union cvmx_agl_gmx_txx_stat0 cvmx_agl_gmx_txx_stat0_t;
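
/* Illustrative sketch (not part of the auto-generated definitions): sample
 * the excessive-deferral and excessive-collision drop counters and clear
 * them with a write, as described in the notes above.  Assumes the per-port
 * address macro CVMX_AGL_GMX_TXX_STAT0(port) defined earlier in this file
 * and the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h.
 */
static inline void example_agl_tx_sample_stat0(int port, uint32_t *xsdef,
					       uint32_t *xscol)
{
	cvmx_agl_gmx_txx_stat0_t stat0;

	stat0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
	*xsdef = stat0.s.xsdef;
	*xscol = stat0.s.xscol;
	/* A write of any value clears the counters */
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
}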
3588
3589/**
3590 * cvmx_agl_gmx_tx#_stat1
3591 *
3592 * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL  / AGL_GMX_TX_STATS_MCOL
3593 *
3594 *
3595 * Notes:
3596 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3597 * - Counters will wrap
3598 * - Not reset when MIX*_CTL[RESET] is set to 1.
3599 */
3600union cvmx_agl_gmx_txx_stat1
3601{
3602	uint64_t u64;
3603	struct cvmx_agl_gmx_txx_stat1_s
3604	{
3605#if __BYTE_ORDER == __BIG_ENDIAN
3606	uint64_t scol                         : 32; /**< Number of packets sent with a single collision */
3607	uint64_t mcol                         : 32; /**< Number of packets sent with multiple collisions
3608                                                         but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
3609#else
3610	uint64_t mcol                         : 32;
3611	uint64_t scol                         : 32;
3612#endif
3613	} s;
3614	struct cvmx_agl_gmx_txx_stat1_s       cn52xx;
3615	struct cvmx_agl_gmx_txx_stat1_s       cn52xxp1;
3616	struct cvmx_agl_gmx_txx_stat1_s       cn56xx;
3617	struct cvmx_agl_gmx_txx_stat1_s       cn56xxp1;
3618	struct cvmx_agl_gmx_txx_stat1_s       cn63xx;
3619	struct cvmx_agl_gmx_txx_stat1_s       cn63xxp1;
3620};
3621typedef union cvmx_agl_gmx_txx_stat1 cvmx_agl_gmx_txx_stat1_t;
3622
3623/**
3624 * cvmx_agl_gmx_tx#_stat2
3625 *
3626 * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
3627 *
3628 *
3629 * Notes:
3630 * - Octet counts are the sum of all data transmitted on the wire including
3631 *   packet data, pad bytes, fcs bytes, pause bytes, and jam bytes.  The octet
3632 *   counts do not include PREAMBLE byte or EXTEND cycles.
3633 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3634 * - Counters will wrap
3635 * - Not reset when MIX*_CTL[RESET] is set to 1.
3636 */
3637union cvmx_agl_gmx_txx_stat2
3638{
3639	uint64_t u64;
3640	struct cvmx_agl_gmx_txx_stat2_s
3641	{
3642#if __BYTE_ORDER == __BIG_ENDIAN
3643	uint64_t reserved_48_63               : 16;
3644	uint64_t octs                         : 48; /**< Number of total octets sent on the interface.
3645                                                         Does not count octets from frames that were
3646                                                         truncated due to collisions in halfdup mode. */
3647#else
3648	uint64_t octs                         : 48;
3649	uint64_t reserved_48_63               : 16;
3650#endif
3651	} s;
3652	struct cvmx_agl_gmx_txx_stat2_s       cn52xx;
3653	struct cvmx_agl_gmx_txx_stat2_s       cn52xxp1;
3654	struct cvmx_agl_gmx_txx_stat2_s       cn56xx;
3655	struct cvmx_agl_gmx_txx_stat2_s       cn56xxp1;
3656	struct cvmx_agl_gmx_txx_stat2_s       cn63xx;
3657	struct cvmx_agl_gmx_txx_stat2_s       cn63xxp1;
3658};
3659typedef union cvmx_agl_gmx_txx_stat2 cvmx_agl_gmx_txx_stat2_t;
3660
3661/**
3662 * cvmx_agl_gmx_tx#_stat3
3663 *
3664 * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
3665 *
3666 *
3667 * Notes:
3668 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3669 * - Counters will wrap
3670 * - Not reset when MIX*_CTL[RESET] is set to 1.
3671 */
3672union cvmx_agl_gmx_txx_stat3
3673{
3674	uint64_t u64;
3675	struct cvmx_agl_gmx_txx_stat3_s
3676	{
3677#if __BYTE_ORDER == __BIG_ENDIAN
3678	uint64_t reserved_32_63               : 32;
3679	uint64_t pkts                         : 32; /**< Number of total frames sent on the interface.
3680                                                         Does not count frames that were truncated due to
3681                                                          collisions in halfdup mode. */
3682#else
3683	uint64_t pkts                         : 32;
3684	uint64_t reserved_32_63               : 32;
3685#endif
3686	} s;
3687	struct cvmx_agl_gmx_txx_stat3_s       cn52xx;
3688	struct cvmx_agl_gmx_txx_stat3_s       cn52xxp1;
3689	struct cvmx_agl_gmx_txx_stat3_s       cn56xx;
3690	struct cvmx_agl_gmx_txx_stat3_s       cn56xxp1;
3691	struct cvmx_agl_gmx_txx_stat3_s       cn63xx;
3692	struct cvmx_agl_gmx_txx_stat3_s       cn63xxp1;
3693};
3694typedef union cvmx_agl_gmx_txx_stat3 cvmx_agl_gmx_txx_stat3_t;
3695
3696/**
3697 * cvmx_agl_gmx_tx#_stat4
3698 *
3699 * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
3700 *
3701 *
3702 * Notes:
3703 * - Packet length is the sum of all data transmitted on the wire for the given
3704 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
3705 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
3706 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3707 * - Counters will wrap
3708 * - Not reset when MIX*_CTL[RESET] is set to 1.
3709 */
3710union cvmx_agl_gmx_txx_stat4
3711{
3712	uint64_t u64;
3713	struct cvmx_agl_gmx_txx_stat4_s
3714	{
3715#if __BYTE_ORDER == __BIG_ENDIAN
3716	uint64_t hist1                        : 32; /**< Number of packets sent with an octet count of 64. */
3717	uint64_t hist0                        : 32; /**< Number of packets sent with an octet count
3718                                                         of < 64. */
3719#else
3720	uint64_t hist0                        : 32;
3721	uint64_t hist1                        : 32;
3722#endif
3723	} s;
3724	struct cvmx_agl_gmx_txx_stat4_s       cn52xx;
3725	struct cvmx_agl_gmx_txx_stat4_s       cn52xxp1;
3726	struct cvmx_agl_gmx_txx_stat4_s       cn56xx;
3727	struct cvmx_agl_gmx_txx_stat4_s       cn56xxp1;
3728	struct cvmx_agl_gmx_txx_stat4_s       cn63xx;
3729	struct cvmx_agl_gmx_txx_stat4_s       cn63xxp1;
3730};
3731typedef union cvmx_agl_gmx_txx_stat4 cvmx_agl_gmx_txx_stat4_t;
3732
3733/**
3734 * cvmx_agl_gmx_tx#_stat5
3735 *
3736 * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
3737 *
3738 *
3739 * Notes:
3740 * - Packet length is the sum of all data transmitted on the wire for the given
3741 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
3742 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
3743 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3744 * - Counters will wrap
3745 * - Not reset when MIX*_CTL[RESET] is set to 1.
3746 */
3747union cvmx_agl_gmx_txx_stat5
3748{
3749	uint64_t u64;
3750	struct cvmx_agl_gmx_txx_stat5_s
3751	{
3752#if __BYTE_ORDER == __BIG_ENDIAN
3753	uint64_t hist3                        : 32; /**< Number of packets sent with an octet count of
3754                                                         128 - 255. */
3755	uint64_t hist2                        : 32; /**< Number of packets sent with an octet count of
3756                                                         65 - 127. */
3757#else
3758	uint64_t hist2                        : 32;
3759	uint64_t hist3                        : 32;
3760#endif
3761	} s;
3762	struct cvmx_agl_gmx_txx_stat5_s       cn52xx;
3763	struct cvmx_agl_gmx_txx_stat5_s       cn52xxp1;
3764	struct cvmx_agl_gmx_txx_stat5_s       cn56xx;
3765	struct cvmx_agl_gmx_txx_stat5_s       cn56xxp1;
3766	struct cvmx_agl_gmx_txx_stat5_s       cn63xx;
3767	struct cvmx_agl_gmx_txx_stat5_s       cn63xxp1;
3768};
3769typedef union cvmx_agl_gmx_txx_stat5 cvmx_agl_gmx_txx_stat5_t;
3770
3771/**
3772 * cvmx_agl_gmx_tx#_stat6
3773 *
3774 * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
3775 *
3776 *
3777 * Notes:
3778 * - Packet length is the sum of all data transmitted on the wire for the given
3779 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
3780 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
3781 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3782 * - Counters will wrap
3783 * - Not reset when MIX*_CTL[RESET] is set to 1.
3784 */
3785union cvmx_agl_gmx_txx_stat6
3786{
3787	uint64_t u64;
3788	struct cvmx_agl_gmx_txx_stat6_s
3789	{
3790#if __BYTE_ORDER == __BIG_ENDIAN
3791	uint64_t hist5                        : 32; /**< Number of packets sent with an octet count of
3792                                                         512 - 1023. */
3793	uint64_t hist4                        : 32; /**< Number of packets sent with an octet count of
3794                                                         256 - 511. */
3795#else
3796	uint64_t hist4                        : 32;
3797	uint64_t hist5                        : 32;
3798#endif
3799	} s;
3800	struct cvmx_agl_gmx_txx_stat6_s       cn52xx;
3801	struct cvmx_agl_gmx_txx_stat6_s       cn52xxp1;
3802	struct cvmx_agl_gmx_txx_stat6_s       cn56xx;
3803	struct cvmx_agl_gmx_txx_stat6_s       cn56xxp1;
3804	struct cvmx_agl_gmx_txx_stat6_s       cn63xx;
3805	struct cvmx_agl_gmx_txx_stat6_s       cn63xxp1;
3806};
3807typedef union cvmx_agl_gmx_txx_stat6 cvmx_agl_gmx_txx_stat6_t;
3808
3809/**
3810 * cvmx_agl_gmx_tx#_stat7
3811 *
3812 * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (>1518) / AGL_GMX_TX_STATS_HIST6 (1024-1518)
3813 *
3814 *
3815 * Notes:
3816 * - Packet length is the sum of all data transmitted on the wire for the given
3817 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
3818 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
3819 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3820 * - Counters will wrap
3821 * - Not reset when MIX*_CTL[RESET] is set to 1.
3822 */
3823union cvmx_agl_gmx_txx_stat7
3824{
3825	uint64_t u64;
3826	struct cvmx_agl_gmx_txx_stat7_s
3827	{
3828#if __BYTE_ORDER == __BIG_ENDIAN
3829	uint64_t hist7                        : 32; /**< Number of packets sent with an octet count
3830                                                         of > 1518. */
3831	uint64_t hist6                        : 32; /**< Number of packets sent with an octet count of
3832                                                         1024 - 1518. */
3833#else
3834	uint64_t hist6                        : 32;
3835	uint64_t hist7                        : 32;
3836#endif
3837	} s;
3838	struct cvmx_agl_gmx_txx_stat7_s       cn52xx;
3839	struct cvmx_agl_gmx_txx_stat7_s       cn52xxp1;
3840	struct cvmx_agl_gmx_txx_stat7_s       cn56xx;
3841	struct cvmx_agl_gmx_txx_stat7_s       cn56xxp1;
3842	struct cvmx_agl_gmx_txx_stat7_s       cn63xx;
3843	struct cvmx_agl_gmx_txx_stat7_s       cn63xxp1;
3844};
3845typedef union cvmx_agl_gmx_txx_stat7 cvmx_agl_gmx_txx_stat7_t;
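
/* Illustrative sketch (editorial example): gathering the complete TX size
 * histogram from STAT4..STAT7.  Each 64-bit read returns two packed 32-bit
 * bins; the CVMX_AGL_GMX_TXX_STAT4..STAT7(port) address macros are assumed
 * from earlier in this file.
 *
 *   uint32_t bins[8];
 *   cvmx_agl_gmx_txx_stat4_t s4;
 *   cvmx_agl_gmx_txx_stat5_t s5;
 *   cvmx_agl_gmx_txx_stat6_t s6;
 *   cvmx_agl_gmx_txx_stat7_t s7;
 *   s4.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT4(port));
 *   s5.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT5(port));
 *   s6.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT6(port));
 *   s7.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT7(port));
 *   bins[0] = s4.s.hist0;   bins[1] = s4.s.hist1;
 *   bins[2] = s5.s.hist2;   bins[3] = s5.s.hist3;
 *   bins[4] = s6.s.hist4;   bins[5] = s6.s.hist5;
 *   bins[6] = s7.s.hist6;   bins[7] = s7.s.hist7;
 */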
3846
3847/**
3848 * cvmx_agl_gmx_tx#_stat8
3849 *
3850 * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST  / AGL_GMX_TX_STATS_BCST
3851 *
3852 *
3853 * Notes:
3854 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3855 * - Counters will wrap
3856 * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
3857 *   packet.  GMX assumes that the DMAC lies in the first 6 bytes of the packet
3858 *   as per the 802.3 frame definition.  If the system requires additional data
3859 *   before the L2 header, then the MCST and BCST counters may not reflect
3860 *   reality and should be ignored by software.
3861 * - Not reset when MIX*_CTL[RESET] is set to 1.
3862 */
3863union cvmx_agl_gmx_txx_stat8
3864{
3865	uint64_t u64;
3866	struct cvmx_agl_gmx_txx_stat8_s
3867	{
3868#if __BYTE_ORDER == __BIG_ENDIAN
3869	uint64_t mcst                         : 32; /**< Number of packets sent to multicast DMAC.
3870                                                         Does not include BCST packets. */
3871	uint64_t bcst                         : 32; /**< Number of packets sent to broadcast DMAC.
3872                                                         Does not include MCST packets. */
3873#else
3874	uint64_t bcst                         : 32;
3875	uint64_t mcst                         : 32;
3876#endif
3877	} s;
3878	struct cvmx_agl_gmx_txx_stat8_s       cn52xx;
3879	struct cvmx_agl_gmx_txx_stat8_s       cn52xxp1;
3880	struct cvmx_agl_gmx_txx_stat8_s       cn56xx;
3881	struct cvmx_agl_gmx_txx_stat8_s       cn56xxp1;
3882	struct cvmx_agl_gmx_txx_stat8_s       cn63xx;
3883	struct cvmx_agl_gmx_txx_stat8_s       cn63xxp1;
3884};
3885typedef union cvmx_agl_gmx_txx_stat8 cvmx_agl_gmx_txx_stat8_t;
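
/* Illustrative sketch (editorial example): reading the multicast/broadcast TX
 * counters.  Per the note above, both are derived from the DMAC in the first
 * 6 bytes of the frame, so ignore them if extra data precedes the L2 header.
 * The CVMX_AGL_GMX_TXX_STAT8(port) address macro is assumed from earlier in
 * this file.
 *
 *   cvmx_agl_gmx_txx_stat8_t stat8;
 *   stat8.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT8(port));
 *   uint32_t tx_mcast = stat8.s.mcst;
 *   uint32_t tx_bcast = stat8.s.bcst;
 */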
3886
3887/**
3888 * cvmx_agl_gmx_tx#_stat9
3889 *
3890 * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
3891 *
3892 *
3893 * Notes:
3894 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3895 * - Counters will wrap
3896 * - Not reset when MIX*_CTL[RESET] is set to 1.
3897 */
3898union cvmx_agl_gmx_txx_stat9
3899{
3900	uint64_t u64;
3901	struct cvmx_agl_gmx_txx_stat9_s
3902	{
3903#if __BYTE_ORDER == __BIG_ENDIAN
3904	uint64_t undflw                       : 32; /**< Number of underflow packets */
3905	uint64_t ctl                          : 32; /**< Number of Control packets (PAUSE flow control)
3906                                                         generated by GMX.  It does not include control
3907                                                         packets forwarded or generated by the PPs. */
3908#else
3909	uint64_t ctl                          : 32;
3910	uint64_t undflw                       : 32;
3911#endif
3912	} s;
3913	struct cvmx_agl_gmx_txx_stat9_s       cn52xx;
3914	struct cvmx_agl_gmx_txx_stat9_s       cn52xxp1;
3915	struct cvmx_agl_gmx_txx_stat9_s       cn56xx;
3916	struct cvmx_agl_gmx_txx_stat9_s       cn56xxp1;
3917	struct cvmx_agl_gmx_txx_stat9_s       cn63xx;
3918	struct cvmx_agl_gmx_txx_stat9_s       cn63xxp1;
3919};
3920typedef union cvmx_agl_gmx_txx_stat9 cvmx_agl_gmx_txx_stat9_t;
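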
3921
3922/**
3923 * cvmx_agl_gmx_tx#_stats_ctl
3924 *
3925 * AGL_GMX_TX_STATS_CTL = TX Stats Control register
3926 *
3927 *
3928 * Notes:
3929 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3930 *
3931 */
3932union cvmx_agl_gmx_txx_stats_ctl
3933{
3934	uint64_t u64;
3935	struct cvmx_agl_gmx_txx_stats_ctl_s
3936	{
3937#if __BYTE_ORDER == __BIG_ENDIAN
3938	uint64_t reserved_1_63                : 63;
3939	uint64_t rd_clr                       : 1;  /**< Stats registers will clear on reads */
3940#else
3941	uint64_t rd_clr                       : 1;
3942	uint64_t reserved_1_63                : 63;
3943#endif
3944	} s;
3945	struct cvmx_agl_gmx_txx_stats_ctl_s   cn52xx;
3946	struct cvmx_agl_gmx_txx_stats_ctl_s   cn52xxp1;
3947	struct cvmx_agl_gmx_txx_stats_ctl_s   cn56xx;
3948	struct cvmx_agl_gmx_txx_stats_ctl_s   cn56xxp1;
3949	struct cvmx_agl_gmx_txx_stats_ctl_s   cn63xx;
3950	struct cvmx_agl_gmx_txx_stats_ctl_s   cn63xxp1;
3951};
3952typedef union cvmx_agl_gmx_txx_stats_ctl cvmx_agl_gmx_txx_stats_ctl_t;
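
/* Illustrative sketch (editorial example): enabling read-to-clear so that
 * each read of the AGL_GMX_TX*_STAT* registers above also zeroes the counter.
 * The CVMX_AGL_GMX_TXX_STATS_CTL(port) address macro is assumed from earlier
 * in this file.
 *
 *   cvmx_agl_gmx_txx_stats_ctl_t stats_ctl;
 *   stats_ctl.u64 = 0;
 *   stats_ctl.s.rd_clr = 1;
 *   cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), stats_ctl.u64);
 */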
3953
3954/**
3955 * cvmx_agl_gmx_tx#_thresh
3956 *
3957 * AGL_GMX_TX_THRESH = Packet TX Threshold
3958 *
3959 *
3960 * Notes:
3961 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3962 *
3963 */
3964union cvmx_agl_gmx_txx_thresh
3965{
3966	uint64_t u64;
3967	struct cvmx_agl_gmx_txx_thresh_s
3968	{
3969#if __BYTE_ORDER == __BIG_ENDIAN
3970	uint64_t reserved_6_63                : 58;
3971	uint64_t cnt                          : 6;  /**< Number of 16B ticks to accumulate in the TX FIFO
3972                                                         before sending on the packet interface
3973                                                         This register should be large enough to prevent
3974                                                         underflow on the packet interface and must never
3975                                                         be set below 4.  This register cannot exceed
3976                                                         the TX FIFO depth, which is 128 8-byte entries. */
3977#else
3978	uint64_t cnt                          : 6;
3979	uint64_t reserved_6_63                : 58;
3980#endif
3981	} s;
3982	struct cvmx_agl_gmx_txx_thresh_s      cn52xx;
3983	struct cvmx_agl_gmx_txx_thresh_s      cn52xxp1;
3984	struct cvmx_agl_gmx_txx_thresh_s      cn56xx;
3985	struct cvmx_agl_gmx_txx_thresh_s      cn56xxp1;
3986	struct cvmx_agl_gmx_txx_thresh_s      cn63xx;
3987	struct cvmx_agl_gmx_txx_thresh_s      cn63xxp1;
3988};
3989typedef union cvmx_agl_gmx_txx_thresh cvmx_agl_gmx_txx_thresh_t;
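
/* Illustrative sketch (editorial example): programming the TX threshold with
 * a read-modify-write.  Per the field description, CNT must be at least 4 and
 * may not exceed the 128-entry FIFO depth; the value 32 below is only an
 * example.  The CVMX_AGL_GMX_TXX_THRESH(port) address macro is assumed from
 * earlier in this file.
 *
 *   cvmx_agl_gmx_txx_thresh_t thresh;
 *   thresh.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_THRESH(port));
 *   thresh.s.cnt = 32;
 *   cvmx_write_csr(CVMX_AGL_GMX_TXX_THRESH(port), thresh.u64);
 */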
3990
3991/**
3992 * cvmx_agl_gmx_tx_bp
3993 *
3994 * AGL_GMX_TX_BP = Packet TX BackPressure Register
3995 *
3996 *
3997 * Notes:
3998 * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
3999 * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
4000 */
4001union cvmx_agl_gmx_tx_bp
4002{
4003	uint64_t u64;
4004	struct cvmx_agl_gmx_tx_bp_s
4005	{
4006#if __BYTE_ORDER == __BIG_ENDIAN
4007	uint64_t reserved_2_63                : 62;
4008	uint64_t bp                           : 2;  /**< Port BackPressure status
4009                                                         0=Port is available
4010                                                         1=Port should be back pressured */
4011#else
4012	uint64_t bp                           : 2;
4013	uint64_t reserved_2_63                : 62;
4014#endif
4015	} s;
4016	struct cvmx_agl_gmx_tx_bp_s           cn52xx;
4017	struct cvmx_agl_gmx_tx_bp_s           cn52xxp1;
4018	struct cvmx_agl_gmx_tx_bp_cn56xx
4019	{
4020#if __BYTE_ORDER == __BIG_ENDIAN
4021	uint64_t reserved_1_63                : 63;
4022	uint64_t bp                           : 1;  /**< Port BackPressure status
4023                                                         0=Port is available
4024                                                         1=Port should be back pressured */
4025#else
4026	uint64_t bp                           : 1;
4027	uint64_t reserved_1_63                : 63;
4028#endif
4029	} cn56xx;
4030	struct cvmx_agl_gmx_tx_bp_cn56xx      cn56xxp1;
4031	struct cvmx_agl_gmx_tx_bp_s           cn63xx;
4032	struct cvmx_agl_gmx_tx_bp_s           cn63xxp1;
4033};
4034typedef union cvmx_agl_gmx_tx_bp cvmx_agl_gmx_tx_bp_t;
4035
4036/**
4037 * cvmx_agl_gmx_tx_col_attempt
4038 *
4039 * AGL_GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
4040 *
4041 *
4042 * Notes:
4043 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4044 *
4045 */
4046union cvmx_agl_gmx_tx_col_attempt
4047{
4048	uint64_t u64;
4049	struct cvmx_agl_gmx_tx_col_attempt_s
4050	{
4051#if __BYTE_ORDER == __BIG_ENDIAN
4052	uint64_t reserved_5_63                : 59;
4053	uint64_t limit                        : 5;  /**< Collision Attempts */
4054#else
4055	uint64_t limit                        : 5;
4056	uint64_t reserved_5_63                : 59;
4057#endif
4058	} s;
4059	struct cvmx_agl_gmx_tx_col_attempt_s  cn52xx;
4060	struct cvmx_agl_gmx_tx_col_attempt_s  cn52xxp1;
4061	struct cvmx_agl_gmx_tx_col_attempt_s  cn56xx;
4062	struct cvmx_agl_gmx_tx_col_attempt_s  cn56xxp1;
4063	struct cvmx_agl_gmx_tx_col_attempt_s  cn63xx;
4064	struct cvmx_agl_gmx_tx_col_attempt_s  cn63xxp1;
4065};
4066typedef union cvmx_agl_gmx_tx_col_attempt cvmx_agl_gmx_tx_col_attempt_t;
4067
4068/**
4069 * cvmx_agl_gmx_tx_ifg
4070 *
4071 * Common
4072 *
4073 *
4074 * AGL_GMX_TX_IFG = Packet TX Interframe Gap
4075 *
4076 * Notes:
4078 * * Programming IFG1 and IFG2.
4079 *
4080 *   For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
4081 *   be in the range of 1-8, IFG2 must be in the range of 4-12, and the
4082 *   IFG1+IFG2 sum must be 12.
4083 *
4084 *   For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
4085 *   be in the range of 1-11, IFG2 must be in the range of 1-11, and the
4086 *   IFG1+IFG2 sum must be 12.
4087 *
4088 *   For all other systems, IFG1 and IFG2 can be any value in the range of
4089 *   1-15, allowing for a total possible IFG sum of 2-30.
4090 *
4091 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4092 */
4093union cvmx_agl_gmx_tx_ifg
4094{
4095	uint64_t u64;
4096	struct cvmx_agl_gmx_tx_ifg_s
4097	{
4098#if __BYTE_ORDER == __BIG_ENDIAN
4099	uint64_t reserved_8_63                : 56;
4100	uint64_t ifg2                         : 4;  /**< 1/3 of the interframe gap timing
4101                                                         If CRS is detected during IFG2, then the
4102                                                         interFrameSpacing timer is not reset and a frame
4103                                                         is transmitted once the timer expires. */
4104	uint64_t ifg1                         : 4;  /**< 2/3 of the interframe gap timing
4105                                                         If CRS is detected during IFG1, then the
4106                                                         interFrameSpacing timer is reset and a frame is
4107                                                         not transmitted. */
4108#else
4109	uint64_t ifg1                         : 4;
4110	uint64_t ifg2                         : 4;
4111	uint64_t reserved_8_63                : 56;
4112#endif
4113	} s;
4114	struct cvmx_agl_gmx_tx_ifg_s          cn52xx;
4115	struct cvmx_agl_gmx_tx_ifg_s          cn52xxp1;
4116	struct cvmx_agl_gmx_tx_ifg_s          cn56xx;
4117	struct cvmx_agl_gmx_tx_ifg_s          cn56xxp1;
4118	struct cvmx_agl_gmx_tx_ifg_s          cn63xx;
4119	struct cvmx_agl_gmx_tx_ifg_s          cn63xxp1;
4120};
4121typedef union cvmx_agl_gmx_tx_ifg cvmx_agl_gmx_tx_ifg_t;
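
/* Illustrative sketch (editorial example): programming an IEEE 802.3
 * compatible interframe gap for a full-duplex setup.  IFG1 = IFG2 = 6 keeps
 * both fields within the 1-11 range and gives the required IFG1+IFG2 sum of
 * 12 byte times (96 bit times).  The CVMX_AGL_GMX_TX_IFG address macro is
 * assumed from earlier in this file.
 *
 *   cvmx_agl_gmx_tx_ifg_t ifg;
 *   ifg.u64 = 0;
 *   ifg.s.ifg1 = 6;
 *   ifg.s.ifg2 = 6;
 *   cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, ifg.u64);
 */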
4122
4123/**
4124 * cvmx_agl_gmx_tx_int_en
4125 *
4126 * AGL_GMX_TX_INT_EN = Interrupt Enable
4127 *
4128 *
4129 * Notes:
4130 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
4131 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
4132 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
4133 */
4134union cvmx_agl_gmx_tx_int_en
4135{
4136	uint64_t u64;
4137	struct cvmx_agl_gmx_tx_int_en_s
4138	{
4139#if __BYTE_ORDER == __BIG_ENDIAN
4140	uint64_t reserved_22_63               : 42;
4141	uint64_t ptp_lost                     : 2;  /**< A packet with a PTP request was not able to be
4142                                                         sent due to XSCOL */
4143	uint64_t reserved_18_19               : 2;
4144	uint64_t late_col                     : 2;  /**< TX Late Collision */
4145	uint64_t reserved_14_15               : 2;
4146	uint64_t xsdef                        : 2;  /**< TX Excessive deferral (halfdup mode only) */
4147	uint64_t reserved_10_11               : 2;
4148	uint64_t xscol                        : 2;  /**< TX Excessive collisions (halfdup mode only) */
4149	uint64_t reserved_4_7                 : 4;
4150	uint64_t undflw                       : 2;  /**< TX Underflow */
4151	uint64_t reserved_1_1                 : 1;
4152	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
4153#else
4154	uint64_t pko_nxa                      : 1;
4155	uint64_t reserved_1_1                 : 1;
4156	uint64_t undflw                       : 2;
4157	uint64_t reserved_4_7                 : 4;
4158	uint64_t xscol                        : 2;
4159	uint64_t reserved_10_11               : 2;
4160	uint64_t xsdef                        : 2;
4161	uint64_t reserved_14_15               : 2;
4162	uint64_t late_col                     : 2;
4163	uint64_t reserved_18_19               : 2;
4164	uint64_t ptp_lost                     : 2;
4165	uint64_t reserved_22_63               : 42;
4166#endif
4167	} s;
4168	struct cvmx_agl_gmx_tx_int_en_cn52xx
4169	{
4170#if __BYTE_ORDER == __BIG_ENDIAN
4171	uint64_t reserved_18_63               : 46;
4172	uint64_t late_col                     : 2;  /**< TX Late Collision */
4173	uint64_t reserved_14_15               : 2;
4174	uint64_t xsdef                        : 2;  /**< TX Excessive deferral (MII/halfdup mode only) */
4175	uint64_t reserved_10_11               : 2;
4176	uint64_t xscol                        : 2;  /**< TX Excessive collisions (MII/halfdup mode only) */
4177	uint64_t reserved_4_7                 : 4;
4178	uint64_t undflw                       : 2;  /**< TX Underflow (MII mode only) */
4179	uint64_t reserved_1_1                 : 1;
4180	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
4181#else
4182	uint64_t pko_nxa                      : 1;
4183	uint64_t reserved_1_1                 : 1;
4184	uint64_t undflw                       : 2;
4185	uint64_t reserved_4_7                 : 4;
4186	uint64_t xscol                        : 2;
4187	uint64_t reserved_10_11               : 2;
4188	uint64_t xsdef                        : 2;
4189	uint64_t reserved_14_15               : 2;
4190	uint64_t late_col                     : 2;
4191	uint64_t reserved_18_63               : 46;
4192#endif
4193	} cn52xx;
4194	struct cvmx_agl_gmx_tx_int_en_cn52xx  cn52xxp1;
4195	struct cvmx_agl_gmx_tx_int_en_cn56xx
4196	{
4197#if __BYTE_ORDER == __BIG_ENDIAN
4198	uint64_t reserved_17_63               : 47;
4199	uint64_t late_col                     : 1;  /**< TX Late Collision */
4200	uint64_t reserved_13_15               : 3;
4201	uint64_t xsdef                        : 1;  /**< TX Excessive deferral (MII/halfdup mode only) */
4202	uint64_t reserved_9_11                : 3;
4203	uint64_t xscol                        : 1;  /**< TX Excessive collisions (MII/halfdup mode only) */
4204	uint64_t reserved_3_7                 : 5;
4205	uint64_t undflw                       : 1;  /**< TX Underflow (MII mode only) */
4206	uint64_t reserved_1_1                 : 1;
4207	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
4208#else
4209	uint64_t pko_nxa                      : 1;
4210	uint64_t reserved_1_1                 : 1;
4211	uint64_t undflw                       : 1;
4212	uint64_t reserved_3_7                 : 5;
4213	uint64_t xscol                        : 1;
4214	uint64_t reserved_9_11                : 3;
4215	uint64_t xsdef                        : 1;
4216	uint64_t reserved_13_15               : 3;
4217	uint64_t late_col                     : 1;
4218	uint64_t reserved_17_63               : 47;
4219#endif
4220	} cn56xx;
4221	struct cvmx_agl_gmx_tx_int_en_cn56xx  cn56xxp1;
4222	struct cvmx_agl_gmx_tx_int_en_s       cn63xx;
4223	struct cvmx_agl_gmx_tx_int_en_s       cn63xxp1;
4224};
4225typedef union cvmx_agl_gmx_tx_int_en cvmx_agl_gmx_tx_int_en_t;
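
/* Illustrative sketch (editorial example): enabling TX underflow and PKO_NXA
 * interrupts on a CN63XX-style part, where the per-port fields are 2 bits
 * wide (one bit per MIX port).  On CN52XX/CN56XX use the matching model
 * struct instead.  The CVMX_AGL_GMX_TX_INT_EN address macro is assumed from
 * earlier in this file.
 *
 *   cvmx_agl_gmx_tx_int_en_t int_en;
 *   int_en.u64 = cvmx_read_csr(CVMX_AGL_GMX_TX_INT_EN);
 *   int_en.s.undflw = 0x3;
 *   int_en.s.pko_nxa = 1;
 *   cvmx_write_csr(CVMX_AGL_GMX_TX_INT_EN, int_en.u64);
 */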
4226
4227/**
4228 * cvmx_agl_gmx_tx_int_reg
4229 *
4230 * AGL_GMX_TX_INT_REG = Interrupt Register
4231 *
4232 *
4233 * Notes:
4234 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
4235 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
4236 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
4237 */
4238union cvmx_agl_gmx_tx_int_reg
4239{
4240	uint64_t u64;
4241	struct cvmx_agl_gmx_tx_int_reg_s
4242	{
4243#if __BYTE_ORDER == __BIG_ENDIAN
4244	uint64_t reserved_22_63               : 42;
4245	uint64_t ptp_lost                     : 2;  /**< A packet with a PTP request was not able to be
4246                                                         sent due to XSCOL */
4247	uint64_t reserved_18_19               : 2;
4248	uint64_t late_col                     : 2;  /**< TX Late Collision */
4249	uint64_t reserved_14_15               : 2;
4250	uint64_t xsdef                        : 2;  /**< TX Excessive deferral (halfdup mode only) */
4251	uint64_t reserved_10_11               : 2;
4252	uint64_t xscol                        : 2;  /**< TX Excessive collisions (halfdup mode only) */
4253	uint64_t reserved_4_7                 : 4;
4254	uint64_t undflw                       : 2;  /**< TX Underflow */
4255	uint64_t reserved_1_1                 : 1;
4256	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
4257#else
4258	uint64_t pko_nxa                      : 1;
4259	uint64_t reserved_1_1                 : 1;
4260	uint64_t undflw                       : 2;
4261	uint64_t reserved_4_7                 : 4;
4262	uint64_t xscol                        : 2;
4263	uint64_t reserved_10_11               : 2;
4264	uint64_t xsdef                        : 2;
4265	uint64_t reserved_14_15               : 2;
4266	uint64_t late_col                     : 2;
4267	uint64_t reserved_18_19               : 2;
4268	uint64_t ptp_lost                     : 2;
4269	uint64_t reserved_22_63               : 42;
4270#endif
4271	} s;
4272	struct cvmx_agl_gmx_tx_int_reg_cn52xx
4273	{
4274#if __BYTE_ORDER == __BIG_ENDIAN
4275	uint64_t reserved_18_63               : 46;
4276	uint64_t late_col                     : 2;  /**< TX Late Collision */
4277	uint64_t reserved_14_15               : 2;
4278	uint64_t xsdef                        : 2;  /**< TX Excessive deferral (MII/halfdup mode only) */
4279	uint64_t reserved_10_11               : 2;
4280	uint64_t xscol                        : 2;  /**< TX Excessive collisions (MII/halfdup mode only) */
4281	uint64_t reserved_4_7                 : 4;
4282	uint64_t undflw                       : 2;  /**< TX Underflow (MII mode only) */
4283	uint64_t reserved_1_1                 : 1;
4284	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
4285#else
4286	uint64_t pko_nxa                      : 1;
4287	uint64_t reserved_1_1                 : 1;
4288	uint64_t undflw                       : 2;
4289	uint64_t reserved_4_7                 : 4;
4290	uint64_t xscol                        : 2;
4291	uint64_t reserved_10_11               : 2;
4292	uint64_t xsdef                        : 2;
4293	uint64_t reserved_14_15               : 2;
4294	uint64_t late_col                     : 2;
4295	uint64_t reserved_18_63               : 46;
4296#endif
4297	} cn52xx;
4298	struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
4299	struct cvmx_agl_gmx_tx_int_reg_cn56xx
4300	{
4301#if __BYTE_ORDER == __BIG_ENDIAN
4302	uint64_t reserved_17_63               : 47;
4303	uint64_t late_col                     : 1;  /**< TX Late Collision */
4304	uint64_t reserved_13_15               : 3;
4305	uint64_t xsdef                        : 1;  /**< TX Excessive deferral (MII/halfdup mode only) */
4306	uint64_t reserved_9_11                : 3;
4307	uint64_t xscol                        : 1;  /**< TX Excessive collisions (MII/halfdup mode only) */
4308	uint64_t reserved_3_7                 : 5;
4309	uint64_t undflw                       : 1;  /**< TX Underflow (MII mode only) */
4310	uint64_t reserved_1_1                 : 1;
4311	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
4312#else
4313	uint64_t pko_nxa                      : 1;
4314	uint64_t reserved_1_1                 : 1;
4315	uint64_t undflw                       : 1;
4316	uint64_t reserved_3_7                 : 5;
4317	uint64_t xscol                        : 1;
4318	uint64_t reserved_9_11                : 3;
4319	uint64_t xsdef                        : 1;
4320	uint64_t reserved_13_15               : 3;
4321	uint64_t late_col                     : 1;
4322	uint64_t reserved_17_63               : 47;
4323#endif
4324	} cn56xx;
4325	struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
4326	struct cvmx_agl_gmx_tx_int_reg_s      cn63xx;
4327	struct cvmx_agl_gmx_tx_int_reg_s      cn63xxp1;
4328};
4329typedef union cvmx_agl_gmx_tx_int_reg cvmx_agl_gmx_tx_int_reg_t;
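
/* Illustrative sketch (editorial example): polling the TX interrupt causes
 * and acknowledging them.  Writing the read value back assumes the usual
 * Octeon write-one-to-clear behaviour for interrupt CSRs, which is not
 * spelled out in the notes above.  The CVMX_AGL_GMX_TX_INT_REG address macro
 * is assumed from earlier in this file.
 *
 *   cvmx_agl_gmx_tx_int_reg_t cause;
 *   cause.u64 = cvmx_read_csr(CVMX_AGL_GMX_TX_INT_REG);
 *   if (cause.s.undflw || cause.s.pko_nxa)
 *       cvmx_write_csr(CVMX_AGL_GMX_TX_INT_REG, cause.u64);
 */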
4330
4331/**
4332 * cvmx_agl_gmx_tx_jam
4333 *
4334 * AGL_GMX_TX_JAM = Packet TX Jam Pattern
4335 *
4336 *
4337 * Notes:
4338 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4339 *
4340 */
4341union cvmx_agl_gmx_tx_jam
4342{
4343	uint64_t u64;
4344	struct cvmx_agl_gmx_tx_jam_s
4345	{
4346#if __BYTE_ORDER == __BIG_ENDIAN
4347	uint64_t reserved_8_63                : 56;
4348	uint64_t jam                          : 8;  /**< Jam pattern */
4349#else
4350	uint64_t jam                          : 8;
4351	uint64_t reserved_8_63                : 56;
4352#endif
4353	} s;
4354	struct cvmx_agl_gmx_tx_jam_s          cn52xx;
4355	struct cvmx_agl_gmx_tx_jam_s          cn52xxp1;
4356	struct cvmx_agl_gmx_tx_jam_s          cn56xx;
4357	struct cvmx_agl_gmx_tx_jam_s          cn56xxp1;
4358	struct cvmx_agl_gmx_tx_jam_s          cn63xx;
4359	struct cvmx_agl_gmx_tx_jam_s          cn63xxp1;
4360};
4361typedef union cvmx_agl_gmx_tx_jam cvmx_agl_gmx_tx_jam_t;
4362
4363/**
4364 * cvmx_agl_gmx_tx_lfsr
4365 *
4366 * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
4367 *
4368 *
4369 * Notes:
4370 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4371 *
4372 */
4373union cvmx_agl_gmx_tx_lfsr
4374{
4375	uint64_t u64;
4376	struct cvmx_agl_gmx_tx_lfsr_s
4377	{
4378#if __BYTE_ORDER == __BIG_ENDIAN
4379	uint64_t reserved_16_63               : 48;
4380	uint64_t lfsr                         : 16; /**< The current state of the LFSR used to feed random
4381                                                         numbers to compute truncated binary exponential
4382                                                         backoff. */
4383#else
4384	uint64_t lfsr                         : 16;
4385	uint64_t reserved_16_63               : 48;
4386#endif
4387	} s;
4388	struct cvmx_agl_gmx_tx_lfsr_s         cn52xx;
4389	struct cvmx_agl_gmx_tx_lfsr_s         cn52xxp1;
4390	struct cvmx_agl_gmx_tx_lfsr_s         cn56xx;
4391	struct cvmx_agl_gmx_tx_lfsr_s         cn56xxp1;
4392	struct cvmx_agl_gmx_tx_lfsr_s         cn63xx;
4393	struct cvmx_agl_gmx_tx_lfsr_s         cn63xxp1;
4394};
4395typedef union cvmx_agl_gmx_tx_lfsr cvmx_agl_gmx_tx_lfsr_t;
4396
4397/**
4398 * cvmx_agl_gmx_tx_ovr_bp
4399 *
4400 * AGL_GMX_TX_OVR_BP = Packet TX Override BackPressure
4401 *
4402 *
4403 * Notes:
4404 * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
4405 * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
4406 */
4407union cvmx_agl_gmx_tx_ovr_bp
4408{
4409	uint64_t u64;
4410	struct cvmx_agl_gmx_tx_ovr_bp_s
4411	{
4412#if __BYTE_ORDER == __BIG_ENDIAN
4413	uint64_t reserved_10_63               : 54;
4414	uint64_t en                           : 2;  /**< Per port Enable back pressure override */
4415	uint64_t reserved_6_7                 : 2;
4416	uint64_t bp                           : 2;  /**< Port BackPressure status to use
4417                                                         0=Port is available
4418                                                         1=Port should be back pressured */
4419	uint64_t reserved_2_3                 : 2;
4420	uint64_t ign_full                     : 2;  /**< Ignore the RX FIFO full when computing BP */
4421#else
4422	uint64_t ign_full                     : 2;
4423	uint64_t reserved_2_3                 : 2;
4424	uint64_t bp                           : 2;
4425	uint64_t reserved_6_7                 : 2;
4426	uint64_t en                           : 2;
4427	uint64_t reserved_10_63               : 54;
4428#endif
4429	} s;
4430	struct cvmx_agl_gmx_tx_ovr_bp_s       cn52xx;
4431	struct cvmx_agl_gmx_tx_ovr_bp_s       cn52xxp1;
4432	struct cvmx_agl_gmx_tx_ovr_bp_cn56xx
4433	{
4434#if __BYTE_ORDER == __BIG_ENDIAN
4435	uint64_t reserved_9_63                : 55;
4436	uint64_t en                           : 1;  /**< Per port Enable back pressure override */
4437	uint64_t reserved_5_7                 : 3;
4438	uint64_t bp                           : 1;  /**< Port BackPressure status to use
4439                                                         0=Port is available
4440                                                         1=Port should be back pressured */
4441	uint64_t reserved_1_3                 : 3;
4442	uint64_t ign_full                     : 1;  /**< Ignore the RX FIFO full when computing BP */
4443#else
4444	uint64_t ign_full                     : 1;
4445	uint64_t reserved_1_3                 : 3;
4446	uint64_t bp                           : 1;
4447	uint64_t reserved_5_7                 : 3;
4448	uint64_t en                           : 1;
4449	uint64_t reserved_9_63                : 55;
4450#endif
4451	} cn56xx;
4452	struct cvmx_agl_gmx_tx_ovr_bp_cn56xx  cn56xxp1;
4453	struct cvmx_agl_gmx_tx_ovr_bp_s       cn63xx;
4454	struct cvmx_agl_gmx_tx_ovr_bp_s       cn63xxp1;
4455};
4456typedef union cvmx_agl_gmx_tx_ovr_bp cvmx_agl_gmx_tx_ovr_bp_t;
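
/* Illustrative sketch (editorial example): forcing backpressure on MIX port 0
 * through the override register (2-bit per-port fields here; the CN56XX
 * variant above is single bit).  The CVMX_AGL_GMX_TX_OVR_BP address macro is
 * assumed from earlier in this file.
 *
 *   cvmx_agl_gmx_tx_ovr_bp_t ovr;
 *   ovr.u64 = cvmx_read_csr(CVMX_AGL_GMX_TX_OVR_BP);
 *   ovr.s.en |= 0x1;
 *   ovr.s.bp |= 0x1;
 *   cvmx_write_csr(CVMX_AGL_GMX_TX_OVR_BP, ovr.u64);
 */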
4457
4458/**
4459 * cvmx_agl_gmx_tx_pause_pkt_dmac
4460 *
4461 * AGL_GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
4462 *
4463 *
4464 * Notes:
4465 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4466 *
4467 */
4468union cvmx_agl_gmx_tx_pause_pkt_dmac
4469{
4470	uint64_t u64;
4471	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s
4472	{
4473#if __BYTE_ORDER == __BIG_ENDIAN
4474	uint64_t reserved_48_63               : 16;
4475	uint64_t dmac                         : 48; /**< The DMAC field placed in outbound pause packets */
4476#else
4477	uint64_t dmac                         : 48;
4478	uint64_t reserved_48_63               : 16;
4479#endif
4480	} s;
4481	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
4482	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
4483	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
4484	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
4485	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
4486	struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
4487};
4488typedef union cvmx_agl_gmx_tx_pause_pkt_dmac cvmx_agl_gmx_tx_pause_pkt_dmac_t;
4489
4490/**
4491 * cvmx_agl_gmx_tx_pause_pkt_type
4492 *
4493 * AGL_GMX_TX_PAUSE_PKT_TYPE = Packet TX Pause Packet TYPE field
4494 *
4495 *
4496 * Notes:
4497 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4498 *
4499 */
4500union cvmx_agl_gmx_tx_pause_pkt_type
4501{
4502	uint64_t u64;
4503	struct cvmx_agl_gmx_tx_pause_pkt_type_s
4504	{
4505#if __BYTE_ORDER == __BIG_ENDIAN
4506	uint64_t reserved_16_63               : 48;
4507	uint64_t type                         : 16; /**< The TYPE field placed in outbound pause packets */
4508#else
4509	uint64_t type                         : 16;
4510	uint64_t reserved_16_63               : 48;
4511#endif
4512	} s;
4513	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
4514	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
4515	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
4516	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
4517	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
4518	struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
4519};
4520typedef union cvmx_agl_gmx_tx_pause_pkt_type cvmx_agl_gmx_tx_pause_pkt_type_t;
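
/* Illustrative sketch (editorial example): programming the outbound PAUSE
 * frame DMAC and TYPE fields with the standard IEEE 802.3 values
 * (01:80:C2:00:00:01 and EtherType 0x8808).  The
 * CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC/TYPE address macros are assumed from earlier
 * in this file.
 *
 *   cvmx_agl_gmx_tx_pause_pkt_dmac_t dmac;
 *   cvmx_agl_gmx_tx_pause_pkt_type_t type;
 *   dmac.u64 = 0;
 *   dmac.s.dmac = 0x0180C2000001ull;
 *   type.u64 = 0;
 *   type.s.type = 0x8808;
 *   cvmx_write_csr(CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC, dmac.u64);
 *   cvmx_write_csr(CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE, type.u64);
 */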
4521
4522/**
4523 * cvmx_agl_prt#_ctl
4524 *
4525 * AGL_PRT_CTL = AGL Port Control
4526 *
4527 *
4528 * Notes:
4529 * AGL_PRT0_CTL will be reset when MIX0_CTL[RESET] is set to 1.
4530 * AGL_PRT1_CTL will be reset when MIX1_CTL[RESET] is set to 1.
4531 */
4532union cvmx_agl_prtx_ctl
4533{
4534	uint64_t u64;
4535	struct cvmx_agl_prtx_ctl_s
4536	{
4537#if __BYTE_ORDER == __BIG_ENDIAN
4538	uint64_t drv_byp                      : 1;  /**< Bypass the compensation controller and use
4539                                                         DRV_NCTL and DRV_PCTL
4540                                                         Note: the reset value was changed from pass1
4541                                                         to pass2. */
4542	uint64_t reserved_62_62               : 1;
4543	uint64_t cmp_pctl                     : 6;  /**< PCTL drive strength from the compensation ctl */
4544	uint64_t reserved_54_55               : 2;
4545	uint64_t cmp_nctl                     : 6;  /**< NCTL drive strength from the compensation ctl */
4546	uint64_t reserved_46_47               : 2;
4547	uint64_t drv_pctl                     : 6;  /**< PCTL drive strength to use in bypass mode
4548                                                         Reset value of 19 is for 50 ohm termination */
4549	uint64_t reserved_38_39               : 2;
4550	uint64_t drv_nctl                     : 6;  /**< NCTL drive strength to use in bypass mode
4551                                                         Reset value of 15 is for 50 ohm termination */
4552	uint64_t reserved_29_31               : 3;
4553	uint64_t clk_set                      : 5;  /**< The clock delay as determined by the DLL */
4554	uint64_t clkrx_byp                    : 1;  /**< Bypass the RX clock delay setting
4555                                                         Skews RXC from RXD,RXCTL in RGMII mode
4556                                                         By default, HW internally shifts the RXC clock
4557                                                         to sample RXD,RXCTL assuming clock and data are
4558                                                         sourced synchronously from the link partner.
4559                                                         In MII mode, the CLKRX_BYP is forced to 1. */
4560	uint64_t reserved_21_22               : 2;
4561	uint64_t clkrx_set                    : 5;  /**< RX clock delay setting to use in bypass mode
4562                                                         Skews RXC from RXD in RGMII mode */
4563	uint64_t clktx_byp                    : 1;  /**< Bypass the TX clock delay setting
4564                                                         Skews TXC from TXD,TXCTL in RGMII mode
4565                                                         Skews RXC from RXD,RXCTL in RGMII mode
4566                                                         By default, clock and data are sourced
4567                                                         synchronously.
4568                                                         In MII mode, the CLKTX_BYP is forced to 1. */
4569	uint64_t reserved_13_14               : 2;
4570	uint64_t clktx_set                    : 5;  /**< TX clock delay setting to use in bypass mode
4571                                                         Skews TXC from TXD in RGMII mode */
4572	uint64_t reserved_5_7                 : 3;
4573	uint64_t dllrst                       : 1;  /**< DLL Reset */
4574	uint64_t comp                         : 1;  /**< Compensation Enable */
4575	uint64_t enable                       : 1;  /**< Port Enable
4576                                                         Note: the reset value was changed from pass1
4577                                                         to pass2. */
4578	uint64_t clkrst                       : 1;  /**< Clock Tree Reset */
4579	uint64_t mode                         : 1;  /**< Port Mode
4580                                                         MODE must be set the same for all ports in which
4581                                                         AGL_PRTx_CTL[ENABLE] is set.
4582                                                         0=RGMII
4583                                                         1=MII */
4584#else
4585	uint64_t mode                         : 1;
4586	uint64_t clkrst                       : 1;
4587	uint64_t enable                       : 1;
4588	uint64_t comp                         : 1;
4589	uint64_t dllrst                       : 1;
4590	uint64_t reserved_5_7                 : 3;
4591	uint64_t clktx_set                    : 5;
4592	uint64_t reserved_13_14               : 2;
4593	uint64_t clktx_byp                    : 1;
4594	uint64_t clkrx_set                    : 5;
4595	uint64_t reserved_21_22               : 2;
4596	uint64_t clkrx_byp                    : 1;
4597	uint64_t clk_set                      : 5;
4598	uint64_t reserved_29_31               : 3;
4599	uint64_t drv_nctl                     : 6;
4600	uint64_t reserved_38_39               : 2;
4601	uint64_t drv_pctl                     : 6;
4602	uint64_t reserved_46_47               : 2;
4603	uint64_t cmp_nctl                     : 6;
4604	uint64_t reserved_54_55               : 2;
4605	uint64_t cmp_pctl                     : 6;
4606	uint64_t reserved_62_62               : 1;
4607	uint64_t drv_byp                      : 1;
4608#endif
4609	} s;
4610	struct cvmx_agl_prtx_ctl_s            cn63xx;
4611	struct cvmx_agl_prtx_ctl_s            cn63xxp1;
4612};
4613typedef union cvmx_agl_prtx_ctl cvmx_agl_prtx_ctl_t;
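
/* Illustrative sketch (editorial example): selecting RGMII mode and enabling
 * AGL port 0 on CN63XX with a read-modify-write.  Complete bring-up (clock
 * tree and DLL reset sequencing, drive-strength compensation) is beyond this
 * sketch; the CVMX_AGL_PRTX_CTL(port) address macro is assumed from earlier
 * in this file.
 *
 *   cvmx_agl_prtx_ctl_t prt_ctl;
 *   prt_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(0));
 *   prt_ctl.s.mode = 0;
 *   prt_ctl.s.enable = 1;
 *   cvmx_write_csr(CVMX_AGL_PRTX_CTL(0), prt_ctl.u64);
 */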
4614
4615#endif
4616