1/***********************license start***************
2 * Copyright (c) 2003-2012  Cavium Inc. (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
 *
18 *   * Neither the name of Cavium Inc. nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
 *
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
 *
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41/**
42 * cvmx-sso-defs.h
43 *
44 * Configuration and status register (CSR) type definitions for
45 * Octeon sso.
46 *
47 * This file is auto generated. Do not edit.
48 *
49 * <hr>$Revision$<hr>
50 *
51 */
52#ifndef __CVMX_SSO_DEFS_H__
53#define __CVMX_SSO_DEFS_H__
54
/*
 * CSR address accessors for the SSO unit (CN68XX only).
 *
 * Each register below is emitted in two flavors, selected at compile time
 * by CVMX_ENABLE_CSR_ADDRESS_CHECKING:
 *
 *  - Checking enabled: the CVMX_SSO_* name expands to a call to an inline
 *    _FUNC() helper that returns the CSR address and calls cvmx_warn() when
 *    the running model is not CN68XX (or, for indexed registers, when the
 *    index exceeds the valid range).
 *  - Checking disabled: the name is a plain constant-address macro with no
 *    model check.
 *
 * In BOTH flavors the address is computed identically, and indexed
 * registers mask the index to the valid range (e.g. "& 7") rather than
 * rejecting it — the checking build only warns, it never changes the
 * returned address.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_ACTIVE_CYCLES CVMX_SSO_ACTIVE_CYCLES_FUNC()
static inline uint64_t CVMX_SSO_ACTIVE_CYCLES_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_ACTIVE_CYCLES not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010E8ull);
}
#else
#define CVMX_SSO_ACTIVE_CYCLES (CVMX_ADD_IO_SEG(0x00016700000010E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_BIST_STAT CVMX_SSO_BIST_STAT_FUNC()
static inline uint64_t CVMX_SSO_BIST_STAT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_BIST_STAT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001078ull);
}
#else
#define CVMX_SSO_BIST_STAT (CVMX_ADD_IO_SEG(0x0001670000001078ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_CFG CVMX_SSO_CFG_FUNC()
static inline uint64_t CVMX_SSO_CFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_CFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001088ull);
}
#else
#define CVMX_SSO_CFG (CVMX_ADD_IO_SEG(0x0001670000001088ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_DS_PC CVMX_SSO_DS_PC_FUNC()
static inline uint64_t CVMX_SSO_DS_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_DS_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001070ull);
}
#else
#define CVMX_SSO_DS_PC (CVMX_ADD_IO_SEG(0x0001670000001070ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_ERR CVMX_SSO_ERR_FUNC()
static inline uint64_t CVMX_SSO_ERR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_ERR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001038ull);
}
#else
#define CVMX_SSO_ERR (CVMX_ADD_IO_SEG(0x0001670000001038ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_ERR_ENB CVMX_SSO_ERR_ENB_FUNC()
static inline uint64_t CVMX_SSO_ERR_ENB_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_ERR_ENB not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001030ull);
}
#else
#define CVMX_SSO_ERR_ENB (CVMX_ADD_IO_SEG(0x0001670000001030ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_FIDX_ECC_CTL CVMX_SSO_FIDX_ECC_CTL_FUNC()
static inline uint64_t CVMX_SSO_FIDX_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_FIDX_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010D0ull);
}
#else
#define CVMX_SSO_FIDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010D0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_FIDX_ECC_ST CVMX_SSO_FIDX_ECC_ST_FUNC()
static inline uint64_t CVMX_SSO_FIDX_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_FIDX_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010D8ull);
}
#else
#define CVMX_SSO_FIDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010D8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_FPAGE_CNT CVMX_SSO_FPAGE_CNT_FUNC()
static inline uint64_t CVMX_SSO_FPAGE_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_FPAGE_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001090ull);
}
#else
#define CVMX_SSO_FPAGE_CNT (CVMX_ADD_IO_SEG(0x0001670000001090ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_GWE_CFG CVMX_SSO_GWE_CFG_FUNC()
static inline uint64_t CVMX_SSO_GWE_CFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_GWE_CFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001098ull);
}
#else
#define CVMX_SSO_GWE_CFG (CVMX_ADD_IO_SEG(0x0001670000001098ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IDX_ECC_CTL CVMX_SSO_IDX_ECC_CTL_FUNC()
static inline uint64_t CVMX_SSO_IDX_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IDX_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010C0ull);
}
#else
#define CVMX_SSO_IDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010C0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IDX_ECC_ST CVMX_SSO_IDX_ECC_ST_FUNC()
static inline uint64_t CVMX_SSO_IDX_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IDX_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010C8ull);
}
#else
#define CVMX_SSO_IDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010C8ull))
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_IQ_CNTX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_IQ_CNTX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IQ_COM_CNT CVMX_SSO_IQ_COM_CNT_FUNC()
static inline uint64_t CVMX_SSO_IQ_COM_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IQ_COM_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001058ull);
}
#else
#define CVMX_SSO_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000001058ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IQ_INT CVMX_SSO_IQ_INT_FUNC()
static inline uint64_t CVMX_SSO_IQ_INT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IQ_INT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001048ull);
}
#else
#define CVMX_SSO_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000001048ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IQ_INT_EN CVMX_SSO_IQ_INT_EN_FUNC()
static inline uint64_t CVMX_SSO_IQ_INT_EN_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IQ_INT_EN not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001050ull);
}
#else
#define CVMX_SSO_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000001050ull))
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_IQ_THRX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_IQ_THRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_NOS_CNT CVMX_SSO_NOS_CNT_FUNC()
static inline uint64_t CVMX_SSO_NOS_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_NOS_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001040ull);
}
#else
#define CVMX_SSO_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000001040ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_NW_TIM CVMX_SSO_NW_TIM_FUNC()
static inline uint64_t CVMX_SSO_NW_TIM_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_NW_TIM not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001028ull);
}
#else
#define CVMX_SSO_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000001028ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_OTH_ECC_CTL CVMX_SSO_OTH_ECC_CTL_FUNC()
static inline uint64_t CVMX_SSO_OTH_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_OTH_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010B0ull);
}
#else
#define CVMX_SSO_OTH_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010B0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_OTH_ECC_ST CVMX_SSO_OTH_ECC_ST_FUNC()
static inline uint64_t CVMX_SSO_OTH_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_OTH_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010B8ull);
}
#else
#define CVMX_SSO_OTH_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010B8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_PND_ECC_CTL CVMX_SSO_PND_ECC_CTL_FUNC()
static inline uint64_t CVMX_SSO_PND_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_PND_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010A0ull);
}
#else
#define CVMX_SSO_PND_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_PND_ECC_ST CVMX_SSO_PND_ECC_ST_FUNC()
static inline uint64_t CVMX_SSO_PND_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_PND_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010A8ull);
}
#else
#define CVMX_SSO_PND_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010A8ull))
#endif
/* Indexed: 32 entries (0..31), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_PPX_GRP_MSK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
		cvmx_warn("CVMX_SSO_PPX_GRP_MSK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8;
}
#else
#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
#endif
/* Indexed: 32 entries (0..31), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_PPX_QOS_PRI(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
		cvmx_warn("CVMX_SSO_PPX_QOS_PRI(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8;
}
#else
#define CVMX_SSO_PPX_QOS_PRI(offset) (CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_PP_STRICT CVMX_SSO_PP_STRICT_FUNC()
static inline uint64_t CVMX_SSO_PP_STRICT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_PP_STRICT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010E0ull);
}
#else
#define CVMX_SSO_PP_STRICT (CVMX_ADD_IO_SEG(0x00016700000010E0ull))
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_QOSX_RND(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_QOSX_RND(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_QOSX_RND(offset) (CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8)
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_QOS_THRX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_QOS_THRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_QOS_WE CVMX_SSO_QOS_WE_FUNC()
static inline uint64_t CVMX_SSO_QOS_WE_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_QOS_WE not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001080ull);
}
#else
#define CVMX_SSO_QOS_WE (CVMX_ADD_IO_SEG(0x0001670000001080ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_RESET CVMX_SSO_RESET_FUNC()
static inline uint64_t CVMX_SSO_RESET_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_RESET not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010F0ull);
}
#else
#define CVMX_SSO_RESET (CVMX_ADD_IO_SEG(0x00016700000010F0ull))
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_RWQ_HEAD_PTRX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_RWQ_HEAD_PTRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_RWQ_HEAD_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_RWQ_POP_FPTR CVMX_SSO_RWQ_POP_FPTR_FUNC()
static inline uint64_t CVMX_SSO_RWQ_POP_FPTR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_RWQ_POP_FPTR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x000167000000C408ull);
}
#else
#define CVMX_SSO_RWQ_POP_FPTR (CVMX_ADD_IO_SEG(0x000167000000C408ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_RWQ_PSH_FPTR CVMX_SSO_RWQ_PSH_FPTR_FUNC()
static inline uint64_t CVMX_SSO_RWQ_PSH_FPTR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_RWQ_PSH_FPTR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x000167000000C400ull);
}
#else
#define CVMX_SSO_RWQ_PSH_FPTR (CVMX_ADD_IO_SEG(0x000167000000C400ull))
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_RWQ_TAIL_PTRX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_RWQ_TAIL_PTRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_RWQ_TAIL_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_TS_PC CVMX_SSO_TS_PC_FUNC()
static inline uint64_t CVMX_SSO_TS_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_TS_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001068ull);
}
#else
#define CVMX_SSO_TS_PC (CVMX_ADD_IO_SEG(0x0001670000001068ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WA_COM_PC CVMX_SSO_WA_COM_PC_FUNC()
static inline uint64_t CVMX_SSO_WA_COM_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WA_COM_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001060ull);
}
#else
#define CVMX_SSO_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000001060ull))
#endif
/* Indexed: 8 entries (0..7), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_WA_PCX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_SSO_WA_PCX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WQ_INT CVMX_SSO_WQ_INT_FUNC()
static inline uint64_t CVMX_SSO_WQ_INT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WQ_INT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001000ull);
}
#else
#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
#endif
/* Indexed: 64 entries (0..63), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_WQ_INT_CNTX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
		cvmx_warn("CVMX_SSO_WQ_INT_CNTX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8;
}
#else
#define CVMX_SSO_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WQ_INT_PC CVMX_SSO_WQ_INT_PC_FUNC()
static inline uint64_t CVMX_SSO_WQ_INT_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WQ_INT_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001020ull);
}
#else
#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
#endif
/* Indexed: 64 entries (0..63), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_WQ_INT_THRX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
		cvmx_warn("CVMX_SSO_WQ_INT_THRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8;
}
#else
#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WQ_IQ_DIS CVMX_SSO_WQ_IQ_DIS_FUNC()
static inline uint64_t CVMX_SSO_WQ_IQ_DIS_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WQ_IQ_DIS not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001010ull);
}
#else
#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
#endif
/* Indexed: 64 entries (0..63), stride 8 bytes. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SSO_WS_PCX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
		cvmx_warn("CVMX_SSO_WS_PCX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8;
}
#else
#define CVMX_SSO_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8)
#endif
528
529/**
530 * cvmx_sso_active_cycles
531 *
532 * SSO_ACTIVE_CYCLES = SSO cycles SSO active
533 *
534 * This register counts every sclk cycle that the SSO clocks are active.
535 * **NOTE: Added in pass 2.0
536 */
union cvmx_sso_active_cycles {
	uint64_t u64;
	/* Single 64-bit field; layout is identical for both endiannesses. */
	struct cvmx_sso_active_cycles_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t act_cyc                      : 64; /**< Counts number of active cycles. */
#else
	uint64_t act_cyc                      : 64;
#endif
	} s;
	struct cvmx_sso_active_cycles_s       cn68xx;
};
typedef union cvmx_sso_active_cycles cvmx_sso_active_cycles_t;
549
550/**
551 * cvmx_sso_bist_stat
552 *
553 * SSO_BIST_STAT = SSO BIST Status Register
554 *
555 * Contains the BIST status for the SSO memories ('0' = pass, '1' = fail).
556 * Note that PP BIST status is not reported here as it was in previous designs.
557 *
558 *   There may be more for DDR interface buffers.
559 *   It's possible that a RAM will be used for SSO_PP_QOS_RND.
560 */
union cvmx_sso_bist_stat {
	uint64_t u64;
	struct cvmx_sso_bist_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_62_63               : 2;
	uint64_t odu_pref                     : 2;  /**< ODU Prefetch memory BIST status */
	uint64_t reserved_54_59               : 6;
	uint64_t fptr                         : 2;  /**< FPTR memory BIST status */
	uint64_t reserved_45_51               : 7;
	uint64_t rwo_dat                      : 1;  /**< RWO_DAT memory BIST status */
	uint64_t rwo                          : 2;  /**< RWO memory BIST status */
	uint64_t reserved_35_41               : 7;
	uint64_t rwi_dat                      : 1;  /**< RWI_DAT memory BIST status */
	uint64_t reserved_32_33               : 2;
	uint64_t soc                          : 1;  /**< SSO CAM BIST status */
	uint64_t reserved_28_30               : 3;
	uint64_t ncbo                         : 4;  /**< NCBO transmitter memory BIST status */
	uint64_t reserved_21_23               : 3;
	uint64_t index                        : 1;  /**< Index memory BIST status */
	uint64_t reserved_17_19               : 3;
	uint64_t fidx                         : 1;  /**< Forward index memory BIST status */
	uint64_t reserved_10_15               : 6;
	uint64_t pend                         : 2;  /**< Pending switch memory BIST status */
	uint64_t reserved_2_7                 : 6;
	uint64_t oth                          : 2;  /**< WQP, GRP memory BIST status */
#else
	/* Little-endian: same fields in reverse declaration order; see the
	   big-endian half above for per-field descriptions. */
	uint64_t oth                          : 2;
	uint64_t reserved_2_7                 : 6;
	uint64_t pend                         : 2;
	uint64_t reserved_10_15               : 6;
	uint64_t fidx                         : 1;
	uint64_t reserved_17_19               : 3;
	uint64_t index                        : 1;
	uint64_t reserved_21_23               : 3;
	uint64_t ncbo                         : 4;
	uint64_t reserved_28_30               : 3;
	uint64_t soc                          : 1;
	uint64_t reserved_32_33               : 2;
	uint64_t rwi_dat                      : 1;
	uint64_t reserved_35_41               : 7;
	uint64_t rwo                          : 2;
	uint64_t rwo_dat                      : 1;
	uint64_t reserved_45_51               : 7;
	uint64_t fptr                         : 2;
	uint64_t reserved_54_59               : 6;
	uint64_t odu_pref                     : 2;
	uint64_t reserved_62_63               : 2;
#endif
	} s;
	struct cvmx_sso_bist_stat_s           cn68xx;
	/* CN68XX pass 1: bits <63:54> are reserved (no ODU_PREF field). */
	struct cvmx_sso_bist_stat_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_54_63               : 10;
	uint64_t fptr                         : 2;  /**< FPTR memory BIST status */
	uint64_t reserved_45_51               : 7;
	uint64_t rwo_dat                      : 1;  /**< RWO_DAT memory BIST status */
	uint64_t rwo                          : 2;  /**< RWO memory BIST status */
	uint64_t reserved_35_41               : 7;
	uint64_t rwi_dat                      : 1;  /**< RWI_DAT memory BIST status */
	uint64_t reserved_32_33               : 2;
	uint64_t soc                          : 1;  /**< SSO CAM BIST status */
	uint64_t reserved_28_30               : 3;
	uint64_t ncbo                         : 4;  /**< NCBO transmitter memory BIST status */
	uint64_t reserved_21_23               : 3;
	uint64_t index                        : 1;  /**< Index memory BIST status */
	uint64_t reserved_17_19               : 3;
	uint64_t fidx                         : 1;  /**< Forward index memory BIST status */
	uint64_t reserved_10_15               : 6;
	uint64_t pend                         : 2;  /**< Pending switch memory BIST status */
	uint64_t reserved_2_7                 : 6;
	uint64_t oth                          : 2;  /**< WQP, GRP memory BIST status */
#else
	/* Little-endian mirror of the big-endian half above. */
	uint64_t oth                          : 2;
	uint64_t reserved_2_7                 : 6;
	uint64_t pend                         : 2;
	uint64_t reserved_10_15               : 6;
	uint64_t fidx                         : 1;
	uint64_t reserved_17_19               : 3;
	uint64_t index                        : 1;
	uint64_t reserved_21_23               : 3;
	uint64_t ncbo                         : 4;
	uint64_t reserved_28_30               : 3;
	uint64_t soc                          : 1;
	uint64_t reserved_32_33               : 2;
	uint64_t rwi_dat                      : 1;
	uint64_t reserved_35_41               : 7;
	uint64_t rwo                          : 2;
	uint64_t rwo_dat                      : 1;
	uint64_t reserved_45_51               : 7;
	uint64_t fptr                         : 2;
	uint64_t reserved_54_63               : 10;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_bist_stat cvmx_sso_bist_stat_t;
656
657/**
658 * cvmx_sso_cfg
659 *
660 * SSO_CFG = SSO Config
661 *
662 * This register is an assortment of various SSO configuration bits.
663 */
union cvmx_sso_cfg {
	uint64_t u64;
	struct cvmx_sso_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63               : 48;
	uint64_t qck_gw_rsp_adj               : 3;  /**< Fast GET_WORK response fine adjustment
                                                         Allowed values are 0, 1, and 2 (0 is quickest) */
	uint64_t qck_gw_rsp_dis               : 1;  /**< Disable faster response to GET_WORK */
	uint64_t qck_sw_dis                   : 1;  /**< Disable faster switch to UNSCHEDULED on GET_WORK */
	uint64_t rwq_alloc_dis                : 1;  /**< Disable FPA Alloc Requests when SSO_FPAGE_CNT < 16 */
	uint64_t soc_ccam_dis                 : 1;  /**< Disable power saving SOC conditional CAM
                                                         (**NOTE: Added in pass 2.0) */
	uint64_t sso_cclk_dis                 : 1;  /**< Disable power saving SSO conditional clocking
                                                         (**NOTE: Added in pass 2.0) */
	uint64_t rwo_flush                    : 1;  /**< Flush RWO engine
                                                         Allows outbound NCB entries to go immediately rather
                                                         than waiting for a complete fill packet. This register
                                                         is one-shot and clears itself each time it is set. */
	uint64_t wfe_thr                      : 1;  /**< Use 1 Work-fetch engine (instead of 4) */
	uint64_t rwio_byp_dis                 : 1;  /**< Disable Bypass path in RWI/RWO Engines */
	uint64_t rwq_byp_dis                  : 1;  /**< Disable Bypass path in RWQ Engine */
	uint64_t stt                          : 1;  /**< STT Setting for RW Stores */
	uint64_t ldt                          : 1;  /**< LDT Setting for RW Loads */
	uint64_t dwb                          : 1;  /**< DWB Setting for Return Page Requests
                                                         1 = 2 128B cache pages to issue DWB for
                                                         0 = 0 128B cache pages to issue DWB for */
	uint64_t rwen                         : 1;  /**< Enable RWI/RWO operations
                                                         This bit should be set after SSO_RWQ_HEAD_PTRX and
                                                         SSO_RWQ_TAIL_PTRX have been programmed. */
#else
	/* Little-endian: same fields in reverse declaration order; see the
	   big-endian half above for per-field descriptions. */
	uint64_t rwen                         : 1;
	uint64_t dwb                          : 1;
	uint64_t ldt                          : 1;
	uint64_t stt                          : 1;
	uint64_t rwq_byp_dis                  : 1;
	uint64_t rwio_byp_dis                 : 1;
	uint64_t wfe_thr                      : 1;
	uint64_t rwo_flush                    : 1;
	uint64_t sso_cclk_dis                 : 1;
	uint64_t soc_ccam_dis                 : 1;
	uint64_t rwq_alloc_dis                : 1;
	uint64_t qck_sw_dis                   : 1;
	uint64_t qck_gw_rsp_dis               : 1;
	uint64_t qck_gw_rsp_adj               : 3;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_sso_cfg_s                 cn68xx;
	/* CN68XX pass 1: only bits <7:0>; pass-2.0 fields (bits <15:8>) absent. */
	struct cvmx_sso_cfg_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t rwo_flush                    : 1;  /**< Flush RWO engine
                                                         Allows outbound NCB entries to go immediately rather
                                                         than waiting for a complete fill packet. This register
                                                         is one-shot and clears itself each time it is set. */
	uint64_t wfe_thr                      : 1;  /**< Use 1 Work-fetch engine (instead of 4) */
	uint64_t rwio_byp_dis                 : 1;  /**< Disable Bypass path in RWI/RWO Engines */
	uint64_t rwq_byp_dis                  : 1;  /**< Disable Bypass path in RWQ Engine */
	uint64_t stt                          : 1;  /**< STT Setting for RW Stores */
	uint64_t ldt                          : 1;  /**< LDT Setting for RW Loads */
	uint64_t dwb                          : 1;  /**< DWB Setting for Return Page Requests
                                                         1 = 2 128B cache pages to issue DWB for
                                                         0 = 0 128B cache pages to issue DWB for */
	uint64_t rwen                         : 1;  /**< Enable RWI/RWO operations
                                                         This bit should be set after SSO_RWQ_HEAD_PTRX and
                                                         SSO_RWQ_TAIL_PTRX have been programmed. */
#else
	/* Little-endian mirror of the big-endian half above. */
	uint64_t rwen                         : 1;
	uint64_t dwb                          : 1;
	uint64_t ldt                          : 1;
	uint64_t stt                          : 1;
	uint64_t rwq_byp_dis                  : 1;
	uint64_t rwio_byp_dis                 : 1;
	uint64_t wfe_thr                      : 1;
	uint64_t rwo_flush                    : 1;
	uint64_t reserved_8_63                : 56;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_cfg cvmx_sso_cfg_t;
744
745/**
746 * cvmx_sso_ds_pc
747 *
748 * SSO_DS_PC = SSO De-Schedule Performance Counter
749 *
750 * Counts the number of de-schedule requests.
751 * Counter rolls over through zero when max value exceeded.
752 */
union cvmx_sso_ds_pc {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_ds_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t ds_pc                        : 64; /**< De-schedule performance counter
                                                         (wraps through zero when the max value
                                                         is exceeded; see register description) */
#else
	/* Little-endian bitfield order: same field, full width. */
	uint64_t ds_pc                        : 64;
#endif
	} s;
	struct cvmx_sso_ds_pc_s               cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_ds_pc_s               cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_ds_pc cvmx_sso_ds_pc_t;
766
767/**
768 * cvmx_sso_err
769 *
770 * SSO_ERR = SSO Error Register
771 *
772 * Contains ECC and other misc error bits.
773 *
774 * <45> The free page error bit will assert when SSO_FPAGE_CNT <= 16 and
775 *      SSO_CFG[RWEN] is 1.  Software will want to disable the interrupt
776 *      associated with this error when recovering SSO pointers from the
777 *      FPA and SSO.
778 *
779 * This register also contains the illegal operation error bits:
780 *
781 * <42> Received ADDWQ with tag specified as EMPTY
782 * <41> Received illegal opcode
783 * <40> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
784 *      from WS with CLR_NSCHED pending
785 * <39> Received CLR_NSCHED
786 *      from WS with SWTAG_DESCH/DESCH/CLR_NSCHED pending
787 * <38> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
788 *      from WS with ALLOC_WE pending
789 * <37> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE/CLR_NSCHED
790 *      from WS with GET_WORK pending
791 * <36> Received SWTAG_FULL/SWTAG_DESCH
792 *      with tag specified as UNSCHEDULED
793 * <35> Received SWTAG/SWTAG_FULL/SWTAG_DESCH
794 *      with tag specified as EMPTY
795 * <34> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK
796 *      from WS with pending tag switch to ORDERED or ATOMIC
797 * <33> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP
798 *      from WS in UNSCHEDULED state
799 * <32> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP
800 *      from WS in EMPTY state
801 */
union cvmx_sso_err {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63               : 16;
	uint64_t bfp                          : 1;  /**< Bad Fill Packet error
                                                         Last byte of the fill packet did not match 8'h1a */
	uint64_t awe                          : 1;  /**< Out-of-memory error (ADDWQ Request is dropped) */
	uint64_t fpe                          : 1;  /**< Free page error */
	uint64_t reserved_43_44               : 2;
	uint64_t iop                          : 11; /**< Illegal operation errors; one bit per case,
                                                         bits <42:32> described in the register comment above */
	uint64_t reserved_12_31               : 20;
	uint64_t pnd_dbe0                     : 1;  /**< Double bit error for even PND RAM */
	uint64_t pnd_sbe0                     : 1;  /**< Single bit error for even PND RAM */
	uint64_t pnd_dbe1                     : 1;  /**< Double bit error for odd PND RAM */
	uint64_t pnd_sbe1                     : 1;  /**< Single bit error for odd PND RAM */
	uint64_t oth_dbe0                     : 1;  /**< Double bit error for even OTH RAM */
	uint64_t oth_sbe0                     : 1;  /**< Single bit error for even OTH RAM */
	uint64_t oth_dbe1                     : 1;  /**< Double bit error for odd OTH RAM */
	uint64_t oth_sbe1                     : 1;  /**< Single bit error for odd OTH RAM */
	uint64_t idx_dbe                      : 1;  /**< Double bit error for IDX RAM */
	uint64_t idx_sbe                      : 1;  /**< Single bit error for IDX RAM */
	uint64_t fidx_dbe                     : 1;  /**< Double bit error for FIDX RAM */
	uint64_t fidx_sbe                     : 1;  /**< Single bit error for FIDX RAM */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t fidx_sbe                     : 1;
	uint64_t fidx_dbe                     : 1;
	uint64_t idx_sbe                      : 1;
	uint64_t idx_dbe                      : 1;
	uint64_t oth_sbe1                     : 1;
	uint64_t oth_dbe1                     : 1;
	uint64_t oth_sbe0                     : 1;
	uint64_t oth_dbe0                     : 1;
	uint64_t pnd_sbe1                     : 1;
	uint64_t pnd_dbe1                     : 1;
	uint64_t pnd_sbe0                     : 1;
	uint64_t pnd_dbe0                     : 1;
	uint64_t reserved_12_31               : 20;
	uint64_t iop                          : 11;
	uint64_t reserved_43_44               : 2;
	uint64_t fpe                          : 1;
	uint64_t awe                          : 1;
	uint64_t bfp                          : 1;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_sso_err_s                 cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_err_s                 cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_err cvmx_sso_err_t;
852
853/**
854 * cvmx_sso_err_enb
855 *
856 * SSO_ERR_ENB = SSO Error Enable Register
857 *
858 * Contains the interrupt enables corresponding to SSO_ERR.
859 */
union cvmx_sso_err_enb {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_err_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
	/* Each *_ie bit enables the interrupt for the same-named SSO_ERR bit. */
	uint64_t reserved_48_63               : 16;
	uint64_t bfp_ie                       : 1;  /**< Bad Fill Packet error interrupt enable */
	uint64_t awe_ie                       : 1;  /**< Add-work error interrupt enable */
	uint64_t fpe_ie                       : 1;  /**< Free Page error interrupt enable */
	uint64_t reserved_43_44               : 2;
	uint64_t iop_ie                       : 11; /**< Illegal operation interrupt enables */
	uint64_t reserved_12_31               : 20;
	uint64_t pnd_dbe0_ie                  : 1;  /**< Double bit error interrupt enable for even PND RAM */
	uint64_t pnd_sbe0_ie                  : 1;  /**< Single bit error interrupt enable for even PND RAM */
	uint64_t pnd_dbe1_ie                  : 1;  /**< Double bit error interrupt enable for odd PND RAM */
	uint64_t pnd_sbe1_ie                  : 1;  /**< Single bit error interrupt enable for odd PND RAM */
	uint64_t oth_dbe0_ie                  : 1;  /**< Double bit error interrupt enable for even OTH RAM */
	uint64_t oth_sbe0_ie                  : 1;  /**< Single bit error interrupt enable for even OTH RAM */
	uint64_t oth_dbe1_ie                  : 1;  /**< Double bit error interrupt enable for odd OTH RAM */
	uint64_t oth_sbe1_ie                  : 1;  /**< Single bit error interrupt enable for odd OTH RAM */
	uint64_t idx_dbe_ie                   : 1;  /**< Double bit error interrupt enable for IDX RAM */
	uint64_t idx_sbe_ie                   : 1;  /**< Single bit error interrupt enable for IDX RAM */
	uint64_t fidx_dbe_ie                  : 1;  /**< Double bit error interrupt enable for FIDX RAM */
	uint64_t fidx_sbe_ie                  : 1;  /**< Single bit error interrupt enable for FIDX RAM */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t fidx_sbe_ie                  : 1;
	uint64_t fidx_dbe_ie                  : 1;
	uint64_t idx_sbe_ie                   : 1;
	uint64_t idx_dbe_ie                   : 1;
	uint64_t oth_sbe1_ie                  : 1;
	uint64_t oth_dbe1_ie                  : 1;
	uint64_t oth_sbe0_ie                  : 1;
	uint64_t oth_dbe0_ie                  : 1;
	uint64_t pnd_sbe1_ie                  : 1;
	uint64_t pnd_dbe1_ie                  : 1;
	uint64_t pnd_sbe0_ie                  : 1;
	uint64_t pnd_dbe0_ie                  : 1;
	uint64_t reserved_12_31               : 20;
	uint64_t iop_ie                       : 11;
	uint64_t reserved_43_44               : 2;
	uint64_t fpe_ie                       : 1;
	uint64_t awe_ie                       : 1;
	uint64_t bfp_ie                       : 1;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_sso_err_enb_s             cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_err_enb_s             cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_err_enb cvmx_sso_err_enb_t;
909
910/**
911 * cvmx_sso_fidx_ecc_ctl
912 *
913 * SSO_FIDX_ECC_CTL = SSO FIDX ECC Control
914 *
915 */
union cvmx_sso_fidx_ecc_ctl {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_fidx_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_3_63                : 61;
	uint64_t flip_synd                    : 2;  /**< Testing feature. Flips the syndrome to generate a
                                                         single or double bit error for the FIDX RAM. */
	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 5-bit ECC
                                                         correction logic for the FIDX RAM. */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t ecc_ena                      : 1;
	uint64_t flip_synd                    : 2;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_sso_fidx_ecc_ctl_s        cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_fidx_ecc_ctl_s        cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_fidx_ecc_ctl cvmx_sso_fidx_ecc_ctl_t;
935
936/**
937 * cvmx_sso_fidx_ecc_st
938 *
939 * SSO_FIDX_ECC_ST = SSO FIDX ECC Status
940 *
941 */
union cvmx_sso_fidx_ecc_st {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_fidx_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63               : 37;
	uint64_t addr                         : 11; /**< Latches the address of the latest single/double
                                                         bit error (SBE/DBE) that occurred in the FIDX RAM */
	uint64_t reserved_9_15                : 7;
	uint64_t syndrom                      : 5;  /**< Reports the latest error syndrome for the
                                                         FIDX RAM */
	uint64_t reserved_0_3                 : 4;
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t reserved_0_3                 : 4;
	uint64_t syndrom                      : 5;
	uint64_t reserved_9_15                : 7;
	uint64_t addr                         : 11;
	uint64_t reserved_27_63               : 37;
#endif
	} s;
	struct cvmx_sso_fidx_ecc_st_s         cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_fidx_ecc_st_s         cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_fidx_ecc_st cvmx_sso_fidx_ecc_st_t;
965
966/**
967 * cvmx_sso_fpage_cnt
968 *
969 * SSO_FPAGE_CNT = SSO Free Page Cnt
970 *
 * This register keeps track of the number of free page pointers available for use in external memory.
972 */
union cvmx_sso_fpage_cnt {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_fpage_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t fpage_cnt                    : 32; /**< Free Page Count
                                                         HW updates this register. Writes to this register
                                                         are only for diagnostic purposes */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t fpage_cnt                    : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_sso_fpage_cnt_s           cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_fpage_cnt_s           cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_fpage_cnt cvmx_sso_fpage_cnt_t;
990
991/**
992 * cvmx_sso_gwe_cfg
993 *
994 * SSO_GWE_CFG = SSO Get-Work Examiner Configuration
995 *
996 * This register controls the operation of the Get-Work Examiner (GWE)
997 */
union cvmx_sso_gwe_cfg {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_gwe_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_12_63               : 52;
	uint64_t odu_ffpgw_dis                : 1;  /**< Disable flushing ODU on periodic restart of GWE */
	uint64_t gwe_rfpgw_dis                : 1;  /**< Disable periodic restart of GWE for pending get_work */
	uint64_t odu_prf_dis                  : 1;  /**< Disable ODU-initiated prefetches of WQEs into L2C
                                                         For diagnostic use only. */
	uint64_t odu_bmp_dis                  : 1;  /**< Disable ODU bumps.
                                                         If SSO_PP_STRICT is true, could
                                                         prevent forward progress under some circumstances.
                                                         For diagnostic use only. */
	uint64_t reserved_5_7                 : 3;
	uint64_t gwe_hvy_dis                  : 1;  /**< Disable GWE automatic, proportional weight-increase
                                                         mechanism and use SSO_QOSX_RND values as-is.
                                                         For diagnostic use only. */
	uint64_t gwe_poe                      : 1;  /**< Pause GWE on extracts
                                                         For diagnostic use only. */
	uint64_t gwe_fpor                     : 1;  /**< Flush GWE pipeline when restarting GWE.
                                                         For diagnostic use only. */
	uint64_t gwe_rah                      : 1;  /**< Begin at head of input queues when restarting GWE.
                                                         For diagnostic use only. */
	uint64_t gwe_dis                      : 1;  /**< Disable Get-Work Examiner */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t gwe_dis                      : 1;
	uint64_t gwe_rah                      : 1;
	uint64_t gwe_fpor                     : 1;
	uint64_t gwe_poe                      : 1;
	uint64_t gwe_hvy_dis                  : 1;
	uint64_t reserved_5_7                 : 3;
	uint64_t odu_bmp_dis                  : 1;
	uint64_t odu_prf_dis                  : 1;
	uint64_t gwe_rfpgw_dis                : 1;
	uint64_t odu_ffpgw_dis                : 1;
	uint64_t reserved_12_63               : 52;
#endif
	} s;
	struct cvmx_sso_gwe_cfg_s             cn68xx;   /* CN68XX uses the full layout above */
	/* CN68XX pass 1 implements only the low 4 bits; bits <11:4> did not exist yet. */
	struct cvmx_sso_gwe_cfg_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63                : 60;
	uint64_t gwe_poe                      : 1;  /**< Pause GWE on extracts
                                                         For diagnostic use only. */
	uint64_t gwe_fpor                     : 1;  /**< Flush GWE pipeline when restarting GWE.
                                                         For diagnostic use only. */
	uint64_t gwe_rah                      : 1;  /**< Begin at head of input queues when restarting GWE.
                                                         For diagnostic use only. */
	uint64_t gwe_dis                      : 1;  /**< Disable Get-Work Examiner */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t gwe_dis                      : 1;
	uint64_t gwe_rah                      : 1;
	uint64_t gwe_fpor                     : 1;
	uint64_t gwe_poe                      : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_gwe_cfg cvmx_sso_gwe_cfg_t;
1057
1058/**
1059 * cvmx_sso_idx_ecc_ctl
1060 *
1061 * SSO_IDX_ECC_CTL = SSO IDX ECC Control
1062 *
1063 */
union cvmx_sso_idx_ecc_ctl {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_idx_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_3_63                : 61;
	uint64_t flip_synd                    : 2;  /**< Testing feature. Flips the syndrome to generate a
                                                         single or double bit error for the IDX RAM. */
	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 5-bit ECC
                                                         correction logic for the IDX RAM. */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t ecc_ena                      : 1;
	uint64_t flip_synd                    : 2;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_sso_idx_ecc_ctl_s         cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_idx_ecc_ctl_s         cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_idx_ecc_ctl cvmx_sso_idx_ecc_ctl_t;
1083
1084/**
1085 * cvmx_sso_idx_ecc_st
1086 *
1087 * SSO_IDX_ECC_ST = SSO IDX ECC Status
1088 *
1089 */
union cvmx_sso_idx_ecc_st {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_idx_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63               : 37;
	uint64_t addr                         : 11; /**< Latches the address of the latest single/double
                                                         bit error (SBE/DBE) that occurred in the IDX RAM */
	uint64_t reserved_9_15                : 7;
	uint64_t syndrom                      : 5;  /**< Reports the latest error syndrome for the
                                                         IDX RAM */
	uint64_t reserved_0_3                 : 4;
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t reserved_0_3                 : 4;
	uint64_t syndrom                      : 5;
	uint64_t reserved_9_15                : 7;
	uint64_t addr                         : 11;
	uint64_t reserved_27_63               : 37;
#endif
	} s;
	struct cvmx_sso_idx_ecc_st_s          cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_idx_ecc_st_s          cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_idx_ecc_st cvmx_sso_idx_ecc_st_t;
1113
1114/**
1115 * cvmx_sso_iq_cnt#
1116 *
1117 * CSR reserved addresses: (64): 0x8200..0x83f8
1118 * CSR align addresses: ===========================================================================================================
1119 * SSO_IQ_CNTX = SSO Input Queue Count Register
1120 *               (one per QOS level)
1121 *
1122 * Contains a read-only count of the number of work queue entries for each QOS
1123 * level. Counts both in-unit and in-memory entries.
1124 */
union cvmx_sso_iq_cntx {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_iq_cntx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t iq_cnt                       : 32; /**< Input queue count for QOS level X
                                                         (read-only; in-unit plus in-memory entries) */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t iq_cnt                       : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_sso_iq_cntx_s             cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_iq_cntx_s             cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_iq_cntx cvmx_sso_iq_cntx_t;
1140
1141/**
1142 * cvmx_sso_iq_com_cnt
1143 *
1144 * SSO_IQ_COM_CNT = SSO Input Queue Combined Count Register
1145 *
1146 * Contains a read-only count of the total number of work queue entries in all
1147 * QOS levels.  Counts both in-unit and in-memory entries.
1148 */
union cvmx_sso_iq_com_cnt {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_iq_com_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t iq_cnt                       : 32; /**< Input queue combined count
                                                         (read-only; sum across all QOS levels) */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t iq_cnt                       : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_sso_iq_com_cnt_s          cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_iq_com_cnt_s          cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_iq_com_cnt cvmx_sso_iq_com_cnt_t;
1164
1165/**
1166 * cvmx_sso_iq_int
1167 *
1168 * SSO_IQ_INT = SSO Input Queue Interrupt Register
1169 *
1170 * Contains the bits (one per QOS level) that can trigger the input queue
1171 * interrupt.  An IQ_INT bit will be set if SSO_IQ_CNT#QOS# changes and the
1172 * resulting value is equal to SSO_IQ_THR#QOS#.
1173 */
union cvmx_sso_iq_int {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_iq_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t iq_int                       : 8;  /**< Input queue interrupt bits, one per QOS level */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t iq_int                       : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_sso_iq_int_s              cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_iq_int_s              cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_iq_int cvmx_sso_iq_int_t;
1189
1190/**
1191 * cvmx_sso_iq_int_en
1192 *
1193 * SSO_IQ_INT_EN = SSO Input Queue Interrupt Enable Register
1194 *
1195 * Contains the bits (one per QOS level) that enable the input queue interrupt.
1196 */
union cvmx_sso_iq_int_en {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_iq_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t int_en                       : 8;  /**< Input queue interrupt enable bits, one per QOS level */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t int_en                       : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_sso_iq_int_en_s           cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_iq_int_en_s           cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_iq_int_en cvmx_sso_iq_int_en_t;
1212
1213/**
1214 * cvmx_sso_iq_thr#
1215 *
1216 * CSR reserved addresses: (24): 0x9040..0x90f8
1217 * CSR align addresses: ===========================================================================================================
1218 * SSO_IQ_THRX = SSO Input Queue Threshold Register
1219 *               (one per QOS level)
1220 *
1221 * Threshold value for triggering input queue interrupts.
1222 */
union cvmx_sso_iq_thrx {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_iq_thrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t iq_thr                       : 32; /**< Input queue interrupt threshold for QOS level X */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t iq_thr                       : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_sso_iq_thrx_s             cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_iq_thrx_s             cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_iq_thrx cvmx_sso_iq_thrx_t;
1238
1239/**
1240 * cvmx_sso_nos_cnt
1241 *
1242 * SSO_NOS_CNT = SSO No-schedule Count Register
1243 *
1244 * Contains the number of work queue entries on the no-schedule list.
1245 */
union cvmx_sso_nos_cnt {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_nos_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_12_63               : 52;
	uint64_t nos_cnt                      : 12; /**< Number of work queue entries on the no-schedule list */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t nos_cnt                      : 12;
	uint64_t reserved_12_63               : 52;
#endif
	} s;
	struct cvmx_sso_nos_cnt_s             cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_nos_cnt_s             cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_nos_cnt cvmx_sso_nos_cnt_t;
1261
1262/**
1263 * cvmx_sso_nw_tim
1264 *
1265 * SSO_NW_TIM = SSO New Work Timer Period Register
1266 *
1267 * Sets the minimum period for a new work request timeout.  Period is specified
1268 * in n-1 notation where the increment value is 1024 clock cycles.  Thus, a
1269 * value of 0x0 in this register translates to 1024 cycles, 0x1 translates to
1270 * 2048 cycles, 0x2 translates to 3072 cycles, etc...  Note: the maximum period
1271 * for a new work request timeout is 2 times the minimum period.  Note: the new
1272 * work request timeout counter is reset when this register is written.
1273 *
1274 * There are two new work request timeout cases:
1275 *
1276 * - WAIT bit clear.  The new work request can timeout if the timer expires
1277 *   before the pre-fetch engine has reached the end of all work queues.  This
1278 *   can occur if the executable work queue entry is deep in the queue and the
1279 *   pre-fetch engine is subject to many resets (i.e. high switch, de-schedule,
1280 *   or new work load from other PP's).  Thus, it is possible for a PP to
1281 *   receive a work response with the NO_WORK bit set even though there was at
1282 *   least one executable entry in the work queues.  The other (and typical)
1283 *   scenario for receiving a NO_WORK response with the WAIT bit clear is that
1284 *   the pre-fetch engine has reached the end of all work queues without
1285 *   finding executable work.
1286 *
1287 * - WAIT bit set.  The new work request can timeout if the timer expires
1288 *   before the pre-fetch engine has found executable work.  In this case, the
1289 *   only scenario where the PP will receive a work response with the NO_WORK
1290 *   bit set is if the timer expires.  Note: it is still possible for a PP to
1291 *   receive a NO_WORK response even though there was at least one executable
1292 *   entry in the work queues.
1293 *
1294 * In either case, it's important to note that switches and de-schedules are
1295 * higher priority operations that can cause the pre-fetch engine to reset.
1296 * Thus in a system with many switches or de-schedules occurring, it's possible
1297 * for the new work timer to expire (resulting in NO_WORK responses) before the
1298 * pre-fetch engine is able to get very deep into the work queues.
1299 */
union cvmx_sso_nw_tim {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_nw_tim_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_10_63               : 54;
	uint64_t nw_tim                       : 10; /**< New work timer period, in units of 1024 cycles,
                                                         n-1 notation (0x0 = 1024 cycles) */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t nw_tim                       : 10;
	uint64_t reserved_10_63               : 54;
#endif
	} s;
	struct cvmx_sso_nw_tim_s              cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_nw_tim_s              cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_nw_tim cvmx_sso_nw_tim_t;
1315
1316/**
1317 * cvmx_sso_oth_ecc_ctl
1318 *
1319 * SSO_OTH_ECC_CTL = SSO OTH ECC Control
1320 *
1321 */
union cvmx_sso_oth_ecc_ctl {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_oth_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63                : 58;
	uint64_t flip_synd1                   : 2;  /**< Testing feature. Flips the syndrome to generate a
                                                         single or double bit error for the odd OTH RAM. */
	uint64_t ecc_ena1                     : 1;  /**< ECC Enable: When set will enable the 7-bit ECC
                                                         correction logic for the odd OTH RAM. */
	uint64_t flip_synd0                   : 2;  /**< Testing feature. Flips the syndrome to generate a
                                                         single or double bit error for the even OTH RAM. */
	uint64_t ecc_ena0                     : 1;  /**< ECC Enable: When set will enable the 7-bit ECC
                                                         correction logic for the even OTH RAM. */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t ecc_ena0                     : 1;
	uint64_t flip_synd0                   : 2;
	uint64_t ecc_ena1                     : 1;
	uint64_t flip_synd1                   : 2;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	struct cvmx_sso_oth_ecc_ctl_s         cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_oth_ecc_ctl_s         cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_oth_ecc_ctl cvmx_sso_oth_ecc_ctl_t;
1347
1348/**
1349 * cvmx_sso_oth_ecc_st
1350 *
1351 * SSO_OTH_ECC_ST = SSO OTH ECC Status
1352 *
1353 */
union cvmx_sso_oth_ecc_st {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_oth_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_59_63               : 5;
	uint64_t addr1                        : 11; /**< Latches the address of the latest single/double
                                                         bit error (SBE/DBE) that occurred in the odd OTH RAM */
	uint64_t reserved_43_47               : 5;
	uint64_t syndrom1                     : 7;  /**< Reports the latest error syndrome for the odd
                                                         OTH RAM */
	uint64_t reserved_27_35               : 9;
	uint64_t addr0                        : 11; /**< Latches the address of the latest single/double
                                                         bit error (SBE/DBE) that occurred in the even OTH RAM */
	uint64_t reserved_11_15               : 5;
	uint64_t syndrom0                     : 7;  /**< Reports the latest error syndrome for the even
                                                         OTH RAM */
	uint64_t reserved_0_3                 : 4;
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t reserved_0_3                 : 4;
	uint64_t syndrom0                     : 7;
	uint64_t reserved_11_15               : 5;
	uint64_t addr0                        : 11;
	uint64_t reserved_27_35               : 9;
	uint64_t syndrom1                     : 7;
	uint64_t reserved_43_47               : 5;
	uint64_t addr1                        : 11;
	uint64_t reserved_59_63               : 5;
#endif
	} s;
	struct cvmx_sso_oth_ecc_st_s          cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_oth_ecc_st_s          cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_oth_ecc_st cvmx_sso_oth_ecc_st_t;
1387
1388/**
1389 * cvmx_sso_pnd_ecc_ctl
1390 *
1391 * SSO_PND_ECC_CTL = SSO PND ECC Control
1392 *
1393 */
union cvmx_sso_pnd_ecc_ctl {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_pnd_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63                : 58;
	uint64_t flip_synd1                   : 2;  /**< Testing feature. Flips the syndrome to generate a
                                                         single or double bit error for the odd PND RAM. */
	uint64_t ecc_ena1                     : 1;  /**< ECC Enable: When set will enable the 7-bit ECC
                                                         correction logic for the odd PND RAM. */
	uint64_t flip_synd0                   : 2;  /**< Testing feature. Flips the syndrome to generate a
                                                         single or double bit error for the even PND RAM. */
	uint64_t ecc_ena0                     : 1;  /**< ECC Enable: When set will enable the 7-bit ECC
                                                         correction logic for the even PND RAM. */
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t ecc_ena0                     : 1;
	uint64_t flip_synd0                   : 2;
	uint64_t ecc_ena1                     : 1;
	uint64_t flip_synd1                   : 2;
	uint64_t reserved_6_63                : 58;
#endif
	} s;
	struct cvmx_sso_pnd_ecc_ctl_s         cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_pnd_ecc_ctl_s         cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_pnd_ecc_ctl cvmx_sso_pnd_ecc_ctl_t;
1419
1420/**
1421 * cvmx_sso_pnd_ecc_st
1422 *
1423 * SSO_PND_ECC_ST = SSO PND ECC Status
1424 *
1425 */
union cvmx_sso_pnd_ecc_st {
	uint64_t u64;                               /* Raw 64-bit register value */
	struct cvmx_sso_pnd_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_59_63               : 5;
	uint64_t addr1                        : 11; /**< Latches the address of the latest single/double
                                                         bit error (SBE/DBE) that occurred in the odd PND RAM */
	uint64_t reserved_43_47               : 5;
	uint64_t syndrom1                     : 7;  /**< Reports the latest error syndrome for the odd
                                                         PND RAM */
	uint64_t reserved_27_35               : 9;
	uint64_t addr0                        : 11; /**< Latches the address of the latest single/double
                                                         bit error (SBE/DBE) that occurred in the even PND RAM */
	uint64_t reserved_11_15               : 5;
	uint64_t syndrom0                     : 7;  /**< Reports the latest error syndrome for the even
                                                         PND RAM */
	uint64_t reserved_0_3                 : 4;
#else
	/* Little-endian bitfield order: same fields, reversed declaration order. */
	uint64_t reserved_0_3                 : 4;
	uint64_t syndrom0                     : 7;
	uint64_t reserved_11_15               : 5;
	uint64_t addr0                        : 11;
	uint64_t reserved_27_35               : 9;
	uint64_t syndrom1                     : 7;
	uint64_t reserved_43_47               : 5;
	uint64_t addr1                        : 11;
	uint64_t reserved_59_63               : 5;
#endif
	} s;
	struct cvmx_sso_pnd_ecc_st_s          cn68xx;   /* CN68XX uses the common layout */
	struct cvmx_sso_pnd_ecc_st_s          cn68xxp1; /* CN68XX pass 1 uses the common layout */
};
typedef union cvmx_sso_pnd_ecc_st cvmx_sso_pnd_ecc_st_t;
1459
1460/**
1461 * cvmx_sso_pp#_grp_msk
1462 *
1463 * CSR reserved addresses: (24): 0x5040..0x50f8
1464 * CSR align addresses: ===========================================================================================================
1465 * SSO_PPX_GRP_MSK = SSO PP Group Mask Register
1466 *                   (one bit per group per PP)
1467 *
1468 * Selects which group(s) a PP belongs to.  A '1' in any bit position sets the
1469 * PP's membership in the corresponding group.  A value of 0x0 will prevent the
1470 * PP from receiving new work.
1471 *
1472 * Note that these do not contain QOS level priorities for each PP.  This is a
1473 * change from previous POW designs.
1474 */
union cvmx_sso_ppx_grp_msk {
	uint64_t u64;
	struct cvmx_sso_ppx_grp_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t grp_msk                      : 64; /**< PPX group mask */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t grp_msk                      : 64;
#endif
	} s;
	struct cvmx_sso_ppx_grp_msk_s         cn68xx;
	struct cvmx_sso_ppx_grp_msk_s         cn68xxp1;
};
typedef union cvmx_sso_ppx_grp_msk cvmx_sso_ppx_grp_msk_t;
1488
1489/**
1490 * cvmx_sso_pp#_qos_pri
1491 *
1492 * CSR reserved addresses: (56): 0x2040..0x21f8
1493 * CSR align addresses: ===========================================================================================================
1494 * SSO_PP(0..31)_QOS_PRI = SSO PP QOS Priority Register
1495 *                                (one field per IQ per PP)
1496 *
1497 * Contains the QOS level priorities for each PP.
1498 *      0x0       is the highest priority
1499 *      0x7       is the lowest priority
1500 *      0xf       prevents the PP from receiving work from that QOS level
1501 *      0x8-0xe   Reserved
1502 *
1503 * For a given PP, priorities should begin at 0x0, and remain contiguous
1504 * throughout the range.  Failure to do so may result in severe
1505 * performance degradation.
1506 *
1507 *
1508 * Priorities for IQs 0..7
1509 */
union cvmx_sso_ppx_qos_pri {
	uint64_t u64;
	struct cvmx_sso_ppx_qos_pri_s {
#ifdef __BIG_ENDIAN_BITFIELD
	/* One 4-bit priority field per IQ (QOS level), each padded to a byte boundary. */
	uint64_t reserved_60_63               : 4;
	uint64_t qos7_pri                     : 4;  /**< QOS7 priority for PPX */
	uint64_t reserved_52_55               : 4;
	uint64_t qos6_pri                     : 4;  /**< QOS6 priority for PPX */
	uint64_t reserved_44_47               : 4;
	uint64_t qos5_pri                     : 4;  /**< QOS5 priority for PPX */
	uint64_t reserved_36_39               : 4;
	uint64_t qos4_pri                     : 4;  /**< QOS4 priority for PPX */
	uint64_t reserved_28_31               : 4;
	uint64_t qos3_pri                     : 4;  /**< QOS3 priority for PPX */
	uint64_t reserved_20_23               : 4;
	uint64_t qos2_pri                     : 4;  /**< QOS2 priority for PPX */
	uint64_t reserved_12_15               : 4;
	uint64_t qos1_pri                     : 4;  /**< QOS1 priority for PPX */
	uint64_t reserved_4_7                 : 4;
	uint64_t qos0_pri                     : 4;  /**< QOS0 priority for PPX */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t qos0_pri                     : 4;
	uint64_t reserved_4_7                 : 4;
	uint64_t qos1_pri                     : 4;
	uint64_t reserved_12_15               : 4;
	uint64_t qos2_pri                     : 4;
	uint64_t reserved_20_23               : 4;
	uint64_t qos3_pri                     : 4;
	uint64_t reserved_28_31               : 4;
	uint64_t qos4_pri                     : 4;
	uint64_t reserved_36_39               : 4;
	uint64_t qos5_pri                     : 4;
	uint64_t reserved_44_47               : 4;
	uint64_t qos6_pri                     : 4;
	uint64_t reserved_52_55               : 4;
	uint64_t qos7_pri                     : 4;
	uint64_t reserved_60_63               : 4;
#endif
	} s;
	struct cvmx_sso_ppx_qos_pri_s         cn68xx;
	struct cvmx_sso_ppx_qos_pri_s         cn68xxp1;
};
typedef union cvmx_sso_ppx_qos_pri cvmx_sso_ppx_qos_pri_t;
1553
1554/**
1555 * cvmx_sso_pp_strict
1556 *
1557 * SSO_PP_STRICT = SSO Strict Priority
1558 *
1559 * This register controls getting work from the input queues.  If the bit
1560 * corresponding to a PP is set, that PP will not take work off the input
1561 * queues until it is known that there is no higher-priority work available.
1562 *
1563 * Setting SSO_PP_STRICT may incur a performance penalty if highest-priority
1564 * work is not found early.
1565 *
1566 * It is possible to starve a PP of work with SSO_PP_STRICT.  If the
1567 * SSO_PPX_GRP_MSK for a PP masks-out much of the work added to the input
1568 * queues that are higher-priority for that PP, and if there is a constant
1569 * stream of work through one or more of those higher-priority input queues,
1570 * then that PP may not accept work from lower-priority input queues.  This can
1571 * be alleviated by ensuring that most or all the work added to the
1572 * higher-priority input queues for a PP with SSO_PP_STRICT set are in a group
1573 * acceptable to that PP.
1574 *
1575 * It is also possible to neglect work in an input queue if SSO_PP_STRICT is
1576 * used.  If an input queue is a lower-priority queue for all PPs, and if all
1577 * the PPs have their corresponding bit in SSO_PP_STRICT set, then work may
1578 * never be taken (or be seldom taken) from that queue.  This can be alleviated
1579 * by ensuring that work in all input queues can be serviced by one or more PPs
1580 * that do not have SSO_PP_STRICT set, or that the input queue is the
1581 * highest-priority input queue for one or more PPs that do have SSO_PP_STRICT
1582 * set.
1583 */
union cvmx_sso_pp_strict {
	uint64_t u64;
	struct cvmx_sso_pp_strict_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t pp_strict                    : 32; /**< Corresponding PP operates in strict mode. */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t pp_strict                    : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_sso_pp_strict_s           cn68xx;
	struct cvmx_sso_pp_strict_s           cn68xxp1;
};
typedef union cvmx_sso_pp_strict cvmx_sso_pp_strict_t;
1599
1600/**
1601 * cvmx_sso_qos#_rnd
1602 *
1603 * CSR align addresses: ===========================================================================================================
1604 * SSO_QOS(0..7)_RND = SSO QOS Issue Round Register
1605 *                (one per IQ)
1606 *
1607 * The number of arbitration rounds each QOS level participates in.
1608 */
union cvmx_sso_qosx_rnd {
	uint64_t u64;
	struct cvmx_sso_qosx_rnd_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t rnds_qos                     : 8;  /**< Number of rounds to participate in for IQ(X). */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t rnds_qos                     : 8;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_sso_qosx_rnd_s            cn68xx;
	struct cvmx_sso_qosx_rnd_s            cn68xxp1;
};
typedef union cvmx_sso_qosx_rnd cvmx_sso_qosx_rnd_t;
1624
1625/**
1626 * cvmx_sso_qos_thr#
1627 *
1628 * CSR reserved addresses: (24): 0xa040..0xa0f8
1629 * CSR align addresses: ===========================================================================================================
1630 * SSO_QOS_THRX = SSO QOS Threshold Register
1631 *                (one per QOS level)
1632 *
1633 * Contains the thresholds for allocating SSO internal storage buffers.  If the
1634 * number of remaining free buffers drops below the minimum threshold (MIN_THR)
1635 * or the number of allocated buffers for this QOS level rises above the
1636 * maximum threshold (MAX_THR), future incoming work queue entries will be
1637 * buffered externally rather than internally.  This register also contains the
1638 * number of internal buffers currently allocated to this QOS level (BUF_CNT).
1639 */
union cvmx_sso_qos_thrx {
	uint64_t u64;
	struct cvmx_sso_qos_thrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_40_63               : 24;
	uint64_t buf_cnt                      : 12; /**< # of internal buffers allocated to QOS level X */
	uint64_t reserved_26_27               : 2;
	uint64_t max_thr                      : 12; /**< Max threshold for QOS level X
                                                         For performance reasons, MAX_THR can have a slop of 4
                                                         WQE for QOS level X. */
	uint64_t reserved_12_13               : 2;
	uint64_t min_thr                      : 12; /**< Min threshold for QOS level X
                                                         For performance reasons, MIN_THR can have a slop of 4
                                                         WQEs for QOS level X. */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t min_thr                      : 12;
	uint64_t reserved_12_13               : 2;
	uint64_t max_thr                      : 12;
	uint64_t reserved_26_27               : 2;
	uint64_t buf_cnt                      : 12;
	uint64_t reserved_40_63               : 24;
#endif
	} s;
	struct cvmx_sso_qos_thrx_s            cn68xx;
	struct cvmx_sso_qos_thrx_s            cn68xxp1;
};
typedef union cvmx_sso_qos_thrx cvmx_sso_qos_thrx_t;
1667
1668/**
1669 * cvmx_sso_qos_we
1670 *
1671 * SSO_QOS_WE = SSO WE Buffers
1672 *
1673 * This register contains a read-only count of the current number of free
1674 * buffers (FREE_CNT) and the total number of tag chain heads on the de-schedule list
1675 * (DES_CNT) (which is not the same as the total number of entries on all of the descheduled
1676 * tag chains.)
1677 */
union cvmx_sso_qos_we {
	uint64_t u64;
	struct cvmx_sso_qos_we_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_26_63               : 38;
	uint64_t des_cnt                      : 12; /**< Number of buffers on de-schedule list */
	uint64_t reserved_12_13               : 2;
	uint64_t free_cnt                     : 12; /**< Number of total free buffers */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t free_cnt                     : 12;
	uint64_t reserved_12_13               : 2;
	uint64_t des_cnt                      : 12;
	uint64_t reserved_26_63               : 38;
#endif
	} s;
	struct cvmx_sso_qos_we_s              cn68xx;
	struct cvmx_sso_qos_we_s              cn68xxp1;
};
typedef union cvmx_sso_qos_we cvmx_sso_qos_we_t;
1697
1698/**
1699 * cvmx_sso_reset
1700 *
1701 * SSO_RESET = SSO Soft Reset
1702 *
1703 * Writing a one to SSO_RESET[RESET] will reset the SSO.  After receiving a
1704 * store to this CSR, the SSO must not be sent any other operations for 2500
1705 * sclk cycles.
1706 *
1707 * Note that the contents of this register are reset along with the rest of the
1708 * SSO.
1709 *
1710 * IMPLEMENTATION NOTES--NOT FOR SPEC:
1711 *      The SSO must return the bus credit associated with the CSR store used
 *      to write this register before resetting itself.  And the RSL tree
1713 *      that passes through the SSO must continue to work for RSL operations
1714 *      that do not target the SSO itself.
1715 */
union cvmx_sso_reset {
	uint64_t u64;
	struct cvmx_sso_reset_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63                : 63;
	uint64_t reset                        : 1;  /**< Reset the SSO */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t reset                        : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	/* No cn68xxp1 member: this register is only present from cn68xx pass 2 on
	   in this definition set -- TODO confirm against the HRM. */
	struct cvmx_sso_reset_s               cn68xx;
};
typedef union cvmx_sso_reset cvmx_sso_reset_t;
1730
1731/**
1732 * cvmx_sso_rwq_head_ptr#
1733 *
1734 * CSR reserved addresses: (24): 0xb040..0xb0f8
1735 * CSR align addresses: ===========================================================================================================
1736 * SSO_RWQ_HEAD_PTRX = SSO Remote Queue Head Register
1737 *                (one per QOS level)
1738 * Contains the ptr to the first entry of the remote linked list(s) for a particular
1739 * QoS level. SW should initialize the remote linked list(s) by programming
1740 * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
1741 */
union cvmx_sso_rwq_head_ptrx {
	uint64_t u64;
	struct cvmx_sso_rwq_head_ptrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_38_63               : 26;
	uint64_t ptr                          : 31; /**< Head Pointer */
	uint64_t reserved_5_6                 : 2;
	uint64_t rctr                         : 5;  /**< Index of next WQE entry in fill packet to be
                                                         processed (inbound queues) */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t rctr                         : 5;
	uint64_t reserved_5_6                 : 2;
	uint64_t ptr                          : 31;
	uint64_t reserved_38_63               : 26;
#endif
	} s;
	struct cvmx_sso_rwq_head_ptrx_s       cn68xx;
	struct cvmx_sso_rwq_head_ptrx_s       cn68xxp1;
};
typedef union cvmx_sso_rwq_head_ptrx cvmx_sso_rwq_head_ptrx_t;
1762
1763/**
1764 * cvmx_sso_rwq_pop_fptr
1765 *
1766 * SSO_RWQ_POP_FPTR = SSO Pop Free Pointer
1767 *
1768 * This register is used by SW to remove pointers for buffer-reallocation and diagnostics, and
1769 * should only be used when SSO is idle.
1770 *
 * To remove ALL pointers, software must ensure that there are modulus 16
1772 * pointers in the FPA.  To do this, SSO_CFG.RWQ_BYP_DIS must be set, the FPA
1773 * pointer count read, and enough fake buffers pushed via SSO_RWQ_PSH_FPTR to
1774 * bring the FPA pointer count up to mod 16.
1775 */
union cvmx_sso_rwq_pop_fptr {
	uint64_t u64;
	struct cvmx_sso_rwq_pop_fptr_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t val                          : 1;  /**< Free Pointer Valid */
	uint64_t cnt                          : 6;  /**< fptr_in count */
	uint64_t reserved_38_56               : 19;
	uint64_t fptr                         : 31; /**< Free Pointer */
	uint64_t reserved_0_6                 : 7;
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t reserved_0_6                 : 7;
	uint64_t fptr                         : 31;
	uint64_t reserved_38_56               : 19;
	uint64_t cnt                          : 6;
	uint64_t val                          : 1;
#endif
	} s;
	struct cvmx_sso_rwq_pop_fptr_s        cn68xx;
	/* cn68xx pass 1 layout: identical except the CNT field does not exist
	   (bits 57..62 are reserved). */
	struct cvmx_sso_rwq_pop_fptr_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t val                          : 1;  /**< Free Pointer Valid */
	uint64_t reserved_38_62               : 25;
	uint64_t fptr                         : 31; /**< Free Pointer */
	uint64_t reserved_0_6                 : 7;
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t reserved_0_6                 : 7;
	uint64_t fptr                         : 31;
	uint64_t reserved_38_62               : 25;
	uint64_t val                          : 1;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_rwq_pop_fptr cvmx_sso_rwq_pop_fptr_t;
1809
1810/**
1811 * cvmx_sso_rwq_psh_fptr
1812 *
1813 * CSR reserved addresses: (56): 0xc240..0xc3f8
1814 * SSO_RWQ_PSH_FPTR = SSO Free Pointer FIFO
1815 *
1816 * This register is used by SW to initialize the SSO with a pool of free
1817 * pointers by writing the FPTR field whenever FULL = 0. Free pointers are
1818 * fetched/released from/to the pool when accessing WQE entries stored remotely
1819 * (in remote linked lists).  Free pointers should be 128 byte aligned, each of
1820 * 256 bytes. This register should only be used when SSO is idle.
1821 *
1822 * Software needs to set aside buffering for
1823 *      8 + 48 + ROUNDUP(N/26)
1824 *
1825 * where as many as N DRAM work queue entries may be used.  The first 8 buffers
1826 * are used to setup the SSO_RWQ_HEAD_PTR and SSO_RWQ_TAIL_PTRs, and the
1827 * remainder are pushed via this register.
1828 *
1829 * IMPLEMENTATION NOTES--NOT FOR SPEC:
1830 *      48 avoids false out of buffer error due to (16) FPA and in-sso FPA buffering (32)
1831 *      26 is number of WAE's per 256B buffer
1832 */
union cvmx_sso_rwq_psh_fptr {
	uint64_t u64;
	struct cvmx_sso_rwq_psh_fptr_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t full                         : 1;  /**< FIFO Full.  When set, the FPA is busy writing entries
                                                         and software must wait before adding new entries. */
	uint64_t cnt                          : 4;  /**< fptr_out count */
	uint64_t reserved_38_58               : 21;
	uint64_t fptr                         : 31; /**< Free Pointer */
	uint64_t reserved_0_6                 : 7;
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t reserved_0_6                 : 7;
	uint64_t fptr                         : 31;
	uint64_t reserved_38_58               : 21;
	uint64_t cnt                          : 4;
	uint64_t full                         : 1;
#endif
	} s;
	struct cvmx_sso_rwq_psh_fptr_s        cn68xx;
	/* cn68xx pass 1 layout: identical except the CNT field does not exist
	   (bits 59..62 are reserved). */
	struct cvmx_sso_rwq_psh_fptr_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t full                         : 1;  /**< FIFO Full.  When set, the FPA is busy writing entries
                                                         and software must wait before adding new entries. */
	uint64_t reserved_38_62               : 25;
	uint64_t fptr                         : 31; /**< Free Pointer */
	uint64_t reserved_0_6                 : 7;
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t reserved_0_6                 : 7;
	uint64_t fptr                         : 31;
	uint64_t reserved_38_62               : 25;
	uint64_t full                         : 1;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_rwq_psh_fptr cvmx_sso_rwq_psh_fptr_t;
1868
1869/**
1870 * cvmx_sso_rwq_tail_ptr#
1871 *
1872 * CSR reserved addresses: (56): 0xc040..0xc1f8
1873 * SSO_RWQ_TAIL_PTRX = SSO Remote Queue Tail Register
1874 *                (one per QOS level)
1875 * Contains the ptr to the last entry of the remote linked list(s) for a particular
1876 * QoS level. SW must initialize the remote linked list(s) by programming
1877 * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
1878 */
union cvmx_sso_rwq_tail_ptrx {
	uint64_t u64;
	struct cvmx_sso_rwq_tail_ptrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_38_63               : 26;
	uint64_t ptr                          : 31; /**< Tail Pointer */
	uint64_t reserved_5_6                 : 2;
	uint64_t rctr                         : 5;  /**< Number of entries waiting to be sent out to external
                                                         RAM (outbound queues) */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t rctr                         : 5;
	uint64_t reserved_5_6                 : 2;
	uint64_t ptr                          : 31;
	uint64_t reserved_38_63               : 26;
#endif
	} s;
	struct cvmx_sso_rwq_tail_ptrx_s       cn68xx;
	struct cvmx_sso_rwq_tail_ptrx_s       cn68xxp1;
};
typedef union cvmx_sso_rwq_tail_ptrx cvmx_sso_rwq_tail_ptrx_t;
1899
1900/**
1901 * cvmx_sso_ts_pc
1902 *
1903 * SSO_TS_PC = SSO Tag Switch Performance Counter
1904 *
1905 * Counts the number of tag switch requests.
1906 * Counter rolls over through zero when max value exceeded.
1907 */
union cvmx_sso_ts_pc {
	uint64_t u64;
	struct cvmx_sso_ts_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t ts_pc                        : 64; /**< Tag switch performance counter */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t ts_pc                        : 64;
#endif
	} s;
	struct cvmx_sso_ts_pc_s               cn68xx;
	struct cvmx_sso_ts_pc_s               cn68xxp1;
};
typedef union cvmx_sso_ts_pc cvmx_sso_ts_pc_t;
1921
1922/**
1923 * cvmx_sso_wa_com_pc
1924 *
1925 * SSO_WA_COM_PC = SSO Work Add Combined Performance Counter
1926 *
1927 * Counts the number of add new work requests for all QOS levels.
1928 * Counter rolls over through zero when max value exceeded.
1929 */
union cvmx_sso_wa_com_pc {
	uint64_t u64;
	struct cvmx_sso_wa_com_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t wa_pc                        : 64; /**< Work add combined performance counter */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t wa_pc                        : 64;
#endif
	} s;
	struct cvmx_sso_wa_com_pc_s           cn68xx;
	struct cvmx_sso_wa_com_pc_s           cn68xxp1;
};
typedef union cvmx_sso_wa_com_pc cvmx_sso_wa_com_pc_t;
1943
1944/**
1945 * cvmx_sso_wa_pc#
1946 *
1947 * CSR reserved addresses: (64): 0x4200..0x43f8
1948 * CSR align addresses: ===========================================================================================================
1949 * SSO_WA_PCX = SSO Work Add Performance Counter
1950 *             (one per QOS level)
1951 *
1952 * Counts the number of add new work requests for each QOS level.
1953 * Counter rolls over through zero when max value exceeded.
1954 */
union cvmx_sso_wa_pcx {
	uint64_t u64;
	struct cvmx_sso_wa_pcx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t wa_pc                        : 64; /**< Work add performance counter for QOS level X */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t wa_pc                        : 64;
#endif
	} s;
	struct cvmx_sso_wa_pcx_s              cn68xx;
	struct cvmx_sso_wa_pcx_s              cn68xxp1;
};
typedef union cvmx_sso_wa_pcx cvmx_sso_wa_pcx_t;
1968
1969/**
1970 * cvmx_sso_wq_int
1971 *
1972 * Note, the old POW offsets ran from 0x0 to 0x3f8, leaving the next available slot at 0x400.
1973 * To ensure no overlap, start on 4k boundary: 0x1000.
1974 * SSO_WQ_INT = SSO Work Queue Interrupt Register
1975 *
1976 * Contains the bits (one per group) that set work queue interrupts and are
1977 * used to clear these interrupts.  For more information regarding this
1978 * register, see the interrupt section of the SSO spec.
1979 */
union cvmx_sso_wq_int {
	uint64_t u64;
	struct cvmx_sso_wq_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t wq_int                       : 64; /**< Work queue interrupt bits
                                                         Corresponding WQ_INT bit is set by HW whenever:
                                                           - SSO_WQ_INT_CNTX[IQ_CNT] >=
                                                             SSO_WQ_INT_THRX[IQ_THR] and the threshold
                                                             interrupt is not disabled.
                                                             SSO_WQ_IQ_DISX[IQ_DIS<X>]==1 disables the interrupt
                                                             SSO_WQ_INT_THRX[IQ_THR]==0 disables the int.
                                                           - SSO_WQ_INT_CNTX[DS_CNT] >=
                                                             SSO_WQ_INT_THRX[DS_THR] and the threshold
                                                             interrupt is not disabled
                                                             SSO_WQ_INT_THRX[DS_THR]==0 disables the int.
                                                           - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
                                                             counter SSO_WQ_INT_PC[PC]==0 and
                                                             SSO_WQ_INT_THRX[TC_EN]==1 and at least one of:
                                                               - SSO_WQ_INT_CNTX[IQ_CNT] > 0
                                                               - SSO_WQ_INT_CNTX[DS_CNT] > 0 */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t wq_int                       : 64;
#endif
	} s;
	struct cvmx_sso_wq_int_s              cn68xx;
	struct cvmx_sso_wq_int_s              cn68xxp1;
};
typedef union cvmx_sso_wq_int cvmx_sso_wq_int_t;
2008
2009/**
2010 * cvmx_sso_wq_int_cnt#
2011 *
2012 * CSR reserved addresses: (64): 0x7200..0x73f8
2013 * CSR align addresses: ===========================================================================================================
2014 * SSO_WQ_INT_CNTX = SSO Work Queue Interrupt Count Register
2015 *                   (one per group)
2016 *
2017 * Contains a read-only copy of the counts used to trigger work queue
2018 * interrupts.  For more information regarding this register, see the interrupt
2019 * section.
2020 */
union cvmx_sso_wq_int_cntx {
	uint64_t u64;
	struct cvmx_sso_wq_int_cntx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t tc_cnt                       : 4;  /**< Time counter current value for group X
                                                         HW sets TC_CNT to SSO_WQ_INT_THRX[TC_THR] whenever:
                                                           - corresponding SSO_WQ_INT_CNTX[IQ_CNT]==0 and
                                                             corresponding SSO_WQ_INT_CNTX[DS_CNT]==0
                                                           - corresponding SSO_WQ_INT[WQ_INT<X>] is written
                                                             with a 1 by SW
                                                           - corresponding SSO_WQ_IQ_DIS[IQ_DIS<X>] is written
                                                             with a 1 by SW
                                                           - corresponding SSO_WQ_INT_THRX is written by SW
                                                           - TC_CNT==1 and periodic counter
                                                             SSO_WQ_INT_PC[PC]==0
                                                         Otherwise, HW decrements TC_CNT whenever the
                                                         periodic counter SSO_WQ_INT_PC[PC]==0.
                                                         TC_CNT is 0 whenever SSO_WQ_INT_THRX[TC_THR]==0. */
	uint64_t reserved_26_27               : 2;
	uint64_t ds_cnt                       : 12; /**< De-schedule executable count for group X */
	uint64_t reserved_12_13               : 2;
	uint64_t iq_cnt                       : 12; /**< Input queue executable count for group X */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t iq_cnt                       : 12;
	uint64_t reserved_12_13               : 2;
	uint64_t ds_cnt                       : 12;
	uint64_t reserved_26_27               : 2;
	uint64_t tc_cnt                       : 4;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_sso_wq_int_cntx_s         cn68xx;
	struct cvmx_sso_wq_int_cntx_s         cn68xxp1;
};
typedef union cvmx_sso_wq_int_cntx cvmx_sso_wq_int_cntx_t;
2057
2058/**
2059 * cvmx_sso_wq_int_pc
2060 *
2061 * CSR reserved addresses: (1): 0x1018..0x1018
2062 * SSO_WQ_INT_PC = SSO Work Queue Interrupt Periodic Counter Register
2063 *
2064 * Contains the threshold value for the work queue interrupt periodic counter
2065 * and also a read-only copy of the periodic counter.  For more information
2066 * regarding this register, see the interrupt section.
2067 */
union cvmx_sso_wq_int_pc {
	uint64_t u64;
	struct cvmx_sso_wq_int_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_60_63               : 4;
	uint64_t pc                           : 28; /**< Work queue interrupt periodic counter */
	uint64_t reserved_28_31               : 4;
	uint64_t pc_thr                       : 20; /**< Work queue interrupt periodic counter threshold */
	uint64_t reserved_0_7                 : 8;
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t reserved_0_7                 : 8;
	uint64_t pc_thr                       : 20;
	uint64_t reserved_28_31               : 4;
	uint64_t pc                           : 28;
	uint64_t reserved_60_63               : 4;
#endif
	} s;
	struct cvmx_sso_wq_int_pc_s           cn68xx;
	struct cvmx_sso_wq_int_pc_s           cn68xxp1;
};
typedef union cvmx_sso_wq_int_pc cvmx_sso_wq_int_pc_t;
2089
2090/**
2091 * cvmx_sso_wq_int_thr#
2092 *
2093 * CSR reserved addresses: (96): 0x6100..0x63f8
2094 * CSR align addresses: ===========================================================================================================
2095 * SSO_WQ_INT_THR(0..63) = SSO Work Queue Interrupt Threshold Registers
2096 *                         (one per group)
2097 *
2098 * Contains the thresholds for enabling and setting work queue interrupts.  For
2099 * more information, see the interrupt section.
2100 *
2101 * Note: Up to 16 of the SSO's internal storage buffers can be allocated
2102 * for hardware use and are therefore not available for incoming work queue
2103 * entries.  Additionally, any WS that is not in the EMPTY state consumes a
2104 * buffer.  Thus in a 32 PP system, it is not advisable to set either IQ_THR or
2105 * DS_THR to greater than 2048 - 16 - 32*2 = 1968.  Doing so may prevent the
2106 * interrupt from ever triggering.
2107 *
2108 * Priorities for QOS levels 0..7
2109 */
union cvmx_sso_wq_int_thrx {
	uint64_t u64;
	struct cvmx_sso_wq_int_thrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_33_63               : 31;
	uint64_t tc_en                        : 1;  /**< Time counter interrupt enable for group X
                                                         TC_EN must be zero when TC_THR==0 */
	uint64_t tc_thr                       : 4;  /**< Time counter interrupt threshold for group X
                                                         When TC_THR==0, SSO_WQ_INT_CNTX[TC_CNT] is zero */
	uint64_t reserved_26_27               : 2;
	uint64_t ds_thr                       : 12; /**< De-schedule count threshold for group X
                                                         DS_THR==0 disables the threshold interrupt */
	uint64_t reserved_12_13               : 2;
	uint64_t iq_thr                       : 12; /**< Input queue count threshold for group X
                                                         IQ_THR==0 disables the threshold interrupt */
#else
	/* Little-endian view: same fields as above, mirrored in bit order. */
	uint64_t iq_thr                       : 12;
	uint64_t reserved_12_13               : 2;
	uint64_t ds_thr                       : 12;
	uint64_t reserved_26_27               : 2;
	uint64_t tc_thr                       : 4;
	uint64_t tc_en                        : 1;
	uint64_t reserved_33_63               : 31;
#endif
	} s;
	struct cvmx_sso_wq_int_thrx_s         cn68xx;
	struct cvmx_sso_wq_int_thrx_s         cn68xxp1;
};
typedef union cvmx_sso_wq_int_thrx cvmx_sso_wq_int_thrx_t;
2139
2140/**
2141 * cvmx_sso_wq_iq_dis
2142 *
2143 * CSR reserved addresses: (1): 0x1008..0x1008
2144 * SSO_WQ_IQ_DIS = SSO Input Queue Interrupt Temporary Disable Mask
2145 *
2146 * Contains the input queue interrupt temporary disable bits (one per group).
2147 * For more information regarding this register, see the interrupt section.
2148 */
union cvmx_sso_wq_iq_dis {
	uint64_t u64;
	struct cvmx_sso_wq_iq_dis_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t iq_dis                       : 64; /**< Input queue interrupt temporary disable mask
                                                         Corresponding SSO_WQ_INTX[WQ_INT<X>] bit cannot be
                                                         set due to IQ_CNT/IQ_THR check when this bit is set.
                                                         Corresponding IQ_DIS bit is cleared by HW whenever:
                                                          - SSO_WQ_INT_CNTX[IQ_CNT] is zero, or
                                                          - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
                                                            counter SSO_WQ_INT_PC[PC]==0 */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t iq_dis                       : 64;
#endif
	} s;
	struct cvmx_sso_wq_iq_dis_s           cn68xx;
	struct cvmx_sso_wq_iq_dis_s           cn68xxp1;
};
typedef union cvmx_sso_wq_iq_dis cvmx_sso_wq_iq_dis_t;
2168
2169/**
2170 * cvmx_sso_ws_pc#
2171 *
2172 * CSR reserved addresses: (225): 0x3100..0x3800
2173 * CSR align addresses: ===========================================================================================================
2174 * SSO_WS_PCX = SSO Work Schedule Performance Counter
2175 *              (one per group)
2176 *
2177 * Counts the number of work schedules for each group.
2178 * Counter rolls over through zero when max value exceeded.
2179 */
union cvmx_sso_ws_pcx {
	uint64_t u64;
	struct cvmx_sso_ws_pcx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t ws_pc                        : 64; /**< Work schedule performance counter for group X */
#else
	/* Little-endian view: same field as above (full 64-bit, no mirroring needed). */
	uint64_t ws_pc                        : 64;
#endif
	} s;
	struct cvmx_sso_ws_pcx_s              cn68xx;
	struct cvmx_sso_ws_pcx_s              cn68xxp1;
};
typedef union cvmx_sso_ws_pcx cvmx_sso_ws_pcx_t;
2193
2194#endif
2195