/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.

 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.

 * This Software, including technical data, may be subject to U.S. export  control
 * laws, including the U.S. Export Administration Act and its  associated
 * regulations, and may be subject to export or import  regulations in other
 * countries.

 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * cvmx-dfm-defs.h
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon dfm.
 *
 * This file is auto generated. Do not edit.
 *
 * <hr>$Revision$<hr>
 *
 */
#ifndef __CVMX_DFM_TYPEDEFS_H__
#define __CVMX_DFM_TYPEDEFS_H__

#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_CTL CVMX_DFM_CHAR_CTL_FUNC()
static inline uint64_t CVMX_DFM_CHAR_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_CHAR_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000220ull);
}
#else
#define CVMX_DFM_CHAR_CTL (CVMX_ADD_IO_SEG(0x00011800D4000220ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_MASK0 CVMX_DFM_CHAR_MASK0_FUNC()
static inline uint64_t CVMX_DFM_CHAR_MASK0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_CHAR_MASK0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000228ull);
}
#else
#define CVMX_DFM_CHAR_MASK0 (CVMX_ADD_IO_SEG(0x00011800D4000228ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_MASK2 CVMX_DFM_CHAR_MASK2_FUNC()
static inline uint64_t CVMX_DFM_CHAR_MASK2_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_CHAR_MASK2 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000238ull);
}
#else
#define CVMX_DFM_CHAR_MASK2 (CVMX_ADD_IO_SEG(0x00011800D4000238ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_MASK4 CVMX_DFM_CHAR_MASK4_FUNC()
static inline uint64_t CVMX_DFM_CHAR_MASK4_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_CHAR_MASK4 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000318ull);
}
#else
#define CVMX_DFM_CHAR_MASK4 (CVMX_ADD_IO_SEG(0x00011800D4000318ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_COMP_CTL2 CVMX_DFM_COMP_CTL2_FUNC()
static inline uint64_t CVMX_DFM_COMP_CTL2_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_COMP_CTL2 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001B8ull);
}
#else
#define CVMX_DFM_COMP_CTL2 (CVMX_ADD_IO_SEG(0x00011800D40001B8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CONFIG CVMX_DFM_CONFIG_FUNC()
static inline uint64_t CVMX_DFM_CONFIG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_CONFIG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000188ull);
}
#else
#define CVMX_DFM_CONFIG (CVMX_ADD_IO_SEG(0x00011800D4000188ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CONTROL CVMX_DFM_CONTROL_FUNC()
static inline uint64_t CVMX_DFM_CONTROL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_CONTROL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000190ull);
}
#else
#define CVMX_DFM_CONTROL (CVMX_ADD_IO_SEG(0x00011800D4000190ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_DLL_CTL2 CVMX_DFM_DLL_CTL2_FUNC()
static inline uint64_t CVMX_DFM_DLL_CTL2_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_DLL_CTL2 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001C8ull);
}
#else
#define CVMX_DFM_DLL_CTL2 (CVMX_ADD_IO_SEG(0x00011800D40001C8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_DLL_CTL3 CVMX_DFM_DLL_CTL3_FUNC()
static inline uint64_t CVMX_DFM_DLL_CTL3_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_DLL_CTL3 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000218ull);
}
#else
#define CVMX_DFM_DLL_CTL3 (CVMX_ADD_IO_SEG(0x00011800D4000218ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FCLK_CNT CVMX_DFM_FCLK_CNT_FUNC()
static inline uint64_t CVMX_DFM_FCLK_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_FCLK_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001E0ull);
}
#else
#define CVMX_DFM_FCLK_CNT (CVMX_ADD_IO_SEG(0x00011800D40001E0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_BIST CVMX_DFM_FNT_BIST_FUNC()
static inline uint64_t CVMX_DFM_FNT_BIST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_FNT_BIST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40007F8ull);
}
#else
#define CVMX_DFM_FNT_BIST (CVMX_ADD_IO_SEG(0x00011800D40007F8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_CTL CVMX_DFM_FNT_CTL_FUNC()
static inline uint64_t CVMX_DFM_FNT_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_FNT_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000400ull);
}
#else
#define CVMX_DFM_FNT_CTL (CVMX_ADD_IO_SEG(0x00011800D4000400ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_IENA CVMX_DFM_FNT_IENA_FUNC()
static inline uint64_t CVMX_DFM_FNT_IENA_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_FNT_IENA not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000410ull);
}
#else
#define CVMX_DFM_FNT_IENA (CVMX_ADD_IO_SEG(0x00011800D4000410ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_SCLK CVMX_DFM_FNT_SCLK_FUNC()
static inline uint64_t CVMX_DFM_FNT_SCLK_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_FNT_SCLK not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000418ull);
}
#else
#define CVMX_DFM_FNT_SCLK (CVMX_ADD_IO_SEG(0x00011800D4000418ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_STAT CVMX_DFM_FNT_STAT_FUNC()
static inline uint64_t CVMX_DFM_FNT_STAT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_FNT_STAT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000408ull);
}
#else
#define CVMX_DFM_FNT_STAT (CVMX_ADD_IO_SEG(0x00011800D4000408ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_IFB_CNT CVMX_DFM_IFB_CNT_FUNC()
static inline uint64_t CVMX_DFM_IFB_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_IFB_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001D0ull);
}
#else
#define CVMX_DFM_IFB_CNT (CVMX_ADD_IO_SEG(0x00011800D40001D0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_MODEREG_PARAMS0 CVMX_DFM_MODEREG_PARAMS0_FUNC()
static inline uint64_t CVMX_DFM_MODEREG_PARAMS0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_MODEREG_PARAMS0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001A8ull);
}
#else
#define CVMX_DFM_MODEREG_PARAMS0 (CVMX_ADD_IO_SEG(0x00011800D40001A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_MODEREG_PARAMS1 CVMX_DFM_MODEREG_PARAMS1_FUNC()
static inline uint64_t CVMX_DFM_MODEREG_PARAMS1_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_MODEREG_PARAMS1 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000260ull);
}
#else
#define CVMX_DFM_MODEREG_PARAMS1 (CVMX_ADD_IO_SEG(0x00011800D4000260ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_OPS_CNT CVMX_DFM_OPS_CNT_FUNC()
static inline uint64_t CVMX_DFM_OPS_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_OPS_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001D8ull);
}
#else
#define CVMX_DFM_OPS_CNT (CVMX_ADD_IO_SEG(0x00011800D40001D8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_PHY_CTL CVMX_DFM_PHY_CTL_FUNC()
static inline uint64_t CVMX_DFM_PHY_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_PHY_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000210ull);
}
#else
#define CVMX_DFM_PHY_CTL (CVMX_ADD_IO_SEG(0x00011800D4000210ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RESET_CTL CVMX_DFM_RESET_CTL_FUNC()
static inline uint64_t CVMX_DFM_RESET_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_RESET_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000180ull);
}
#else
#define CVMX_DFM_RESET_CTL (CVMX_ADD_IO_SEG(0x00011800D4000180ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RLEVEL_CTL CVMX_DFM_RLEVEL_CTL_FUNC()
static inline uint64_t CVMX_DFM_RLEVEL_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_RLEVEL_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40002A0ull);
}
#else
#define CVMX_DFM_RLEVEL_CTL (CVMX_ADD_IO_SEG(0x00011800D40002A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RLEVEL_DBG CVMX_DFM_RLEVEL_DBG_FUNC()
static inline uint64_t CVMX_DFM_RLEVEL_DBG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_RLEVEL_DBG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40002A8ull);
}
#else
#define CVMX_DFM_RLEVEL_DBG (CVMX_ADD_IO_SEG(0x00011800D40002A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_DFM_RLEVEL_RANKX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_DFM_RLEVEL_RANKX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800D4000280ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_DFM_RLEVEL_RANKX(offset) (CVMX_ADD_IO_SEG(0x00011800D4000280ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RODT_MASK CVMX_DFM_RODT_MASK_FUNC()
static inline uint64_t CVMX_DFM_RODT_MASK_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_RODT_MASK not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000268ull);
}
#else
#define CVMX_DFM_RODT_MASK (CVMX_ADD_IO_SEG(0x00011800D4000268ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_SLOT_CTL0 CVMX_DFM_SLOT_CTL0_FUNC()
static inline uint64_t CVMX_DFM_SLOT_CTL0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_SLOT_CTL0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001F8ull);
}
#else
#define CVMX_DFM_SLOT_CTL0 (CVMX_ADD_IO_SEG(0x00011800D40001F8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_SLOT_CTL1 CVMX_DFM_SLOT_CTL1_FUNC()
static inline uint64_t CVMX_DFM_SLOT_CTL1_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_SLOT_CTL1 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000200ull);
}
#else
#define CVMX_DFM_SLOT_CTL1 (CVMX_ADD_IO_SEG(0x00011800D4000200ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_TIMING_PARAMS0 CVMX_DFM_TIMING_PARAMS0_FUNC()
static inline uint64_t CVMX_DFM_TIMING_PARAMS0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_TIMING_PARAMS0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000198ull);
}
#else
#define CVMX_DFM_TIMING_PARAMS0 (CVMX_ADD_IO_SEG(0x00011800D4000198ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_TIMING_PARAMS1 CVMX_DFM_TIMING_PARAMS1_FUNC()
static inline uint64_t CVMX_DFM_TIMING_PARAMS1_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_TIMING_PARAMS1 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001A0ull);
}
#else
#define CVMX_DFM_TIMING_PARAMS1 (CVMX_ADD_IO_SEG(0x00011800D40001A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_WLEVEL_CTL CVMX_DFM_WLEVEL_CTL_FUNC()
static inline uint64_t CVMX_DFM_WLEVEL_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_WLEVEL_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000300ull);
}
#else
#define CVMX_DFM_WLEVEL_CTL (CVMX_ADD_IO_SEG(0x00011800D4000300ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_WLEVEL_DBG CVMX_DFM_WLEVEL_DBG_FUNC()
static inline uint64_t CVMX_DFM_WLEVEL_DBG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_WLEVEL_DBG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000308ull);
}
#else
#define CVMX_DFM_WLEVEL_DBG (CVMX_ADD_IO_SEG(0x00011800D4000308ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_DFM_WLEVEL_RANKX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_DFM_WLEVEL_RANKX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800D40002B0ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_DFM_WLEVEL_RANKX(offset) (CVMX_ADD_IO_SEG(0x00011800D40002B0ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_WODT_MASK CVMX_DFM_WODT_MASK_FUNC()
static inline uint64_t CVMX_DFM_WODT_MASK_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_DFM_WODT_MASK not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001B0ull);
}
#else
#define CVMX_DFM_WODT_MASK (CVMX_ADD_IO_SEG(0x00011800D40001B0ull))
#endif

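/*
 * Illustrative usage sketch: the address macros above are normally paired
 * with the generic CSR accessors from cvmx.h (assumed here to be
 * cvmx_read_csr()/cvmx_write_csr() and cvmx_dprintf()) and the register
 * types defined below.
 *
 *   #include "cvmx.h"
 *   #include "cvmx-dfm-defs.h"
 *
 *   static void dfm_show_fclk_count(void)
 *   {
 *       // Free-running DFM fclk counter; with CVMX_ENABLE_CSR_ADDRESS_CHECKING
 *       // set, the macro expands to a function that warns on non-CN63XX parts.
 *       uint64_t fclks = cvmx_read_csr(CVMX_DFM_FCLK_CNT);
 *       cvmx_dprintf("DFM fclk count: %llu\n", (unsigned long long)fclks);
 *   }
 */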
/**
 * cvmx_dfm_char_ctl
 *
 * DFM_CHAR_CTL = DFM Characterization Control
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_dfm_char_ctl
{
	uint64_t u64;
	struct cvmx_dfm_char_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_42_63               : 22;
	uint64_t en                           : 1;  /**< Enable characterization */
	uint64_t sel                          : 1;  /**< Pattern select
                                                         0 = PRBS
                                                         1 = Programmable pattern */
	uint64_t prog                         : 8;  /**< Programmable pattern */
	uint64_t prbs                         : 32; /**< PRBS Polynomial */
#else
	uint64_t prbs                         : 32;
	uint64_t prog                         : 8;
	uint64_t sel                          : 1;
	uint64_t en                           : 1;
	uint64_t reserved_42_63               : 22;
#endif
	} s;
	struct cvmx_dfm_char_ctl_s            cn63xx;
	struct cvmx_dfm_char_ctl_s            cn63xxp1;
};
typedef union cvmx_dfm_char_ctl cvmx_dfm_char_ctl_t;
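
/*
 * Sketch of the union access pattern shared by every DFM register type in
 * this file: read the raw 64-bit value through .u64, operate on the named
 * bit-fields, write it back. The field values below are illustrative only;
 * cvmx_read_csr()/cvmx_write_csr() are assumed from cvmx.h.
 *
 *   cvmx_dfm_char_ctl_t char_ctl;
 *
 *   char_ctl.u64 = cvmx_read_csr(CVMX_DFM_CHAR_CTL);
 *   char_ctl.s.sel  = 0;      // 0 = PRBS pattern
 *   char_ctl.s.prbs = 0x10d;  // example polynomial value
 *   char_ctl.s.en   = 1;      // enable characterization
 *   cvmx_write_csr(CVMX_DFM_CHAR_CTL, char_ctl.u64);
 */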

/**
 * cvmx_dfm_char_mask0
 *
 * DFM_CHAR_MASK0 = DFM Characterization Control Mask0
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_dfm_char_mask0
{
	uint64_t u64;
	struct cvmx_dfm_char_mask0_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t mask                         : 16; /**< Mask for DQ0[15:0] */
#else
	uint64_t mask                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_dfm_char_mask0_s          cn63xx;
	struct cvmx_dfm_char_mask0_s          cn63xxp1;
};
typedef union cvmx_dfm_char_mask0 cvmx_dfm_char_mask0_t;

/**
 * cvmx_dfm_char_mask2
 *
 * DFM_CHAR_MASK2 = DFM Characterization Control Mask2
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_dfm_char_mask2
{
	uint64_t u64;
	struct cvmx_dfm_char_mask2_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t mask                         : 16; /**< Mask for DQ1[15:0] */
#else
	uint64_t mask                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_dfm_char_mask2_s          cn63xx;
	struct cvmx_dfm_char_mask2_s          cn63xxp1;
};
typedef union cvmx_dfm_char_mask2 cvmx_dfm_char_mask2_t;

/**
 * cvmx_dfm_char_mask4
 *
 * DFM_CHAR_MASK4 = DFM Characterization Mask4
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_dfm_char_mask4
{
	uint64_t u64;
	struct cvmx_dfm_char_mask4_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_33_63               : 31;
	uint64_t reset_n_mask                 : 1;  /**< Mask for RESET_N */
	uint64_t a_mask                       : 16; /**< Mask for A[15:0] */
	uint64_t ba_mask                      : 3;  /**< Mask for BA[2:0] */
	uint64_t we_n_mask                    : 1;  /**< Mask for WE_N */
	uint64_t cas_n_mask                   : 1;  /**< Mask for CAS_N */
	uint64_t ras_n_mask                   : 1;  /**< Mask for RAS_N */
	uint64_t odt1_mask                    : 2;  /**< Mask for ODT1
                                                         For DFM, ODT1 is reserved. */
	uint64_t odt0_mask                    : 2;  /**< Mask for ODT0 */
	uint64_t cs1_n_mask                   : 2;  /**< Mask for CS1_N
                                                         For DFM, CS1_N is reserved. */
	uint64_t cs0_n_mask                   : 2;  /**< Mask for CS0_N */
	uint64_t cke_mask                     : 2;  /**< Mask for CKE
                                                         For DFM, CKE_MASK[1] is reserved. */
#else
	uint64_t cke_mask                     : 2;
	uint64_t cs0_n_mask                   : 2;
	uint64_t cs1_n_mask                   : 2;
	uint64_t odt0_mask                    : 2;
	uint64_t odt1_mask                    : 2;
	uint64_t ras_n_mask                   : 1;
	uint64_t cas_n_mask                   : 1;
	uint64_t we_n_mask                    : 1;
	uint64_t ba_mask                      : 3;
	uint64_t a_mask                       : 16;
	uint64_t reset_n_mask                 : 1;
	uint64_t reserved_33_63               : 31;
#endif
	} s;
	struct cvmx_dfm_char_mask4_s          cn63xx;
};
typedef union cvmx_dfm_char_mask4 cvmx_dfm_char_mask4_t;

/**
 * cvmx_dfm_comp_ctl2
 *
 * DFM_COMP_CTL2 = DFM Compensation control2
 *
 */
union cvmx_dfm_comp_ctl2
{
	uint64_t u64;
	struct cvmx_dfm_comp_ctl2_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_34_63               : 30;
	uint64_t ddr__ptune                   : 4;  /**< DDR pctl from compensation circuit
                                                         The encoded value provides debug information for the
                                                         compensation impedance on P-pullup */
	uint64_t ddr__ntune                   : 4;  /**< DDR nctl from compensation circuit
                                                         The encoded value provides debug information for the
                                                         compensation impedance on N-pulldown */
	uint64_t m180                         : 1;  /**< Cap impedance at 180 ohm (instead of 240 ohm) */
	uint64_t byp                          : 1;  /**< Bypass mode
                                                         Use compensation setting from PTUNE,NTUNE */
	uint64_t ptune                        : 4;  /**< PCTL impedance control in bypass mode */
	uint64_t ntune                        : 4;  /**< NCTL impedance control in bypass mode */
	uint64_t rodt_ctl                     : 4;  /**< NCTL RODT impedance control bits
                                                         0000 = No ODT
                                                         0001 = 20 ohm
                                                         0010 = 30 ohm
                                                         0011 = 40 ohm
                                                         0100 = 60 ohm
                                                         0101 = 120 ohm
                                                         0110-1111 = Reserved */
	uint64_t cmd_ctl                      : 4;  /**< Drive strength control for CMD/A/RESET_N/CKE drivers
                                                         0001 = 24 ohm
                                                         0010 = 26.67 ohm
                                                         0011 = 30 ohm
                                                         0100 = 34.3 ohm
                                                         0101 = 40 ohm
                                                         0110 = 48 ohm
                                                         0111 = 60 ohm
                                                         0000,1000-1111 = Reserved */
	uint64_t ck_ctl                       : 4;  /**< Drive strength control for CK/CS_N/ODT drivers
                                                         0001 = 24 ohm
                                                         0010 = 26.67 ohm
                                                         0011 = 30 ohm
                                                         0100 = 34.3 ohm
                                                         0101 = 40 ohm
                                                         0110 = 48 ohm
                                                         0111 = 60 ohm
                                                         0000,1000-1111 = Reserved */
	uint64_t dqx_ctl                      : 4;  /**< Drive strength control for DQ/DQS drivers
                                                         0001 = 24 ohm
                                                         0010 = 26.67 ohm
                                                         0011 = 30 ohm
                                                         0100 = 34.3 ohm
                                                         0101 = 40 ohm
                                                         0110 = 48 ohm
                                                         0111 = 60 ohm
                                                         0000,1000-1111 = Reserved */
#else
	uint64_t dqx_ctl                      : 4;
	uint64_t ck_ctl                       : 4;
	uint64_t cmd_ctl                      : 4;
	uint64_t rodt_ctl                     : 4;
	uint64_t ntune                        : 4;
	uint64_t ptune                        : 4;
	uint64_t byp                          : 1;
	uint64_t m180                         : 1;
	uint64_t ddr__ntune                   : 4;
	uint64_t ddr__ptune                   : 4;
	uint64_t reserved_34_63               : 30;
#endif
	} s;
	struct cvmx_dfm_comp_ctl2_s           cn63xx;
	struct cvmx_dfm_comp_ctl2_s           cn63xxp1;
};
typedef union cvmx_dfm_comp_ctl2 cvmx_dfm_comp_ctl2_t;
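
/*
 * Sketch of a read-modify-write of DFM_COMP_CTL2 using the drive-strength
 * encodings documented above. The specific impedance targets are
 * illustrative, not recommendations for any particular board;
 * cvmx_read_csr()/cvmx_write_csr() are assumed from cvmx.h.
 *
 *   cvmx_dfm_comp_ctl2_t comp_ctl2;
 *
 *   comp_ctl2.u64 = cvmx_read_csr(CVMX_DFM_COMP_CTL2);
 *   comp_ctl2.s.dqx_ctl  = 5;  // DQ/DQS drivers:      0101 = 40 ohm
 *   comp_ctl2.s.ck_ctl   = 5;  // CK/CS_N/ODT drivers: 0101 = 40 ohm
 *   comp_ctl2.s.cmd_ctl  = 5;  // CMD/A/RESET_N/CKE:   0101 = 40 ohm
 *   comp_ctl2.s.rodt_ctl = 3;  // read ODT:            0011 = 40 ohm
 *   cvmx_write_csr(CVMX_DFM_COMP_CTL2, comp_ctl2.u64);
 */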

/**
 * cvmx_dfm_config
 *
 * DFM_CONFIG = DFM Memory Configuration Register
 *
 * This register controls certain parameters of Memory Configuration
 *
 * Notes:
 * a. The self refresh entry sequence(s) power the DLL up/down (depending on DFM_MODEREG_PARAMS[DLL])
 * when DFM_CONFIG[SREF_WITH_DLL] is set
 * b. Prior to the self-refresh exit sequence, DFM_MODEREG_PARAMS should be re-programmed (if needed) to the
 * appropriate values
 *
 * DFM Bringup Sequence:
 * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized.
 * 2. Write DFM_COMP_CTL2, DFM_CONTROL, DFM_WODT_MASK, DFM_RODT_MASK, DFM_DUAL_MEMCFG, DFM_TIMING_PARAMS0, DFM_TIMING_PARAMS1,
 *    DFM_MODEREG_PARAMS0, DFM_MODEREG_PARAMS1, DFM_RESET_CTL (with DDR3RST=0), DFM_CONFIG (with INIT_START=0)
 *    with appropriate values, if necessary.
 * 3. Wait 200us, then write DFM_RESET_CTL[DDR3RST] = 1.
 * 4. Initialize all ranks at once by writing DFM_CONFIG[RANKMASK][n] = 1, DFM_CONFIG[INIT_STATUS][n] = 1, and DFM_CONFIG[INIT_START] = 1
 *    where n is a valid rank index for the specific board configuration.
 * 5. for each rank n to be write-leveled [
 *       if auto write-leveling is desired [
 *           write DFM_CONFIG[RANKMASK][n] = 1, DFM_WLEVEL_CTL appropriately and DFM_CONFIG[INIT_START] = 1
 *           wait until DFM_WLEVEL_RANKn[STATUS] = 3
 *       ] else [
 *           write DFM_WLEVEL_RANKn with appropriate values
 *       ]
 *    ]
 * 6. for each rank n to be read-leveled [
 *       if auto read-leveling is desired [
 *           write DFM_CONFIG[RANKMASK][n] = 1, DFM_RLEVEL_CTL appropriately and DFM_CONFIG[INIT_START] = 1
 *           wait until DFM_RLEVEL_RANKn[STATUS] = 3
 *       ] else [
 *           write DFM_RLEVEL_RANKn with appropriate values
 *       ]
 *    ]
 */
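/*
 * Minimal sketch of steps 4 and 5 of the bringup sequence above for a
 * single-rank board (rank 0 only), assuming steps 1-3 have already been
 * performed and that cvmx_read_csr()/cvmx_write_csr() come from cvmx.h.
 * Field values are illustrative.
 *
 *   cvmx_dfm_config_t dfm_config;
 *
 *   // Step 4: power-up/init of rank 0, setting INIT_STATUS in the same write.
 *   dfm_config.u64 = cvmx_read_csr(CVMX_DFM_CONFIG);
 *   dfm_config.s.sequence    = 0;  // 0 = power-up/init
 *   dfm_config.s.rankmask    = 1;  // rank 0
 *   dfm_config.s.init_status = 1;  // mark rank 0 initialized
 *   dfm_config.s.init_start  = 1;  // 0->1 transition runs the sequence
 *   cvmx_write_csr(CVMX_DFM_CONFIG, dfm_config.u64);
 *
 *   // Step 5: automatic write-leveling of rank 0, then poll
 *   // DFM_WLEVEL_RANK0[STATUS] (CVMX_DFM_WLEVEL_RANKX(0)) until it reads 3.
 *   dfm_config.s.sequence   = 6;   // 6 = write-leveling
 *   dfm_config.s.init_start = 1;
 *   cvmx_write_csr(CVMX_DFM_CONFIG, dfm_config.u64);
 */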
661union cvmx_dfm_config
662{
663	uint64_t u64;
664	struct cvmx_dfm_config_s
665	{
666#if __BYTE_ORDER == __BIG_ENDIAN
667	uint64_t reserved_59_63               : 5;
668	uint64_t early_unload_d1_r1           : 1;  /**< Reserved */
669	uint64_t early_unload_d1_r0           : 1;  /**< Reserved */
670	uint64_t early_unload_d0_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 1
671                                                         reads.
672                                                         The recommended EARLY_UNLOAD_D0_R1 value can be calculated
673                                                         after the final DFM_RLEVEL_RANK1[BYTE*] values are
674                                                         selected (as part of read-leveling initialization).
675                                                         Then, determine the largest read-leveling setting
676                                                         for rank 1 (i.e. calculate maxset=MAX(DFM_RLEVEL_RANK1[BYTEi])
677                                                         across all i), then set EARLY_UNLOAD_D0_R1
678                                                         when the low two bits of this largest setting is not
679                                                         3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
680	uint64_t early_unload_d0_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 0
681                                                         reads.
682                                                         The recommended EARLY_UNLOAD_D0_R0 value can be calculated
683                                                         after the final DFM_RLEVEL_RANK0[BYTE*] values are
684                                                         selected (as part of read-leveling initialization).
685                                                         Then, determine the largest read-leveling setting
686                                                         for rank 0 (i.e. calculate maxset=MAX(DFM_RLEVEL_RANK0[BYTEi])
687                                                         across all i), then set EARLY_UNLOAD_D0_R0
688                                                         when the low two bits of this largest setting is not
689                                                         3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
690	uint64_t init_status                  : 4;  /**< Indicates status of initialization
691                                                         INIT_STATUS[n] = 1 implies rank n has been initialized
692                                                         SW must set necessary INIT_STATUS bits with the
693                                                         same DFM_CONFIG write that initiates
694                                                         power-up/init and self-refresh exit sequences
695                                                         (if the required INIT_STATUS bits are not already
696                                                         set before DFM initiates the sequence).
697                                                         INIT_STATUS determines the chip-selects that assert
698                                                         during refresh, ZQCS, and precharge power-down and
699                                                         self-refresh entry/exit SEQUENCE's.
700                                                         INIT_STATUS<3:2> must be zero. */
701	uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
702                                                         MIRRMASK<n> = 1 means Rank n addresses are mirrored
703                                                         for 0 <= n <= 1
704                                                         A mirrored read/write has these differences:
705                                                          - DDR_BA<1> is swapped with DDR_BA<0>
706                                                          - DDR_A<8> is swapped with DDR_A<7>
707                                                          - DDR_A<6> is swapped with DDR_A<5>
708                                                          - DDR_A<4> is swapped with DDR_A<3>
709                                                         MIRRMASK<3:2> must be zero.
710                                                         When RANK_ENA=0, MIRRMASK<1> MBZ */
711	uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
712                                                         To write-level/read-level/initialize rank i, set RANKMASK<i>
713                                                                         RANK_ENA=1               RANK_ENA=0
714                                                           RANKMASK<0> =    CS0                  CS0 and CS1
715                                                           RANKMASK<1> =    CS1                      MBZ
716                                                         For read/write leveling, each rank has to be leveled separately,
717                                                         so RANKMASK should only have one bit set.
718                                                         RANKMASK is not used during self-refresh entry/exit and
719                                                         precharge power-down entry/exit instruction sequences.
720                                                         RANKMASK<3:2> must be zero.
721                                                         When RANK_ENA=0, RANKMASK<1> MBZ */
722	uint64_t rank_ena                     : 1;  /**< RANK enable (for use with multiple ranks)
723                                                         The RANK_ENA bit enables
724                                                         the drive of the CS_N[1:0] and ODT_<1:0> pins differently based on the
725                                                         (PBANK_LSB-1) address bit. */
726	uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
727                                                         When set, self-refresh entry and exit instruction sequences
728                                                         write MR1 and MR2 (in all ranks). (The writes occur before
729                                                         self-refresh entry, and after self-refresh exit.)
730                                                         When clear, self-refresh entry and exit instruction sequences
731                                                         do not write any registers in the DDR3 parts. */
732	uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
733                                                         the shortest DQx lines have a larger delay than the CK line */
734	uint64_t sequence                     : 3;  /**< Instruction sequence that is run after a 0->1
735                                                         transition on DFM_CONFIG[INIT_START]. Self-refresh entry and
736                                                         precharge power-down entry and exit SEQUENCE's can also
737                                                         be initiated automatically by hardware.
738                                                         0=power-up/init                  (RANKMASK used, MR0, MR1, MR2, and MR3 written)
739                                                         1=read-leveling                  (RANKMASK used, MR3 written)
740                                                         2=self-refresh entry             (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
741                                                         3=self-refresh exit,             (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
742                                                         4=precharge power-down entry     (all ranks participate)
743                                                         5=precharge power-down exit      (all ranks participate)
744                                                         6=write-leveling                 (RANKMASK used, MR1 written)
745                                                         7=illegal
746                                                         Precharge power-down entry and exit SEQUENCE's may
747                                                         be automatically generated by the HW when IDLEPOWER!=0.
748                                                         Self-refresh entry SEQUENCE's may be automatically
749                                                         generated by hardware upon a chip warm or soft reset
750                                                         sequence when DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
751                                                         DFM writes the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 CSR field values
752                                                         to the Mode registers in the DRAM parts (MR0, MR1, MR2, and MR3) as part of some of these sequences.
753                                                         Refer to the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 descriptions for more details.
754                                                         The DFR_CKE pin gets activated as part of power-up/init,
755                                                         self-refresh exit, and precharge power-down exit sequences.
756                                                         The DFR_CKE pin gets de-activated as part of self-refresh entry,
757                                                         precharge power-down entry, or DRESET assertion.
758                                                         If there are two consecutive power-up/init's without
759                                                         a DRESET assertion between them, DFM asserts DFR_CKE as part of
760                                                         the first power-up/init, and continues to assert DFR_CKE
761                                                         through the remainder of the first and the second power-up/init.
762                                                         If DFR_CKE deactivation and reactivation is needed for
763                                                         a second power-up/init, a DRESET assertion is required
764                                                         between the first and the second. */
765	uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 fclk
766                                                         increments. A Refresh sequence is triggered when bits
767                                                         [24:18] are equal to 0, and a ZQCS sequence is triggered
768                                                         when [36:18] are equal to 0.
769                                                         Program [24:18] to RND-DN(tREFI/clkPeriod/512)
770                                                         Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
771                                                         that this value should always be greater than 32, to account for
772                                                         resistor calibration delays.
773                                                         000_00000000_00000000: RESERVED
774                                                         Max Refresh interval = 127 * 512           = 65024 fclks
775                                                         Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 fclks ~ 335ms for a 1.25 ns clock
776                                                         DFM_CONFIG[INIT_STATUS] determines which ranks receive
777                                                         the REF / ZQCS. DFM does not send any refreshes / ZQCS's
778                                                         when DFM_CONFIG[INIT_STATUS]=0. */
779	uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
780                                                         and DFM_OPS_CNT, DFM_IFB_CNT, and DFM_FCLK_CNT
781                                                         CSR's. SW should write this to a one, then re-write
782                                                         it to a zero to cause the reset. */
783	uint64_t ecc_adr                      : 1;  /**< Must be zero. */
784	uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
785                                                         having waited for 2^FORCEWRITE cycles.  0=disabled. */
786	uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
787                                                         controller has been idle for 2^(2+IDLEPOWER) cycles.
788                                                         0=disabled.
789                                                         This field should only be programmed after initialization.
790                                                         DFM_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
791                                                         is disabled during the precharge power-down. */
792	uint64_t pbank_lsb                    : 4;  /**< Physical bank address bit select
793                                                         Encoding used to determine which memory address
794                                                         bit position represents the rank(or bunk) bit used to enable 1(of 2)
795                                                         ranks(via chip enables) supported by the DFM DDR3 interface.
796                                                         Reverting to the explanation for ROW_LSB, PBANK_LSB would be ROW_LSB bit +
797                                                         \#rowbits + \#rankbits.
798                                                         PBANK_LSB
799                                                             - 0: rank = mem_adr[24]
800                                                             - 1: rank = mem_adr[25]
801                                                             - 2: rank = mem_adr[26]
802                                                             - 3: rank = mem_adr[27]
803                                                             - 4: rank = mem_adr[28]
804                                                             - 5: rank = mem_adr[29]
805                                                             - 6: rank = mem_adr[30]
806                                                             - 7: rank = mem_adr[31]
807                                                          - 8-15:  RESERVED
808                                                         DESIGN NOTE: The DFM DDR3 memory bus is 16b wide, therefore DOES NOT
809                                                         support standard 64b/72b DDR3 DIMM modules. The board designer should
810                                                         populate the DFM DDR3 interface using either TWO x8bit DDR3 devices
811                                                         (or a single x16bit device if available) to fully populate the 16b
812                                                         DFM DDR3 data bus.
813                                                         The DFM DDR3 memory controller supports either 1(or 2) rank(s) based
814                                                         on how much total memory is desired for the DFA application. See
815                                                         RANK_ENA CSR bit when enabling for dual-ranks.
816                                                         SW NOTE:
817                                                             1) When RANK_ENA=0, SW must properly configure the PBANK_LSB to
818                                                                reference upper unused memory address bits.
819                                                             2) When RANK_ENA=1 (dual ranks), SW must configure PBANK_LSB to
820                                                                reference the upper most address bit based on the total size
821                                                                of the rank.
822                                                         For example, for a DFM DDR3 memory populated using Samsung's k4b1g0846c-f7
823                                                         1Gb(256MB) (16M x 8 bit x 8 bank) DDR3 parts, the column address width = 10 and
824                                                         the device row address width = 14b.  The single x8bit device contains 128MB, and
825                                                         requires TWO such parts to populate the DFM 16b DDR3 interface. This then yields
826                                                         a total rank size = 256MB = 2^28.
827                                                         For a single-rank configuration (RANK_ENA=0), SW would program PBANK_LSB>=3 to
828                                                         select mem_adr[x] bits above the legal DFM address range for mem_adr[27:0]=256MB.
829                                                         For a dual-rank configuration (RANK_ENA=1), SW would program PBANK_LSB=4 to select
830                                                         rank=mem_adr[28] as the bit used to determine which 256MB rank (of 512MB total) to
831                                                         access (via rank chip enables - see: DFM DDR3 CS0[1:0] pins for connection to
832                                                         upper and lower rank). */
833	uint64_t row_lsb                      : 3;  /**< Row Address bit select
834                                                         Encoding used to determine which memory address
835                                                         bit position represents the low order DDR ROW address.
836                                                         The DFM memory address [31:4] which references octawords
837                                                         needs to be translated to DRAM addresses (bnk,row,col,bunk)
838                                                         mem_adr[31:4]:
839                                                           3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
840                                                           1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4
841                                                          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
842                                                          |       ROW[m:n]            |     COL[13:3]       | BA
843                                                          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
844                                                         See:
845                                                           BA[2:0]:   mem_adr[6:4]
846                                                           COL[13:0]: [mem_adr[17:7],3'd0]
847                                                               NOTE: The extracted COL address is always 14b fixed size width,
848                                                               and upper unused bits are ignored by the DRAM device.
849                                                           ROW[15:0]: Extraction of ROW starting address bit is programmable,
850                                                           and is dependent on the \#column bits supported by the DRAM device.
851                                                           The actual starting bit of the ROW can actually span into the
852                                                           high order bits of the COL[13:3] field described above.
853                                                                  ROW_LSB    ROW[15:0]
854                                                                --------------------------
855                                                                   - 0:      mem_adr[26:11]
856                                                                   - 1:      mem_adr[27:12]
857                                                                   - 2:      mem_adr[28:13]
858                                                                   - 3:      mem_adr[29:14]
859                                                                   - 4:      mem_adr[30:15]
860                                                                   - 5:      mem_adr[31:16]
861                                                                  6,7:     [1'b0, mem_adr[31:17]]  For current DDR3 Jedec spec - UNSUPPORTED
862                                                         For example, for Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
863                                                         DDR3 parts, the column address width = 10. Therefore,
864                                                              BA[3:0] = mem_adr[6:4] / COL[9:0] = [mem_adr[13:7],3'd0], and
865                                                         we would want the row starting address to be extracted from mem_adr[14].
866                                                         Therefore, a ROW_LSB=3, will extract the row from mem_adr[29:14]. */
867	uint64_t ecc_ena                      : 1;  /**< Must be zero. */
868	uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
869                                                         selected by DFM_CONFIG[SEQUENCE].  This register is a
870                                                         oneshot and clears itself each time it is set. */
871#else
872	uint64_t init_start                   : 1;
873	uint64_t ecc_ena                      : 1;
874	uint64_t row_lsb                      : 3;
875	uint64_t pbank_lsb                    : 4;
876	uint64_t idlepower                    : 3;
877	uint64_t forcewrite                   : 4;
878	uint64_t ecc_adr                      : 1;
879	uint64_t reset                        : 1;
880	uint64_t ref_zqcs_int                 : 19;
881	uint64_t sequence                     : 3;
882	uint64_t early_dqx                    : 1;
883	uint64_t sref_with_dll                : 1;
884	uint64_t rank_ena                     : 1;
885	uint64_t rankmask                     : 4;
886	uint64_t mirrmask                     : 4;
887	uint64_t init_status                  : 4;
888	uint64_t early_unload_d0_r0           : 1;
889	uint64_t early_unload_d0_r1           : 1;
890	uint64_t early_unload_d1_r0           : 1;
891	uint64_t early_unload_d1_r1           : 1;
892	uint64_t reserved_59_63               : 5;
893#endif
894	} s;
895	struct cvmx_dfm_config_s              cn63xx;
896	struct cvmx_dfm_config_cn63xxp1
897	{
898#if __BYTE_ORDER == __BIG_ENDIAN
899	uint64_t reserved_55_63               : 9;
900	uint64_t init_status                  : 4;  /**< Indicates status of initialization
901                                                         INIT_STATUS[n] = 1 implies rank n has been initialized
902                                                         SW must set necessary INIT_STATUS bits with the
903                                                         same DFM_CONFIG write that initiates
904                                                         power-up/init and self-refresh exit sequences
905                                                         (if the required INIT_STATUS bits are not already
906                                                         set before DFM initiates the sequence).
907                                                         INIT_STATUS determines the chip-selects that assert
908                                                         during refresh, ZQCS, and precharge power-down and
909                                                         self-refresh entry/exit SEQUENCE's.
910                                                         INIT_STATUS<3:2> must be zero. */
911	uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
912                                                         MIRRMASK<n> = 1 means Rank n addresses are mirrored
913                                                         for 0 <= n <= 1
914                                                         A mirrored read/write has these differences:
915                                                          - DDR_BA<1> is swapped with DDR_BA<0>
916                                                          - DDR_A<8> is swapped with DDR_A<7>
917                                                          - DDR_A<6> is swapped with DDR_A<5>
918                                                          - DDR_A<4> is swapped with DDR_A<3>
919                                                         MIRRMASK<3:2> must be zero.
920                                                         When RANK_ENA=0, MIRRMASK<1> MBZ */
921	uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
922                                                         To write-level/read-level/initialize rank i, set RANKMASK<i>
923                                                                         RANK_ENA=1               RANK_ENA=0
924                                                           RANKMASK<0> =    CS0                  CS0 and CS1
925                                                           RANKMASK<1> =    CS1                      MBZ
926                                                         For read/write leveling, each rank has to be leveled separately,
927                                                         so RANKMASK should only have one bit set.
928                                                         RANKMASK is not used during self-refresh entry/exit and
929                                                         precharge power-down entry/exit instruction sequences.
930                                                         RANKMASK<3:2> must be zero.
931                                                         When RANK_ENA=0, RANKMASK<1> MBZ */
932	uint64_t rank_ena                     : 1;  /**< RANK enable (for use with multiple ranks)
933                                                         The RANK_ENA bit enables
934                                                         the drive of the CS_N[1:0] and ODT_<1:0> pins differently based on the
935                                                         (PBANK_LSB-1) address bit. */
936	uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
937                                                         When set, self-refresh entry and exit instruction sequences
938                                                         write MR1 and MR2 (in all ranks). (The writes occur before
939                                                         self-refresh entry, and after self-refresh exit.)
940                                                         When clear, self-refresh entry and exit instruction sequences
941                                                         do not write any registers in the DDR3 parts. */
942	uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
943                                                         the shortest DQx lines have a larger delay than the CK line */
944	uint64_t sequence                     : 3;  /**< Instruction sequence that is run after a 0->1
945                                                         transition on DFM_CONFIG[INIT_START]. Self-refresh entry and
946                                                         precharge power-down entry and exit SEQUENCE's can also
947                                                         be initiated automatically by hardware.
948                                                         0=power-up/init                  (RANKMASK used, MR0, MR1, MR2, and MR3 written)
949                                                         1=read-leveling                  (RANKMASK used, MR3 written)
950                                                         2=self-refresh entry             (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
951                                                         3=self-refresh exit,             (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
952                                                         4=precharge power-down entry     (all ranks participate)
953                                                         5=precharge power-down exit      (all ranks participate)
954                                                         6=write-leveling                 (RANKMASK used, MR1 written)
955                                                         7=illegal
956                                                         Precharge power-down entry and exit SEQUENCE's may
957                                                         be automatically generated by the HW when IDLEPOWER!=0.
958                                                         Self-refresh entry SEQUENCE's may be automatically
959                                                         generated by hardware upon a chip warm or soft reset
960                                                         sequence when DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
961                                                         DFM writes the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 CSR field values
962                                                         to the Mode registers in the DRAM parts (MR0, MR1, MR2, and MR3) as part of some of these sequences.
963                                                         Refer to the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 descriptions for more details.
964                                                         The DFR_CKE pin gets activated as part of power-up/init,
965                                                         self-refresh exit, and precharge power-down exit sequences.
966                                                         The DFR_CKE pin gets de-activated as part of self-refresh entry,
967                                                         precharge power-down entry, or DRESET assertion.
968                                                         If there are two consecutive power-up/init's without
969                                                         a DRESET assertion between them, DFM asserts DFR_CKE as part of
970                                                         the first power-up/init, and continues to assert DFR_CKE
971                                                         through the remainder of the first and the second power-up/init.
972                                                         If DFR_CKE deactivation and reactivation is needed for
973                                                         a second power-up/init, a DRESET assertion is required
974                                                         between the first and the second. (A usage sketch follows this union definition.) */
975	uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 fclk
976                                                         increments. A Refresh sequence is triggered when bits
977                                                         [24:18] are equal to 0, and a ZQCS sequence is triggered
978                                                         when [36:18] are equal to 0.
979                                                         Program [24:18] to RND-DN(tREFI/clkPeriod/512)
980                                                         Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
981                                                         that this value should always be greater than 32, to account for
982                                                         resistor calibration delays.
983                                                         000_00000000_00000000: RESERVED
984                                                         Max Refresh interval = 127 * 512           = 65024 fclks
985                                                         Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 fclks ~ 335ms for a 1.25 ns clock
986                                                         DFM_CONFIG[INIT_STATUS] determines which ranks receive
987                                                         the REF / ZQCS. DFM does not send any refreshes / ZQCS's
988                                                         when DFM_CONFIG[INIT_STATUS]=0. (A programming sketch follows this union definition.) */
989	uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
990                                                         and DFM_OPS_CNT, DFM_IFB_CNT, and DFM_FCLK_CNT
991                                                         CSR's. SW should write this to a one, then re-write
992                                                         it to a zero to cause the reset. */
993	uint64_t ecc_adr                      : 1;  /**< Must be zero. */
994	uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
995                                                         having waited for 2^FORCEWRITE cycles.  0=disabled. */
996	uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
997                                                         controller has been idle for 2^(2+IDLEPOWER) cycles.
998                                                         0=disabled.
999                                                         This field should only be programmed after initialization.
1000                                                         DFM_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
1001                                                         is disabled during the precharge power-down. */
1002	uint64_t pbank_lsb                    : 4;  /**< Physical bank address bit select
1003                                                         Encoding used to determine which memory address
1004                                                         bit position represents the rank(or bunk) bit used to enable 1(of 2)
1005                                                         ranks(via chip enables) supported by the DFM DDR3 interface.
1006                                                         Referring to the explanation for ROW_LSB, PBANK_LSB would be the ROW_LSB bit +
1007                                                         \#rowbits + \#rankbits.
1008                                                         PBANK_LSB
1009                                                             - 0: rank = mem_adr[24]
1010                                                             - 1: rank = mem_adr[25]
1011                                                             - 2: rank = mem_adr[26]
1012                                                             - 3: rank = mem_adr[27]
1013                                                             - 4: rank = mem_adr[28]
1014                                                             - 5: rank = mem_adr[29]
1015                                                             - 6: rank = mem_adr[30]
1016                                                             - 7: rank = mem_adr[31]
1017                                                          - 8-15:  RESERVED
1018                                                         DESIGN NOTE: The DFM DDR3 memory bus is 16b wide, therefore DOES NOT
1019                                                         support standard 64b/72b DDR3 DIMM modules. The board designer should
1020                                                         populate the DFM DDR3 interface using either TWO x8bit DDR3 devices
1021                                                         (or a single x16bit device if available) to fully populate the 16b
1022                                                         DFM DDR3 data bus.
1023                                                         The DFM DDR3 memory controller supports either 1(or 2) rank(s) based
1024                                                         on how much total memory is desired for the DFA application. See
1025                                                         RANK_ENA CSR bit when enabling for dual-ranks.
1026                                                         SW NOTE:
1027                                                             1) When RANK_ENA=0, SW must properly configure the PBANK_LSB to
1028                                                                reference upper unused memory address bits.
1029                                                             2) When RANK_ENA=1 (dual ranks), SW must configure PBANK_LSB to
1030                                                                reference the upper most address bit based on the total size
1031                                                                of the rank.
1032                                                         For example, for a DFM DDR3 memory populated using Samsung's k4b1g0846c-f7
1033                                                         1Gb (128MB) (16M x 8 bit x 8 bank) DDR3 parts, the column address width = 10 and
1034                                                         the device row address width = 14b.  The single x8bit device contains 128MB, and
1035                                                         requires TWO such parts to populate the DFM 16b DDR3 interface. This then yields
1036                                                         a total rank size = 256MB = 2^28.
1037                                                         For a single-rank configuration (RANK_ENA=0), SW would program PBANK_LSB>=3 to
1038                                                         select mem_adr[x] bits above the legal DFM address range for mem_adr[27:0]=256MB.
1039                                                         For a dual-rank configuration (RANK_ENA=1), SW would program PBANK_LSB=4 to select
1040                                                         rank=mem_adr[28] as the bit used to determine which 256MB rank (of 512MB total) to
1041                                                         access (via rank chip enables - see: DFM DDR3 CS0[1:0] pins for connection to
1042                                                         upper and lower rank). */
1043	uint64_t row_lsb                      : 3;  /**< Row Address bit select
1044                                                         Encoding used to determine which memory address
1045                                                         bit position represents the low order DDR ROW address.
1046                                                         The DFM memory address [31:4] which references octawords
1047                                                         needs to be translated to DRAM addresses (bnk,row,col,bunk)
1048                                                         mem_adr[31:4]:
1049                                                           3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
1050                                                           1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4
1051                                                          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1052                                                          |       ROW[m:n]            |     COL[13:3]       | BA
1053                                                          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1054                                                         See:
1055                                                           BA[2:0]:   mem_adr[6:4]
1056                                                           COL[13:0]: [mem_adr[17:7],3'd0]
1057                                                               NOTE: The extracted COL address is always 14b fixed size width,
1058                                                               and upper unused bits are ignored by the DRAM device.
1059                                                           ROW[15:0]: Extraction of ROW starting address bit is programmable,
1060                                                           and is dependent on the \#column bits supported by the DRAM device.
1061                                                           The actual starting bit of the ROW can actually span into the
1062                                                           high order bits of the COL[13:3] field described above.
1063                                                                  ROW_LSB    ROW[15:0]
1064                                                                --------------------------
1065                                                                   - 0:      mem_adr[26:11]
1066                                                                   - 1:      mem_adr[27:12]
1067                                                                   - 2:      mem_adr[28:13]
1068                                                                   - 3:      mem_adr[29:14]
1069                                                                   - 4:      mem_adr[30:15]
1070                                                                   - 5:      mem_adr[31:16]
1071                                                                  6,7:     [1'b0, mem_adr[31:17]]  For current DDR3 Jedec spec - UNSUPPORTED
1072                                                         For example, for Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1073                                                         DDR3 parts, the column address width = 10. Therefore,
1074                                                              BA[2:0] = mem_adr[6:4] / COL[9:0] = [mem_adr[13:7],3'd0], and
1075                                                         we would want the row starting address to be extracted from mem_adr[14].
1076                                                         Therefore, ROW_LSB=3 will extract the row from mem_adr[29:14]. */
1077	uint64_t ecc_ena                      : 1;  /**< Must be zero. */
1078	uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
1079                                                         selected by DFM_CONFIG[SEQUENCE].  This register is a
1080                                                         oneshot and clears itself each time it is set. */
1081#else
1082	uint64_t init_start                   : 1;
1083	uint64_t ecc_ena                      : 1;
1084	uint64_t row_lsb                      : 3;
1085	uint64_t pbank_lsb                    : 4;
1086	uint64_t idlepower                    : 3;
1087	uint64_t forcewrite                   : 4;
1088	uint64_t ecc_adr                      : 1;
1089	uint64_t reset                        : 1;
1090	uint64_t ref_zqcs_int                 : 19;
1091	uint64_t sequence                     : 3;
1092	uint64_t early_dqx                    : 1;
1093	uint64_t sref_with_dll                : 1;
1094	uint64_t rank_ena                     : 1;
1095	uint64_t rankmask                     : 4;
1096	uint64_t mirrmask                     : 4;
1097	uint64_t init_status                  : 4;
1098	uint64_t reserved_55_63               : 9;
1099#endif
1100	} cn63xxp1;
1101};
1102typedef union cvmx_dfm_config cvmx_dfm_config_t;
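
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): starting the power-up/init instruction sequence described by
 * DFM_CONFIG[SEQUENCE,RANKMASK,INIT_START] above.  Assumes the CVMX_DFM_CONFIG
 * address macro defined earlier in this header, the cvmx_read_csr()/
 * cvmx_write_csr() accessors from the cvmx SDK, and the ".s" field view of
 * cvmx_dfm_config_t.  Consult the hardware reference manual for the
 * authoritative initialization procedure.
 *
 *   cvmx_dfm_config_t cfg;
 *
 *   cfg.u64 = cvmx_read_csr(CVMX_DFM_CONFIG);
 *   cfg.s.sequence   = 0;    // 0 = power-up/init (MR0, MR1, MR2, MR3 written)
 *   cfg.s.rankmask   = 0x1;  // rank 0 (CS0 when RANK_ENA=1; CS0 and CS1 when RANK_ENA=0)
 *   cfg.s.init_start = 0;
 *   cvmx_write_csr(CVMX_DFM_CONFIG, cfg.u64);
 *
 *   cfg.s.init_start = 1;    // 0->1 transition launches the sequence (oneshot)
 *   cvmx_write_csr(CVMX_DFM_CONFIG, cfg.u64);
 */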
1103
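/*
 * Programming sketch (illustrative; not part of the auto-generated register
 * definitions): deriving DFM_CONFIG[REF_ZQCS_INT] from the formulas in the
 * field description above.  The tREFI and ZQCS interval values below are
 * example numbers only; take the real values from the DDR3 part's data sheet.
 * Assumes an 800 MHz fclk (1.25 ns period).
 *
 *   // Refresh part, REF_ZQCS_INT<6:0>  = RND-DN(tREFI / clkPeriod / 512)
 *   //   7800 ns / 1.25 ns / 512 = 12.1875           -> 12
 *   // ZQCS part,    REF_ZQCS_INT<18:7> = RND-DN(ZQCS_Interval / clkPeriod / (512 * 64))
 *   //   128000000 ns / 1.25 ns / 32768 = 3125       -> 3125 (> 32, as required)
 *   uint64_t ref_part  = 12;
 *   uint64_t zqcs_part = 3125;
 *
 *   cvmx_dfm_config_t cfg;
 *   cfg.u64 = cvmx_read_csr(CVMX_DFM_CONFIG);
 *   cfg.s.ref_zqcs_int = (zqcs_part << 7) | ref_part;
 *   cvmx_write_csr(CVMX_DFM_CONFIG, cfg.u64);
 */
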
1104/**
1105 * cvmx_dfm_control
1106 *
1107 * DFM_CONTROL = DFM Control
1108 * This register is an assortment of various control fields needed by the memory controller
1109 */
1110union cvmx_dfm_control
1111{
1112	uint64_t u64;
1113	struct cvmx_dfm_control_s
1114	{
1115#if __BYTE_ORDER == __BIG_ENDIAN
1116	uint64_t reserved_24_63               : 40;
1117	uint64_t rodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
1118                                                         RD cmd is delayed an additional DCLK cycle. */
1119	uint64_t wodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
1120                                                         WR cmd is delayed an additional DCLK cycle. */
1121	uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
1122                                                         the default DDR_DQ/DQS drivers is delayed an additional BPRCH FCLK
1123                                                         cycles.
1124                                                         00 = 0 fclks
1125                                                         01 = 1 fclks
1126                                                         10 = 2 fclks
1127                                                         11 = 3 fclks */
1128	uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
1129                                                         When clear, DFM runs external ZQ calibration */
1130	uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
1131                                                         When counter is re-enabled, ZQCS is run immediately,
1132                                                         and then every DFM_CONFIG[REF_ZQCS_INT] fclk cycles. */
1133	uint64_t auto_fclkdis                 : 1;  /**< When 1, DFM will automatically shut off its internal
1134                                                         clock to conserve power when there is no traffic. Note
1135                                                         that this has no effect on the DDR3 PHY and pads clocks. */
1136	uint64_t xor_bank                     : 1;  /**< Must be zero. */
1137	uint64_t max_write_batch              : 4;  /**< Must be set to value 8 */
1138	uint64_t nxm_write_en                 : 1;  /**< Must be zero. */
1139	uint64_t elev_prio_dis                : 1;  /**< Must be zero. */
1140	uint64_t inorder_wr                   : 1;  /**< Must be zero. */
1141	uint64_t inorder_rd                   : 1;  /**< Must be zero. */
1142	uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes
1143                                                         THROTTLE_RD and THROTTLE_WR must be the same value. */
1144	uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads
1145                                                         THROTTLE_RD and THROTTLE_WR must be the same value. */
1146	uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
1147                                                         time for the default DDR_DQ/DQS drivers is FPRCH2 fclks earlier.
1148                                                         00 = 0 fclks
1149                                                         01 = 1 fclks
1150                                                         10 = 2 fclks
1151                                                         11 = RESERVED */
1152	uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
1153                                                         This bit should be set in conjunction with DFM_MODEREG_PARAMS[AL] */
1154	uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
1155                                                         address. This mode helps relieve setup time pressure
1156                                                         on the Address and command bus which nominally have
1157                                                         a very large fanout. Please refer to Micron's tech
1158                                                         note tn_47_01 titled "DDR2-533 Memory Design Guide
1159                                                         for Two Dimm Unbuffered Systems" for physical details. */
1160	uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
1161                                                         Clears the DFM_OPS_CNT, DFM_IFB_CNT, and
1162                                                         DFM_FCLK_CNT registers. SW should first write this
1163                                                         field to a one, then write this field to a zero to
1164                                                         clear the CSR's. (A usage sketch follows this union definition.) */
1165	uint64_t rdimm_ena                    : 1;  /**< Must be zero. */
1166#else
1167	uint64_t rdimm_ena                    : 1;
1168	uint64_t bwcnt                        : 1;
1169	uint64_t ddr2t                        : 1;
1170	uint64_t pocas                        : 1;
1171	uint64_t fprch2                       : 2;
1172	uint64_t throttle_rd                  : 1;
1173	uint64_t throttle_wr                  : 1;
1174	uint64_t inorder_rd                   : 1;
1175	uint64_t inorder_wr                   : 1;
1176	uint64_t elev_prio_dis                : 1;
1177	uint64_t nxm_write_en                 : 1;
1178	uint64_t max_write_batch              : 4;
1179	uint64_t xor_bank                     : 1;
1180	uint64_t auto_fclkdis                 : 1;
1181	uint64_t int_zqcs_dis                 : 1;
1182	uint64_t ext_zqcs_dis                 : 1;
1183	uint64_t bprch                        : 2;
1184	uint64_t wodt_bprch                   : 1;
1185	uint64_t rodt_bprch                   : 1;
1186	uint64_t reserved_24_63               : 40;
1187#endif
1188	} s;
1189	struct cvmx_dfm_control_s             cn63xx;
1190	struct cvmx_dfm_control_cn63xxp1
1191	{
1192#if __BYTE_ORDER == __BIG_ENDIAN
1193	uint64_t reserved_22_63               : 42;
1194	uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
1195                                                         the default DDR_DQ/DQS drivers is delayed an additional BPRCH FCLK
1196                                                         cycles.
1197                                                         00 = 0 fclks
1198                                                         01 = 1 fclks
1199                                                         10 = 2 fclks
1200                                                         11 = 3 fclks */
1201	uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
1202                                                         When clear, DFM runs external ZQ calibration */
1203	uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
1204                                                         When counter is re-enabled, ZQCS is run immediately,
1205                                                         and then every DFM_CONFIG[REF_ZQCS_INT] fclk cycles. */
1206	uint64_t auto_fclkdis                 : 1;  /**< When 1, DFM will automatically shut off its internal
1207                                                         clock to conserve power when there is no traffic. Note
1208                                                         that this has no effect on the DDR3 PHY and pads clocks. */
1209	uint64_t xor_bank                     : 1;  /**< Must be zero. */
1210	uint64_t max_write_batch              : 4;  /**< Must be set to value 8 */
1211	uint64_t nxm_write_en                 : 1;  /**< Must be zero. */
1212	uint64_t elev_prio_dis                : 1;  /**< Must be zero. */
1213	uint64_t inorder_wr                   : 1;  /**< Must be zero. */
1214	uint64_t inorder_rd                   : 1;  /**< Must be zero. */
1215	uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes
1216                                                         THROTTLE_RD and THROTTLE_WR must be the same value. */
1217	uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads
1218                                                         THROTTLE_RD and THROTTLE_WR must be the same value. */
1219	uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
1220                                                         time for the default DDR_DQ/DQS drivers is FPRCH2 fclks earlier.
1221                                                         00 = 0 fclks
1222                                                         01 = 1 fclks
1223                                                         10 = 2 fclks
1224                                                         11 = RESERVED */
1225	uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
1226                                                         This bit should be set in conjunction with DFM_MODEREG_PARAMS[AL] */
1227	uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
1228                                                         address. This mode helps relieve setup time pressure
1229                                                         on the Address and command bus which nominally have
1230                                                         a very large fanout. Please refer to Micron's tech
1231                                                         note tn_47_01 titled "DDR2-533 Memory Design Guide
1232                                                         for Two Dimm Unbuffered Systems" for physical details. */
1233	uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
1234                                                         Clears the DFM_OPS_CNT, DFM_IFB_CNT, and
1235                                                         DFM_FCLK_CNT registers. SW should first write this
1236                                                         field to a one, then write this field to a zero to
1237                                                         clear the CSR's. */
1238	uint64_t rdimm_ena                    : 1;  /**< Must be zero. */
1239#else
1240	uint64_t rdimm_ena                    : 1;
1241	uint64_t bwcnt                        : 1;
1242	uint64_t ddr2t                        : 1;
1243	uint64_t pocas                        : 1;
1244	uint64_t fprch2                       : 2;
1245	uint64_t throttle_rd                  : 1;
1246	uint64_t throttle_wr                  : 1;
1247	uint64_t inorder_rd                   : 1;
1248	uint64_t inorder_wr                   : 1;
1249	uint64_t elev_prio_dis                : 1;
1250	uint64_t nxm_write_en                 : 1;
1251	uint64_t max_write_batch              : 4;
1252	uint64_t xor_bank                     : 1;
1253	uint64_t auto_fclkdis                 : 1;
1254	uint64_t int_zqcs_dis                 : 1;
1255	uint64_t ext_zqcs_dis                 : 1;
1256	uint64_t bprch                        : 2;
1257	uint64_t reserved_22_63               : 42;
1258#endif
1259	} cn63xxp1;
1260};
1261typedef union cvmx_dfm_control cvmx_dfm_control_t;
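
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): clearing the DFM performance counters with
 * DFM_CONTROL[BWCNT], per the field description above (write a one, then a
 * zero).  Assumes the CVMX_DFM_CONTROL address macro defined earlier in this
 * header and the cvmx_read_csr()/cvmx_write_csr() accessors from the cvmx SDK.
 *
 *   cvmx_dfm_control_t ctl;
 *
 *   ctl.u64 = cvmx_read_csr(CVMX_DFM_CONTROL);
 *   ctl.s.bwcnt = 1;                      // arm the clear
 *   cvmx_write_csr(CVMX_DFM_CONTROL, ctl.u64);
 *   ctl.s.bwcnt = 0;                      // complete the 1 -> 0 pulse
 *   cvmx_write_csr(CVMX_DFM_CONTROL, ctl.u64);
 */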
1262
1263/**
1264 * cvmx_dfm_dll_ctl2
1265 *
1266 * DFM_DLL_CTL2 = DFM (Octeon) DLL control and FCLK reset
1267 *
1268 *
1269 * Notes:
1270 * DLL Bringup sequence:
1271 * 1. If not done already, set DFM_DLL_CTL2 = 0, except when DFM_DLL_CTL2[DRESET] = 1.
1272 * 2. Write 1 to DFM_DLL_CTL2[DLL_BRINGUP]
1273 * 3. Wait for 10 FCLK cycles, then write 1 to DFM_DLL_CTL2[QUAD_DLL_ENA]. It may not be feasible to count 10 FCLK cycles, but the
1274 *    idea is to configure the delay line into DLL mode by asserting DLL_BRINGUP earlier than QUAD_DLL_ENA, even if it is one
1275 *    cycle early. DFM_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the DFM and/or DRESET initialization
1276 *    sequence.
1277 * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it is called in o63. It is still OK to skip
1278 *    step 4, since step 5 allows enough time.)
1279 * 5. Wait 10 us.
1280 * 6. Write 0 to DFM_DLL_CTL2[DLL_BRINGUP]. DFM_DLL_CTL2[DLL_BRINGUP] must not change after this point without restarting the DFM
1281 *    and/or DRESET initialization sequence.
1282 * 7. Read L2D_BST0 and wait for the result. (same as step 4, but the idea here is to wait some time before going to step 8; even one
1283 *    cycle is fine.)
1284 * 8. Write 0 to DFM_DLL_CTL2[DRESET].  DFM_DLL_CTL2[DRESET] must not change after this point without restarting the DFM and/or
1285 *    DRESET initialization sequence. A code sketch of this bring-up sequence follows the union definition below.
1286 */
1287union cvmx_dfm_dll_ctl2
1288{
1289	uint64_t u64;
1290	struct cvmx_dfm_dll_ctl2_s
1291	{
1292#if __BYTE_ORDER == __BIG_ENDIAN
1293	uint64_t reserved_15_63               : 49;
1294	uint64_t dll_bringup                  : 1;  /**< DLL Bringup */
1295	uint64_t dreset                       : 1;  /**< Fclk domain reset.  The reset signal that is used by the
1296                                                         Fclk domain is (DRESET || ECLK_RESET). */
1297	uint64_t quad_dll_ena                 : 1;  /**< DLL Enable */
1298	uint64_t byp_sel                      : 4;  /**< Bypass select
1299                                                         0000 : no byte
1300                                                         0001 : byte 0
1301                                                         - ...
1302                                                         1001 : byte 8
1303                                                         1010 : all bytes
1304                                                         1011-1111 : Reserved */
1305	uint64_t byp_setting                  : 8;  /**< Bypass setting
1306                                                         DDR3-1600: 00100010
1307                                                         DDR3-1333: 00110010
1308                                                         DDR3-1066: 01001011
1309                                                         DDR3-800 : 01110101
1310                                                         DDR3-667 : 10010110
1311                                                         DDR3-600 : 10101100 */
1312#else
1313	uint64_t byp_setting                  : 8;
1314	uint64_t byp_sel                      : 4;
1315	uint64_t quad_dll_ena                 : 1;
1316	uint64_t dreset                       : 1;
1317	uint64_t dll_bringup                  : 1;
1318	uint64_t reserved_15_63               : 49;
1319#endif
1320	} s;
1321	struct cvmx_dfm_dll_ctl2_s            cn63xx;
1322	struct cvmx_dfm_dll_ctl2_s            cn63xxp1;
1323};
1324typedef union cvmx_dfm_dll_ctl2 cvmx_dfm_dll_ctl2_t;
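
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): the DLL bring-up sequence from the notes above, expressed with
 * this union.  Assumes the CVMX_DFM_DLL_CTL2 address macro defined earlier in
 * this header and cvmx_read_csr()/cvmx_write_csr()/cvmx_wait_usec() from the
 * cvmx SDK.  The optional L2D_BST0 reads (steps 4 and 7) are omitted here and
 * replaced by waits, as the notes allow; treat this as a sketch, not the
 * authoritative procedure.
 *
 *   cvmx_dfm_dll_ctl2_t dll;
 *
 *   dll.u64 = 0;                                  // step 1: all zero ...
 *   dll.s.dreset = 1;                             // ... except DRESET stays asserted
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL2, dll.u64);
 *
 *   dll.s.dll_bringup = 1;                        // step 2
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL2, dll.u64);
 *
 *   dll.s.quad_dll_ena = 1;                       // step 3 (the notes allow this right after step 2)
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL2, dll.u64);
 *
 *   cvmx_wait_usec(10);                           // step 5: wait 10 us
 *
 *   dll.s.dll_bringup = 0;                        // step 6
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL2, dll.u64);
 *
 *   cvmx_wait_usec(1);                            // step 7: brief settle time
 *
 *   dll.s.dreset = 0;                             // step 8: release the fclk domain reset
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL2, dll.u64);
 */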
1325
1326/**
1327 * cvmx_dfm_dll_ctl3
1328 *
1329 * DFM_DLL_CTL3 = DFM DLL control and FCLK reset
1330 *
1331 */
1332union cvmx_dfm_dll_ctl3
1333{
1334	uint64_t u64;
1335	struct cvmx_dfm_dll_ctl3_s
1336	{
1337#if __BYTE_ORDER == __BIG_ENDIAN
1338	uint64_t reserved_29_63               : 35;
1339	uint64_t dll_fast                     : 1;  /**< DLL lock
1340                                                         0 = DLL locked */
1341	uint64_t dll90_setting                : 8;  /**< Encoded DLL settings. Works in conjunction with
1342                                                         DLL90_BYTE_SEL */
1343	uint64_t fine_tune_mode               : 1;  /**< Fine Tune Mode */
1344	uint64_t dll_mode                     : 1;  /**< DLL Mode */
1345	uint64_t dll90_byte_sel               : 4;  /**< Observe DLL settings for selected byte
1346                                                         0001 : byte 0
1347                                                         - ...
1348                                                         1001 : byte 8
1349                                                         0000,1010-1111 : Reserved */
1350	uint64_t offset_ena                   : 1;  /**< Offset enable
1351                                                         0 = disable
1352                                                         1 = enable */
1353	uint64_t load_offset                  : 1;  /**< Load offset
1354                                                         0 : disable
1355                                                         1 : load (generates a 1 cycle pulse to the PHY)
1356                                                         This register is oneshot and clears itself each time
1357                                                         it is set. (A usage sketch follows this union definition.) */
1358	uint64_t mode_sel                     : 2;  /**< Mode select
1359                                                         00 : reset
1360                                                         01 : write
1361                                                         10 : read
1362                                                         11 : write & read */
1363	uint64_t byte_sel                     : 4;  /**< Byte select
1364                                                         0000 : no byte
1365                                                         0001 : byte 0
1366                                                         - ...
1367                                                         1001 : byte 8
1368                                                         1010 : all bytes
1369                                                         1011-1111 : Reserved */
1370	uint64_t offset                       : 6;  /**< Write/read offset setting
1371                                                         [4:0] : offset
1372                                                         [5]   : 0 = increment, 1 = decrement
1373                                                         Not a 2's complement value */
1374#else
1375	uint64_t offset                       : 6;
1376	uint64_t byte_sel                     : 4;
1377	uint64_t mode_sel                     : 2;
1378	uint64_t load_offset                  : 1;
1379	uint64_t offset_ena                   : 1;
1380	uint64_t dll90_byte_sel               : 4;
1381	uint64_t dll_mode                     : 1;
1382	uint64_t fine_tune_mode               : 1;
1383	uint64_t dll90_setting                : 8;
1384	uint64_t dll_fast                     : 1;
1385	uint64_t reserved_29_63               : 35;
1386#endif
1387	} s;
1388	struct cvmx_dfm_dll_ctl3_s            cn63xx;
1389	struct cvmx_dfm_dll_ctl3_s            cn63xxp1;
1390};
1391typedef union cvmx_dfm_dll_ctl3 cvmx_dfm_dll_ctl3_t;
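
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): applying a read-side offset to one byte lane using the
 * DFM_DLL_CTL3 fields above.  The ordering shown (program the offset, enable
 * it, then pulse LOAD_OFFSET) is one plausible ordering, not taken from this
 * header; consult the hardware reference manual.  Assumes the
 * CVMX_DFM_DLL_CTL3 address macro defined earlier in this header.
 *
 *   cvmx_dfm_dll_ctl3_t ctl3;
 *
 *   ctl3.u64 = cvmx_read_csr(CVMX_DFM_DLL_CTL3);
 *   ctl3.s.byte_sel    = 1;     // 0001 = byte 0
 *   ctl3.s.mode_sel    = 2;     // 10 = read
 *   ctl3.s.offset      = 0x03;  // <4:0> = magnitude, <5> = 0 (increment)
 *   ctl3.s.offset_ena  = 1;
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL3, ctl3.u64);
 *
 *   ctl3.s.load_offset = 1;     // oneshot: generates a 1-cycle pulse to the PHY
 *   cvmx_write_csr(CVMX_DFM_DLL_CTL3, ctl3.u64);
 */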
1392
1393/**
1394 * cvmx_dfm_fclk_cnt
1395 *
1396 * DFM_FCLK_CNT  = Performance Counters
1397 *
1398 */
1399union cvmx_dfm_fclk_cnt
1400{
1401	uint64_t u64;
1402	struct cvmx_dfm_fclk_cnt_s
1403	{
1404#if __BYTE_ORDER == __BIG_ENDIAN
1405	uint64_t fclkcnt                      : 64; /**< Performance Counter that counts fclks
1406                                                         64-bit counter. */
1407#else
1408	uint64_t fclkcnt                      : 64;
1409#endif
1410	} s;
1411	struct cvmx_dfm_fclk_cnt_s            cn63xx;
1412	struct cvmx_dfm_fclk_cnt_s            cn63xxp1;
1413};
1414typedef union cvmx_dfm_fclk_cnt cvmx_dfm_fclk_cnt_t;
1415
1416/**
1417 * cvmx_dfm_fnt_bist
1418 *
1419 * DFM_FNT_BIST = DFM Front BIST Status
1420 *
1421 * This register contains Bist Status for DFM Front
1422 */
1423union cvmx_dfm_fnt_bist
1424{
1425	uint64_t u64;
1426	struct cvmx_dfm_fnt_bist_s
1427	{
1428#if __BYTE_ORDER == __BIG_ENDIAN
1429	uint64_t reserved_5_63                : 59;
1430	uint64_t cab                          : 1;  /**< Bist Results for CAB RAM
1431                                                         - 0: GOOD (or bist in progress/never run)
1432                                                         - 1: BAD */
1433	uint64_t mrq                          : 1;  /**< Bist Results for MRQ RAM
1434                                                         - 0: GOOD (or bist in progress/never run)
1435                                                         - 1: BAD */
1436	uint64_t mff                          : 1;  /**< Bist Results for MFF RAM
1437                                                         - 0: GOOD (or bist in progress/never run)
1438                                                         - 1: BAD */
1439	uint64_t rpb                          : 1;  /**< Bist Results for RPB RAM
1440                                                         - 0: GOOD (or bist in progress/never run)
1441                                                         - 1: BAD */
1442	uint64_t mwb                          : 1;  /**< Bist Results for MWB RAM
1443                                                         - 0: GOOD (or bist in progress/never run)
1444                                                         - 1: BAD */
1445#else
1446	uint64_t mwb                          : 1;
1447	uint64_t rpb                          : 1;
1448	uint64_t mff                          : 1;
1449	uint64_t mrq                          : 1;
1450	uint64_t cab                          : 1;
1451	uint64_t reserved_5_63                : 59;
1452#endif
1453	} s;
1454	struct cvmx_dfm_fnt_bist_s            cn63xx;
1455	struct cvmx_dfm_fnt_bist_cn63xxp1
1456	{
1457#if __BYTE_ORDER == __BIG_ENDIAN
1458	uint64_t reserved_4_63                : 60;
1459	uint64_t mrq                          : 1;  /**< Bist Results for MRQ RAM
1460                                                         - 0: GOOD (or bist in progress/never run)
1461                                                         - 1: BAD */
1462	uint64_t mff                          : 1;  /**< Bist Results for MFF RAM
1463                                                         - 0: GOOD (or bist in progress/never run)
1464                                                         - 1: BAD */
1465	uint64_t rpb                          : 1;  /**< Bist Results for RPB RAM
1466                                                         - 0: GOOD (or bist in progress/never run)
1467                                                         - 1: BAD */
1468	uint64_t mwb                          : 1;  /**< Bist Results for MWB RAM
1469                                                         - 0: GOOD (or bist in progress/never run)
1470                                                         - 1: BAD */
1471#else
1472	uint64_t mwb                          : 1;
1473	uint64_t rpb                          : 1;
1474	uint64_t mff                          : 1;
1475	uint64_t mrq                          : 1;
1476	uint64_t reserved_4_63                : 60;
1477#endif
1478	} cn63xxp1;
1479};
1480typedef union cvmx_dfm_fnt_bist cvmx_dfm_fnt_bist_t;
1481
1482/**
1483 * cvmx_dfm_fnt_ctl
1484 *
1485 * Specify the RSL base addresses for the block
1486 *
1487 *                  DFM_FNT_CTL = DFM Front Control Register
1488 *
1489 * This register contains control registers for the DFM Front Section of Logic.
1490 */
1491union cvmx_dfm_fnt_ctl
1492{
1493	uint64_t u64;
1494	struct cvmx_dfm_fnt_ctl_s
1495	{
1496#if __BYTE_ORDER == __BIG_ENDIAN
1497	uint64_t reserved_4_63                : 60;
1498	uint64_t sbe_ena                      : 1;  /**< If SBE_ENA=1 & RECC_ENA=1 then all single bit errors
1499                                                         which have been detected/corrected during GWALK reads,
1500                                                         will be reported through RWORD0[REA]=ERR code in system
1501                                                         memory at the conclusion of the DFA instruction.
1502                                                         SWNOTE: The application user may wish to report single
1503                                                         bit errors that were corrected through the
1504                                                         RWORD0[REA]=ERR codeword.
1505                                                         NOTE: This DOES NOT affect the reporting of SBEs in
1506                                                         DFM_FNT_STAT[SBE] (which were corrected if RECC_ENA=1).
1507                                                         This bit is only here for applications which 'MAY' want
1508                                                         to be alerted with an ERR completion code if there were
1509                                                         SBEs that were auto-corrected during GWALK instructions.
1510                                                         Recap: If there is a SBE and SBE_ENA==1, the "err" field
1511                                                         in the data returned to DFA will be set.  If SBE_ENA==0,
1512                                                         the "err" is always 0 when there is a SBE; however,
1513                                                         regardless of SBE_ENA, DBE will cause "err" to be 1. */
1514	uint64_t wecc_ena                     : 1;  /**< If WECC_ENA=1, HW will auto-generate(overwrite) the 10b
1515                                                         OWECC codeword during Memory Writes sourced by
1516                                                         1) DFA MLOAD instructions, or by 2) NCB-Direct CSR
1517                                                         mode writes to DFA memory space. The HW will insert
1518                                                         the 10b OWECC inband into OW-DATA[127:118].
1519                                                         If WECC_ENA=0, SW is responsible for generating the
1520                                                         10b OWECC codeword inband in the upper OW-data[127:118]
1521                                                         during Memory writes (to provide SEC/DED coverage for
1522                                                         the data during subsequent Memory reads-see RECC_ENA). */
1523	uint64_t recc_ena                     : 1;  /**< If RECC_ENA=1, all DFA memory reads sourced by 1) DFA
1524                                                         GWALK instructions or by 2) NCB-Direct CSR mode reads
1525                                                         to DFA memory space, will be protected by an inband 10b
1526                                                         OWECC SEC/DED codeword. The inband OW-DATA[127:118]
1527                                                         represents the inband OWECC codeword which offers single
1528                                                         bit error correction(SEC)/double bit error detection(DED).
1529                                                         [see also DFM_FNT_STAT[SBE,DBE,FADR,FSYN] status fields].
1530                                                         The FSYN field contains an encoded value which determines
1531                                                         which bit was corrected(for SBE) or detected(for DBE) to
1532                                                         help in bit isolation of the error.
1533                                                         SW NOTE: If RECC_ENA=1: An NCB-Direct CSR mode read of the
1534                                                         upper QW in memory will return ZEROES in the upper 10b of the
1535                                                         data word.
1536                                                         If RECC_ENA=0: An NCB-Direct CSR mode read of the upper QW in
1537                                                         memory will return the RAW 64bits from memory. During memory
1538                                                         debug, writing RECC_ENA=0 provides visibility into the raw ECC
1539                                                         stored in memory at that time. */
1540	uint64_t dfr_ena                      : 1;  /**< DFM Memory Interface Enable
1541                                                         The DFM powers up with the DDR3 interface disabled.
1542                                                         If the DFA function is required, then after poweron
1543                                                         software configures a stable DFM DDR3 memory clock
1544                                                         (see: LMCx_DDR_PLL_CTL[DFM_PS_EN, DFM_DIV_RESET]),
1545                                                         the DFM DDR3 memory interface can be enabled.
1546                                                         When disabled (DFR_ENA=0), all DFM DDR3 memory
1547                                                         output and bidirectional pins will be tristated.
1548                                                         SW NOTE: The DFR_ENA=1 write MUST occur sometime after
1549                                                         the DFM is brought out of reset (ie: after the
1550                                                         DFM_DLL_CTL2[DRESET]=0 write). (A usage sketch follows this union definition.) */
1551#else
1552	uint64_t dfr_ena                      : 1;
1553	uint64_t recc_ena                     : 1;
1554	uint64_t wecc_ena                     : 1;
1555	uint64_t sbe_ena                      : 1;
1556	uint64_t reserved_4_63                : 60;
1557#endif
1558	} s;
1559	struct cvmx_dfm_fnt_ctl_s             cn63xx;
1560	struct cvmx_dfm_fnt_ctl_s             cn63xxp1;
1561};
1562typedef union cvmx_dfm_fnt_ctl cvmx_dfm_fnt_ctl_t;
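
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): enabling the DFM DDR3 interface and OWECC protection via
 * DFM_FNT_CTL, per the field descriptions above.  The DFR_ENA=1 write must
 * come after the DFM DLL/DRESET bring-up (i.e. after DFM_DLL_CTL2[DRESET]=0).
 * Assumes the CVMX_DFM_FNT_CTL address macro defined earlier in this header.
 *
 *   cvmx_dfm_fnt_ctl_t fnt;
 *
 *   fnt.u64 = cvmx_read_csr(CVMX_DFM_FNT_CTL);
 *   fnt.s.wecc_ena = 1;   // HW generates the inband 10b OWECC on writes
 *   fnt.s.recc_ena = 1;   // check/correct the inband OWECC on reads
 *   fnt.s.sbe_ena  = 0;   // do not flag corrected SBEs as GWALK ERR completions
 *   fnt.s.dfr_ena  = 1;   // un-tristate the DFM DDR3 interface
 *   cvmx_write_csr(CVMX_DFM_FNT_CTL, fnt.u64);
 */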
1563
1564/**
1565 * cvmx_dfm_fnt_iena
1566 *
1567 * DFM_FNT_IENA = DFM Front Interrupt Enable Mask
1568 *
1569 * This register contains error interrupt enable information for the DFM Front Section of Logic. A usage sketch follows the union definition below.
1570 */
1571union cvmx_dfm_fnt_iena
1572{
1573	uint64_t u64;
1574	struct cvmx_dfm_fnt_iena_s
1575	{
1576#if __BYTE_ORDER == __BIG_ENDIAN
1577	uint64_t reserved_2_63                : 62;
1578	uint64_t dbe_intena                   : 1;  /**< OWECC Double Error Detected(DED) Interrupt Enable
1579                                                         When set, the memory controller raises a processor
1580                                                         interrupt on detecting an uncorrectable double bit
1581                                                         OWECC during a memory read. */
1582	uint64_t sbe_intena                   : 1;  /**< OWECC Single Error Corrected(SEC) Interrupt Enable
1583                                                         When set, the memory controller raises a processor
1584                                                         interrupt on detecting a correctable single bit
1585                                                         OWECC error which was corrected during a memory
1586                                                         read. */
1587#else
1588	uint64_t sbe_intena                   : 1;
1589	uint64_t dbe_intena                   : 1;
1590	uint64_t reserved_2_63                : 62;
1591#endif
1592	} s;
1593	struct cvmx_dfm_fnt_iena_s            cn63xx;
1594	struct cvmx_dfm_fnt_iena_s            cn63xxp1;
1595};
1596typedef union cvmx_dfm_fnt_iena cvmx_dfm_fnt_iena_t;
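
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): enabling interrupts for both OWECC error classes.  Assumes
 * the CVMX_DFM_FNT_IENA address macro defined earlier in this header.
 *
 *   cvmx_dfm_fnt_iena_t iena;
 *
 *   iena.u64 = cvmx_read_csr(CVMX_DFM_FNT_IENA);
 *   iena.s.sbe_intena = 1;   // interrupt on corrected single-bit errors
 *   iena.s.dbe_intena = 1;   // interrupt on uncorrectable double-bit errors
 *   cvmx_write_csr(CVMX_DFM_FNT_IENA, iena.u64);
 */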
1597
1598/**
1599 * cvmx_dfm_fnt_sclk
1600 *
1601 * DFM_FNT_SCLK = DFM Front SCLK Control Register
1602 *
1603 * This register contains control registers for the DFM Front Section of Logic.
1604 * NOTE: This register is in the USCLK domain and is used to enable the conditional SCLK grid, as well as
1605 * to start a software BiST sequence for the DFM sub-block. (Note: the DFM has conditional clocks, which
1606 * prevent BiST from running automatically under reset.) A BiST usage sketch follows the union definition below.
1607 */
1608union cvmx_dfm_fnt_sclk
1609{
1610	uint64_t u64;
1611	struct cvmx_dfm_fnt_sclk_s
1612	{
1613#if __BYTE_ORDER == __BIG_ENDIAN
1614	uint64_t reserved_3_63                : 61;
1615	uint64_t clear_bist                   : 1;  /**< When START_BIST is written 0->1, if CLEAR_BIST=1, all
1616                                                         previous BiST state is cleared.
1617                                                         NOTES:
1618                                                         1) CLEAR_BIST must be written to 1 before START_BIST
1619                                                         is written to 1 using a separate CSR write.
1620                                                         2) CLEAR_BIST must not be changed after writing START_BIST
1621                                                         0->1 until the BIST operation completes. */
1622	uint64_t bist_start                   : 1;  /**< When software writes BIST_START=0->1, a BiST is executed
1623                                                         for the DFM sub-block.
1624                                                         NOTES:
1625                                                         1) This bit should only be written after BOTH sclk
1626                                                         and fclk have been enabled by software and are stable
1627                                                         (see: DFM_FNT_SCLK[SCLKDIS] and instructions on how to
1628                                                         enable the DFM DDR3 memory (fclk) - which requires LMC
1629                                                         PLL init, DFM clock divider and proper DFM DLL
1630                                                         initialization sequence). */
1631	uint64_t sclkdis                      : 1;  /**< DFM sclk disable Source
1632                                                         When SET, the DFM sclks are disabled (to conserve overall
1633                                                         chip clocking power when the DFM function is not used).
1634                                                         NOTE: This should only be written to a different value
1635                                                         during power-on SW initialization. */
1636#else
1637	uint64_t sclkdis                      : 1;
1638	uint64_t bist_start                   : 1;
1639	uint64_t clear_bist                   : 1;
1640	uint64_t reserved_3_63                : 61;
1641#endif
1642	} s;
1643	struct cvmx_dfm_fnt_sclk_s            cn63xx;
1644	struct cvmx_dfm_fnt_sclk_s            cn63xxp1;
1645};
1646typedef union cvmx_dfm_fnt_sclk cvmx_dfm_fnt_sclk_t;
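
/*
 * BiST usage sketch (illustrative; not part of the auto-generated register
 * definitions): running software BiST on the DFM sub-block using DFM_FNT_SCLK
 * and checking DFM_FNT_BIST, following the notes above (CLEAR_BIST written in
 * a separate CSR write before BIST_START, with both sclk and fclk already
 * enabled and stable).  The completion wait below is a placeholder; this
 * header does not specify the required delay.  Assumes the
 * CVMX_DFM_FNT_SCLK/CVMX_DFM_FNT_BIST address macros defined earlier in this
 * header and cvmx_wait_usec()/cvmx_warn() from the cvmx SDK.
 *
 *   cvmx_dfm_fnt_sclk_t sclk;
 *   cvmx_dfm_fnt_bist_t bist;
 *
 *   sclk.u64 = cvmx_read_csr(CVMX_DFM_FNT_SCLK);
 *   sclk.s.sclkdis    = 0;   // ensure the DFM sclk grid is enabled
 *   sclk.s.clear_bist = 1;   // separate write: clear previous BiST state
 *   cvmx_write_csr(CVMX_DFM_FNT_SCLK, sclk.u64);
 *
 *   sclk.s.bist_start = 1;   // 0->1 starts BiST (CLEAR_BIST left unchanged)
 *   cvmx_write_csr(CVMX_DFM_FNT_SCLK, sclk.u64);
 *
 *   cvmx_wait_usec(100);     // placeholder delay for BiST completion
 *
 *   bist.u64 = cvmx_read_csr(CVMX_DFM_FNT_BIST);
 *   if (bist.s.cab || bist.s.mrq || bist.s.mff || bist.s.rpb || bist.s.mwb)
 *       cvmx_warn("DFM front BiST failure\n");
 */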
1647
1648/**
1649 * cvmx_dfm_fnt_stat
1650 *
1651 * DFM_FNT_STAT = DFM Front Status Register
1652 *
1653 * This register contains error status information for the DFM Front Section of Logic. A sketch for reading and clearing these error bits follows the union definition below.
1654 */
1655union cvmx_dfm_fnt_stat
1656{
1657	uint64_t u64;
1658	struct cvmx_dfm_fnt_stat_s
1659	{
1660#if __BYTE_ORDER == __BIG_ENDIAN
1661	uint64_t reserved_42_63               : 22;
1662	uint64_t fsyn                         : 10; /**< Failing Syndrome
1663                                                         If SBE_ERR=1, the FSYN code determines which bit was
1664                                                         corrected during the OWECC check/correct.
1665                                                         NOTE: If both DBE_ERR/SBE_ERR are set, the DBE_ERR has
1666                                                         higher priority and FSYN captured will always be for the
1667                                                         DBE_ERR detected.
1668                                                         The FSYN is "locked down" when either DBE_ERR/SBE_ERR
1669                                                         are detected (until these bits are cleared (W1C)).
1670                                                         However, if an SBE_ERR occurs first, followed by a
1671                                                         DBE_ERR, the higher priority DBE_ERR will re-capture
1672                                                         the FSYN for the higher priority error case. */
1673	uint64_t fadr                         : 28; /**< Failing Memory octaword address
1674                                                         If either SBE_ERR or DBE_ERR are set, the FADR
1675                                                         represents the failing octaword address.
1676                                                         NOTE: If both DBE_ERR/SBE_ERR are set, the DBE_ERR has
1677                                                         higher priority and the FADR captured will always be
1678                                                         with the DBE_ERR detected.
1679                                                         The FADR is "locked down" when either DBE_ERR/SBE_ERR
1680                                                         are detected (until these bits are cleared (W1C)).
1681                                                         However, if an SBE_ERR occurs first, followed by a
1682                                                         DBE_ERR, the higher priority DBE_ERR will re-capture
1683                                                         the FADR for the higher priority error case. */
1684	uint64_t reserved_2_3                 : 2;
1685	uint64_t dbe_err                      : 1;  /**< Double bit error detected(uncorrectable) during
1686                                                         Memory Read.
1687                                                         Write of 1 will clear the corresponding error bit */
1688	uint64_t sbe_err                      : 1;  /**< Single bit error detected(corrected) during
1689                                                         Memory Read.
1690                                                         Write of 1 will clear the corresponding error bit */
1691#else
1692	uint64_t sbe_err                      : 1;
1693	uint64_t dbe_err                      : 1;
1694	uint64_t reserved_2_3                 : 2;
1695	uint64_t fadr                         : 28;
1696	uint64_t fsyn                         : 10;
1697	uint64_t reserved_42_63               : 22;
1698#endif
1699	} s;
1700	struct cvmx_dfm_fnt_stat_s            cn63xx;
1701	struct cvmx_dfm_fnt_stat_s            cn63xxp1;
1702};
1703typedef union cvmx_dfm_fnt_stat cvmx_dfm_fnt_stat_t;
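
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): reading and clearing the OWECC error status.  SBE_ERR and
 * DBE_ERR are write-1-to-clear; FSYN/FADR stay locked until the error bits
 * are cleared.  Assumes the CVMX_DFM_FNT_STAT address macro defined earlier
 * in this header and cvmx_warn() from the cvmx SDK.
 *
 *   cvmx_dfm_fnt_stat_t stat;
 *
 *   stat.u64 = cvmx_read_csr(CVMX_DFM_FNT_STAT);
 *   if (stat.s.dbe_err)
 *       cvmx_warn("DFM DBE at octaword 0x%x, syndrome 0x%x\n",
 *                 (unsigned)stat.s.fadr, (unsigned)stat.s.fsyn);
 *   else if (stat.s.sbe_err)
 *       cvmx_warn("DFM SBE (corrected) at octaword 0x%x, syndrome 0x%x\n",
 *                 (unsigned)stat.s.fadr, (unsigned)stat.s.fsyn);
 *
 *   // Writing the value back writes 1s to the set error bits, clearing them
 *   // and unlocking FSYN/FADR for the next error capture.
 *   cvmx_write_csr(CVMX_DFM_FNT_STAT, stat.u64);
 */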
1704
1705/**
1706 * cvmx_dfm_ifb_cnt
1707 *
1708 * DFM_IFB_CNT  = Performance Counters
1709 *
1710 */
1711union cvmx_dfm_ifb_cnt
1712{
1713	uint64_t u64;
1714	struct cvmx_dfm_ifb_cnt_s
1715	{
1716#if __BYTE_ORDER == __BIG_ENDIAN
1717	uint64_t ifbcnt                       : 64; /**< Performance Counter
1718                                                         64-bit counter that increments every
1719                                                         cycle there is something in the in-flight buffer. (A bus-utilization sketch follows this union definition.) */
1720#else
1721	uint64_t ifbcnt                       : 64;
1722#endif
1723	} s;
1724	struct cvmx_dfm_ifb_cnt_s             cn63xx;
1725	struct cvmx_dfm_ifb_cnt_s             cn63xxp1;
1726};
1727typedef union cvmx_dfm_ifb_cnt cvmx_dfm_ifb_cnt_t;
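
/*
 * Usage sketch (illustrative; not part of the auto-generated register
 * definitions): one way to derive an in-flight-buffer occupancy figure from
 * the DFM performance counters.  Since IFBCNT increments on every fclk cycle
 * with something in the in-flight buffer and FCLKCNT counts fclks, the ratio
 * approximates the fraction of time the DFM had work outstanding.  This
 * derived metric is an illustration, not a definition from this header.
 * Assumes the CVMX_DFM_IFB_CNT/CVMX_DFM_FCLK_CNT address macros defined
 * earlier in this header.
 *
 *   uint64_t ifb  = cvmx_read_csr(CVMX_DFM_IFB_CNT);
 *   uint64_t fclk = cvmx_read_csr(CVMX_DFM_FCLK_CNT);
 *   uint64_t busy_percent = fclk ? (ifb * 100 / fclk) : 0;
 */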
1728
1729/**
1730 * cvmx_dfm_modereg_params0
1731 *
1732 * Notes:
1733 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
1734 *
1735 */
1736union cvmx_dfm_modereg_params0
1737{
1738	uint64_t u64;
1739	struct cvmx_dfm_modereg_params0_s
1740	{
1741#if __BYTE_ORDER == __BIG_ENDIAN
1742	uint64_t reserved_25_63               : 39;
1743	uint64_t ppd                          : 1;  /**< DLL Control for precharge powerdown
1744                                                         0 = Slow exit (DLL off)
1745                                                         1 = Fast exit (DLL on)
1746                                                         DFM writes this value to MR0[PPD] in the selected DDR3 parts
1747                                                         during power-up/init instruction sequencing.
1748                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1749                                                         This value must equal the MR0[PPD] value in all the DDR3
1750                                                         parts attached to all ranks during normal operation. */
1751	uint64_t wrp                          : 3;  /**< Write recovery for auto precharge
1752                                                         Should be programmed to be equal to or greater than
1753                                                         RNDUP[tWR(ns)/tCYC(ns)]
1754                                                         000 = Reserved
1755                                                         001 = 5
1756                                                         010 = 6
1757                                                         011 = 7
1758                                                         100 = 8
1759                                                         101 = 10
1760                                                         110 = 12
1761                                                         111 = Reserved
1762                                                         DFM writes this value to MR0[WR] in the selected DDR3 parts
1763                                                         during power-up/init instruction sequencing.
1764                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1765                                                         This value must equal the MR0[WR] value in all the DDR3
1766                                                         parts attached to all ranks during normal operation. */
1767	uint64_t dllr                         : 1;  /**< DLL Reset
1768                                                         DFM writes this value to MR0[DLL] in the selected DDR3 parts
1769                                                         during power-up/init instruction sequencing.
1770                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1771                                                         The MR0[DLL] value must be 0 in all the DDR3
1772                                                         parts attached to all ranks during normal operation. */
1773	uint64_t tm                           : 1;  /**< Test Mode
1774                                                         DFM writes this value to MR0[TM] in the selected DDR3 parts
1775                                                         during power-up/init instruction sequencing.
1776                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1777                                                         The MR0[TM] value must be 0 in all the DDR3
1778                                                         parts attached to all ranks during normal operation. */
1779	uint64_t rbt                          : 1;  /**< Read Burst Type
1780                                                         1 = interleaved (fixed)
1781                                                         DFM writes this value to MR0[RBT] in the selected DDR3 parts
1782                                                         during power-up/init instruction sequencing.
1783                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1784                                                         The MR0[RBT] value must be 1 in all the DDR3
1785                                                         parts attached to all ranks during normal operation. */
1786	uint64_t cl                           : 4;  /**< CAS Latency
1787                                                         0010 = 5
1788                                                         0100 = 6
1789                                                         0110 = 7
1790                                                         1000 = 8
1791                                                         1010 = 9
1792                                                         1100 = 10
1793                                                         1110 = 11
1794                                                         0000, ???1 = Reserved
1795                                                         DFM writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts
1796                                                         during power-up/init instruction sequencing.
1797                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1798                                                         This value must equal the MR0[CAS Latency / CL] value in all the DDR3
1799                                                         parts attached to all ranks during normal operation. */
1800	uint64_t bl                           : 2;  /**< Burst Length
1801                                                         0 = 8 (fixed)
1802                                                         DFM writes this value to MR0[BL] in the selected DDR3 parts
1803                                                         during power-up/init instruction sequencing.
1804                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1805                                                         The MR0[BL] value must be 0 in all the DDR3
1806                                                         parts attached to all ranks during normal operation. */
1807	uint64_t qoff                         : 1;  /**< Qoff Enable
1808                                                         0 = enable
1809                                                         DFM writes this value to MR1[Qoff] in the selected DDR3 parts
1810                                                         during power-up/init and write-leveling instruction sequencing.
1811                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1812                                                         this value to MR1[Qoff] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1813                                                         entry and exit instruction sequences.
1814                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1815                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1816                                                         The MR1[Qoff] value must be 0 in all the DDR3
1817                                                         parts attached to all ranks during normal operation. */
1818	uint64_t tdqs                         : 1;  /**< TDQS Enable
1819                                                         0 = disable
1820                                                         DFM writes this value to MR1[TDQS] in the selected DDR3 parts
1821                                                         during power-up/init and write-leveling instruction sequencing.
1822                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1823                                                         this value to MR1[TDQS] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1824                                                         entry and exit instruction sequences.
1825                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1826                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1827	uint64_t wlev                         : 1;  /**< Write Leveling Enable
1828                                                         0 = disable
1829                                                         DFM writes MR1[Level]=0 in the selected DDR3 parts
1830                                                         during power-up/init and write-leveling instruction sequencing.
1831                                                         (DFM also writes MR1[Level]=1 at the beginning of a
1832                                                         write-leveling instruction sequence. Write-leveling can only be initiated via the
1833                                                         write-leveling instruction sequence.)
1834                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1835                                                         MR1[Level]=0 in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1836                                                         entry and exit instruction sequences.
1837                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1838                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1839	uint64_t al                           : 2;  /**< Additive Latency
1840                                                         00 = 0
1841                                                         01 = CL-1
1842                                                         10 = CL-2
1843                                                         11 = Reserved
1844                                                         DFM writes this value to MR1[AL] in the selected DDR3 parts
1845                                                         during power-up/init and write-leveling instruction sequencing.
1846                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1847                                                         this value to MR1[AL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1848                                                         entry and exit instruction sequences.
1849                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1850                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1851                                                         This value must equal the MR1[AL] value in all the DDR3
1852                                                         parts attached to all ranks during normal operation.
1853                                                         See also DFM_CONTROL[POCAS]. */
1854	uint64_t dll                          : 1;  /**< DLL Enable
1855                                                         0 = enable
1856                                                         1 = disable
1857                                                         DFM writes this value to MR1[DLL] in the selected DDR3 parts
1858                                                         during power-up/init and write-leveling instruction sequencing.
1859                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1860                                                         this value to MR1[DLL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1861                                                         entry and exit instruction sequences.
1862                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1863                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1864                                                         This value must equal the MR1[DLL] value in all the DDR3
1865                                                         parts attached to all ranks during normal operation.
1866                                                         In dll-off mode, CL/CWL must be programmed
1867                                                         equal to 6/6, respectively, as per the DDR3 specifications. */
1868	uint64_t mpr                          : 1;  /**< MPR
1869                                                         DFM writes this value to MR3[MPR] in the selected DDR3 parts
1870                                                         during power-up/init and read-leveling instruction sequencing.
1871                                                         (DFM also writes MR3[MPR]=1 at the beginning of a
1872                                                         read-leveling instruction sequence. Read-leveling can only be initiated via the
1873                                                         read-leveling instruction sequence.)
1874                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1875                                                         The MR3[MPR] value must be 0 in all the DDR3
1876                                                         parts attached to all ranks during normal operation. */
1877	uint64_t mprloc                       : 2;  /**< MPR Location
1878                                                         DFM writes this value to MR3[MPRLoc] in the selected DDR3 parts
1879                                                         during power-up/init and read-leveling instruction sequencing.
1880                                                         (DFM also writes MR3[MPRLoc]=0 at the beginning of the
1881                                                         read-leveling instruction sequence.)
1882                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1883                                                         The MR3[MPRLoc] value must be 0 in all the DDR3
1884                                                         parts attached to all ranks during normal operation. */
1885	uint64_t cwl                          : 3;  /**< CAS Write Latency
1886                                                         - 000: 5
1887                                                         - 001: 6
1888                                                         - 010: 7
1889                                                         - 011: 8
1890                                                         1xx: Reserved
1891                                                         DFM writes this value to MR2[CWL] in the selected DDR3 parts
1892                                                         during power-up/init instruction sequencing.
1893                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1894                                                         this value to MR2[CWL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1895                                                         entry and exit instruction sequences.
1896                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1897                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1898                                                         This value must equal the MR2[CWL] value in all the DDR3
1899                                                         parts attached to all ranks during normal operation. */
1900#else
1901	uint64_t cwl                          : 3;
1902	uint64_t mprloc                       : 2;
1903	uint64_t mpr                          : 1;
1904	uint64_t dll                          : 1;
1905	uint64_t al                           : 2;
1906	uint64_t wlev                         : 1;
1907	uint64_t tdqs                         : 1;
1908	uint64_t qoff                         : 1;
1909	uint64_t bl                           : 2;
1910	uint64_t cl                           : 4;
1911	uint64_t rbt                          : 1;
1912	uint64_t tm                           : 1;
1913	uint64_t dllr                         : 1;
1914	uint64_t wrp                          : 3;
1915	uint64_t ppd                          : 1;
1916	uint64_t reserved_25_63               : 39;
1917#endif
1918	} s;
1919	struct cvmx_dfm_modereg_params0_s     cn63xx;
1920	struct cvmx_dfm_modereg_params0_s     cn63xxp1;
1921};
1922typedef union cvmx_dfm_modereg_params0 cvmx_dfm_modereg_params0_t;
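
/* Illustrative sketch (not part of the auto-generated definitions): encode a
 * few of the mode-register fields using the tables documented above, for a
 * hypothetical part running with tCYC = 1.5 ns, tWR = 15 ns, CL = 9 and
 * CWL = 7. WRP must be >= RNDUP[tWR(ns)/tCYC(ns)] = RNDUP[15/1.5] = 10, which
 * encodes as 101; CL = 9 encodes as 1010 and CWL = 7 encodes as 010. Assumes
 * the CVMX_DFM_MODEREG_PARAMS0 address macro defined earlier in this file and
 * the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h; the helper name
 * and the example timings are hypothetical. */
static inline void cvmx_dfm_example_set_modereg_params0(void)
{
	cvmx_dfm_modereg_params0_t mp0;

	mp0.u64 = cvmx_read_csr(CVMX_DFM_MODEREG_PARAMS0);
	mp0.s.wrp = 0x5;	/* 101: write recovery of 10 */
	mp0.s.cl  = 0xa;	/* 1010: CAS latency 9 */
	mp0.s.cwl = 0x2;	/* 010: CAS write latency 7 */
	cvmx_write_csr(CVMX_DFM_MODEREG_PARAMS0, mp0.u64);
}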
1923
1924/**
1925 * cvmx_dfm_modereg_params1
1926 *
1927 * Notes:
1928 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
1929 *
1930 */
1931union cvmx_dfm_modereg_params1
1932{
1933	uint64_t u64;
1934	struct cvmx_dfm_modereg_params1_s
1935	{
1936#if __BYTE_ORDER == __BIG_ENDIAN
1937	uint64_t reserved_48_63               : 16;
1938	uint64_t rtt_nom_11                   : 3;  /**< Must be zero */
1939	uint64_t dic_11                       : 2;  /**< Must be zero */
1940	uint64_t rtt_wr_11                    : 2;  /**< Must be zero */
1941	uint64_t srt_11                       : 1;  /**< Must be zero */
1942	uint64_t asr_11                       : 1;  /**< Must be zero */
1943	uint64_t pasr_11                      : 3;  /**< Must be zero */
1944	uint64_t rtt_nom_10                   : 3;  /**< Must be zero */
1945	uint64_t dic_10                       : 2;  /**< Must be zero */
1946	uint64_t rtt_wr_10                    : 2;  /**< Must be zero */
1947	uint64_t srt_10                       : 1;  /**< Must be zero */
1948	uint64_t asr_10                       : 1;  /**< Must be zero */
1949	uint64_t pasr_10                      : 3;  /**< Must be zero */
1950	uint64_t rtt_nom_01                   : 3;  /**< RTT_NOM Rank 1
1951                                                         DFM writes this value to MR1[Rtt_Nom] in the rank 1 (i.e. CS1) DDR3 parts
1952                                                         when selected during power-up/init instruction sequencing.
1953                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1954                                                         this value to MR1[Rtt_Nom] in all DRAM parts in rank 1 during self-refresh
1955                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1956                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1957                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1958	uint64_t dic_01                       : 2;  /**< Output Driver Impedance Control Rank 1
1959                                                         DFM writes this value to MR1[D.I.C.] in the rank 1 (i.e. CS1) DDR3 parts
1960                                                         when selected during power-up/init and write-leveling instruction sequencing.
1961                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1962                                                         this value to MR1[D.I.C.] in all DRAM parts in rank 1 during self-refresh
1963                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1964                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1965                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1966	uint64_t rtt_wr_01                    : 2;  /**< RTT_WR Rank 1
1967                                                         DFM writes this value to MR2[Rtt_WR] in the rank 1 (i.e. CS1) DDR3 parts
1968                                                         when selected during power-up/init instruction sequencing.
1969                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1970                                                         this value to MR2[Rtt_WR] in all DRAM parts in rank 1 during self-refresh
1971                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1972                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1973                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1974	uint64_t srt_01                       : 1;  /**< Self-refresh temperature range Rank 1
1975                                                         DFM writes this value to MR2[SRT] in the rank 1 (i.e. CS1) DDR3 parts
1976                                                         when selected during power-up/init instruction sequencing.
1977                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1978                                                         this value to MR2[SRT] in all DRAM parts in rank 1 during self-refresh
1979                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1980                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1981                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1982	uint64_t asr_01                       : 1;  /**< Auto self-refresh Rank 1
1983                                                         DFM writes this value to MR2[ASR] in the rank 1 (i.e. CS1) DDR3 parts
1984                                                         when selected during power-up/init instruction sequencing.
1985                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1986                                                         this value to MR2[ASR] in all DRAM parts in rank 1 during self-refresh
1987                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1988                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1989                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1990	uint64_t pasr_01                      : 3;  /**< Partial array self-refresh Rank 1
1991                                                         DFM writes this value to MR2[PASR] in the rank 1 (i.e. CS1) DDR3 parts
1992                                                         when selected during power-up/init instruction sequencing.
1993                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1994                                                         this value to MR2[PASR] in all DRAM parts in rank 1 during self-refresh
1995                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1996                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1997                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1998	uint64_t rtt_nom_00                   : 3;  /**< RTT_NOM Rank 0
1999                                                         DFM writes this value to MR1[Rtt_Nom] in the rank 0 (i.e. CS0) DDR3 parts
2000                                                         when selected during power-up/init instruction sequencing.
2001                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2002                                                         this value to MR1[Rtt_Nom] in all DRAM parts in rank 0 during self-refresh
2003                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2004                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2005                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2006	uint64_t dic_00                       : 2;  /**< Output Driver Impedance Control Rank 0
2007                                                         DFM writes this value to MR1[D.I.C.] in the rank 0 (i.e. CS0) DDR3 parts
2008                                                         when selected during power-up/init and write-leveling instruction sequencing.
2009                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2010                                                         this value to MR1[D.I.C.] in all DRAM parts in rank 0 during self-refresh
2011                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2012                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2013                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2014	uint64_t rtt_wr_00                    : 2;  /**< RTT_WR Rank 0
2015                                                         DFM writes this value to MR2[Rtt_WR] in the rank 0 (i.e. CS0) DDR3 parts
2016                                                         when selected during power-up/init instruction sequencing.
2017                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2018                                                         this value to MR2[Rtt_WR] in all DRAM parts in rank 0 during self-refresh
2019                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2020                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2021                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2022	uint64_t srt_00                       : 1;  /**< Self-refresh temperature range Rank 0
2023                                                         DFM writes this value to MR2[SRT] in the rank 0 (i.e. CS0) DDR3 parts
2024                                                         when selected during power-up/init instruction sequencing.
2025                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2026                                                         this value to MR2[SRT] in all DRAM parts in rank 0 during self-refresh
2027                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2028                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2029                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2030	uint64_t asr_00                       : 1;  /**< Auto self-refresh Rank 0
2031                                                         DFM writes this value to MR2[ASR] in the rank 0 (i.e. CS0) DDR3 parts
2032                                                         when selected during power-up/init instruction sequencing.
2033                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2034                                                         this value to MR2[ASR] in all DRAM parts in rank 0 during self-refresh
2035                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2036                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2037                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2038	uint64_t pasr_00                      : 3;  /**< Partial array self-refresh Rank 0
2039                                                         DFM writes this value to MR2[PASR] in the rank 0 (i.e. CS0) DDR3 parts
2040                                                         when selected during power-up/init instruction sequencing.
2041                                                         If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2042                                                         this value to MR2[PASR] in all DRAM parts in rank 0 during self-refresh
2043                                                         entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2044                                                         See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2045                                                         DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2046#else
2047	uint64_t pasr_00                      : 3;
2048	uint64_t asr_00                       : 1;
2049	uint64_t srt_00                       : 1;
2050	uint64_t rtt_wr_00                    : 2;
2051	uint64_t dic_00                       : 2;
2052	uint64_t rtt_nom_00                   : 3;
2053	uint64_t pasr_01                      : 3;
2054	uint64_t asr_01                       : 1;
2055	uint64_t srt_01                       : 1;
2056	uint64_t rtt_wr_01                    : 2;
2057	uint64_t dic_01                       : 2;
2058	uint64_t rtt_nom_01                   : 3;
2059	uint64_t pasr_10                      : 3;
2060	uint64_t asr_10                       : 1;
2061	uint64_t srt_10                       : 1;
2062	uint64_t rtt_wr_10                    : 2;
2063	uint64_t dic_10                       : 2;
2064	uint64_t rtt_nom_10                   : 3;
2065	uint64_t pasr_11                      : 3;
2066	uint64_t asr_11                       : 1;
2067	uint64_t srt_11                       : 1;
2068	uint64_t rtt_wr_11                    : 2;
2069	uint64_t dic_11                       : 2;
2070	uint64_t rtt_nom_11                   : 3;
2071	uint64_t reserved_48_63               : 16;
2072#endif
2073	} s;
2074	struct cvmx_dfm_modereg_params1_s     cn63xx;
2075	struct cvmx_dfm_modereg_params1_s     cn63xxp1;
2076};
2077typedef union cvmx_dfm_modereg_params1 cvmx_dfm_modereg_params1_t;
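
/* Illustrative sketch (not part of the auto-generated definitions): program the
 * per-rank termination fields described above for a two-rank configuration.
 * The MR1[Rtt_Nom] and MR2[Rtt_WR] encodings come from the DDR3 specification
 * and are passed in rather than hard-coded; the _10/_11 fields are left at
 * their required value of zero. Assumes the CVMX_DFM_MODEREG_PARAMS1 address
 * macro defined earlier in this file and the cvmx_read_csr()/cvmx_write_csr()
 * accessors from cvmx.h; the helper name is hypothetical. */
static inline void cvmx_dfm_example_set_rank_termination(int rtt_nom, int rtt_wr)
{
	cvmx_dfm_modereg_params1_t mp1;

	mp1.u64 = cvmx_read_csr(CVMX_DFM_MODEREG_PARAMS1);
	mp1.s.rtt_nom_00 = rtt_nom & 0x7;	/* written to MR1[Rtt_Nom] in rank 0 parts */
	mp1.s.rtt_wr_00  = rtt_wr & 0x3;	/* written to MR2[Rtt_WR] in rank 0 parts */
	mp1.s.rtt_nom_01 = rtt_nom & 0x7;	/* same settings for rank 1, if present */
	mp1.s.rtt_wr_01  = rtt_wr & 0x3;
	cvmx_write_csr(CVMX_DFM_MODEREG_PARAMS1, mp1.u64);
}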
2078
2079/**
2080 * cvmx_dfm_ops_cnt
2081 *
2082 * DFM_OPS_CNT  = Performance Counters
2083 *
2084 */
2085union cvmx_dfm_ops_cnt
2086{
2087	uint64_t u64;
2088	struct cvmx_dfm_ops_cnt_s
2089	{
2090#if __BYTE_ORDER == __BIG_ENDIAN
2091	uint64_t opscnt                       : 64; /**< Performance Counter
2092                                                         64-bit counter that increments when the DDR3 data bus
2093                                                         is being used.
2094                                                           DRAM bus utilization = DFM_OPS_CNT/DFM_FCLK_CNT */
2095#else
2096	uint64_t opscnt                       : 64;
2097#endif
2098	} s;
2099	struct cvmx_dfm_ops_cnt_s             cn63xx;
2100	struct cvmx_dfm_ops_cnt_s             cn63xxp1;
2101};
2102typedef union cvmx_dfm_ops_cnt cvmx_dfm_ops_cnt_t;
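
/* Illustrative sketch (not part of the auto-generated definitions): compute the
 * DRAM bus utilization documented above as DFM_OPS_CNT/DFM_FCLK_CNT, expressed
 * in parts per thousand to stay in integer math. In practice the counters
 * should be sampled as deltas over a bounded interval to avoid overflow.
 * Assumes the CVMX_DFM_OPS_CNT and CVMX_DFM_FCLK_CNT address macros defined
 * earlier in this file and the cvmx_read_csr() accessor from cvmx.h; the
 * helper name is hypothetical. */
static inline uint64_t cvmx_dfm_example_bus_utilization_permille(void)
{
	cvmx_dfm_ops_cnt_t ops;
	uint64_t fclk;

	ops.u64 = cvmx_read_csr(CVMX_DFM_OPS_CNT);
	fclk = cvmx_read_csr(CVMX_DFM_FCLK_CNT);
	if (fclk == 0)
		return 0;
	return (ops.u64 * 1000) / fclk;
}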
2103
2104/**
2105 * cvmx_dfm_phy_ctl
2106 *
2107 * DFM_PHY_CTL = DFM PHY Control
2108 *
2109 */
2110union cvmx_dfm_phy_ctl
2111{
2112	uint64_t u64;
2113	struct cvmx_dfm_phy_ctl_s
2114	{
2115#if __BYTE_ORDER == __BIG_ENDIAN
2116	uint64_t reserved_15_63               : 49;
2117	uint64_t rx_always_on                 : 1;  /**< Disable dynamic DDR3 IO Rx power gating */
2118	uint64_t lv_mode                      : 1;  /**< Low Voltage Mode (1.35V) */
2119	uint64_t ck_tune1                     : 1;  /**< Clock Tune
2120
2121                                                         NOTE: DFM UNUSED */
2122	uint64_t ck_dlyout1                   : 4;  /**< Clock delay out setting
2123
2124                                                         NOTE: DFM UNUSED */
2125	uint64_t ck_tune0                     : 1;  /**< Clock Tune */
2126	uint64_t ck_dlyout0                   : 4;  /**< Clock delay out setting */
2127	uint64_t loopback                     : 1;  /**< Loopback enable */
2128	uint64_t loopback_pos                 : 1;  /**< Loopback pos mode */
2129	uint64_t ts_stagger                   : 1;  /**< TS Stagger mode
2130                                                         This mode configures output drivers with 2-stage drive
2131                                                         strength to avoid undershoot issues on the bus when strong
2132                                                         drivers are suddenly turned on. When this mode is asserted,
2133                                                         Octeon will configure output drivers to be weak drivers
2134                                                         (60 ohm output impedance) at the first FCLK cycle, and
2135                                                         change drivers to the designated drive strengths specified
2136                                                         in DFM_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
2137                                                         at the following cycle */
2138#else
2139	uint64_t ts_stagger                   : 1;
2140	uint64_t loopback_pos                 : 1;
2141	uint64_t loopback                     : 1;
2142	uint64_t ck_dlyout0                   : 4;
2143	uint64_t ck_tune0                     : 1;
2144	uint64_t ck_dlyout1                   : 4;
2145	uint64_t ck_tune1                     : 1;
2146	uint64_t lv_mode                      : 1;
2147	uint64_t rx_always_on                 : 1;
2148	uint64_t reserved_15_63               : 49;
2149#endif
2150	} s;
2151	struct cvmx_dfm_phy_ctl_s             cn63xx;
2152	struct cvmx_dfm_phy_ctl_cn63xxp1
2153	{
2154#if __BYTE_ORDER == __BIG_ENDIAN
2155	uint64_t reserved_14_63               : 50;
2156	uint64_t lv_mode                      : 1;  /**< Low Voltage Mode (1.35V) */
2157	uint64_t ck_tune1                     : 1;  /**< Clock Tune
2158
2159                                                         NOTE: DFM UNUSED */
2160	uint64_t ck_dlyout1                   : 4;  /**< Clock delay out setting
2161
2162                                                         NOTE: DFM UNUSED */
2163	uint64_t ck_tune0                     : 1;  /**< Clock Tune */
2164	uint64_t ck_dlyout0                   : 4;  /**< Clock delay out setting */
2165	uint64_t loopback                     : 1;  /**< Loopback enable */
2166	uint64_t loopback_pos                 : 1;  /**< Loopback pos mode */
2167	uint64_t ts_stagger                   : 1;  /**< TS Stagger mode
2168                                                         This mode configures output drivers with 2-stage drive
2169                                                         strength to avoid undershoot issues on the bus when strong
2170                                                         drivers are suddenly turned on. When this mode is asserted,
2171                                                         Octeon will configure output drivers to be weak drivers
2172                                                         (60 ohm output impedance) at the first FCLK cycle, and
2173                                                         change drivers to the designated drive strengths specified
2174                                                         in DFM_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
2175                                                         at the following cycle */
2176#else
2177	uint64_t ts_stagger                   : 1;
2178	uint64_t loopback_pos                 : 1;
2179	uint64_t loopback                     : 1;
2180	uint64_t ck_dlyout0                   : 4;
2181	uint64_t ck_tune0                     : 1;
2182	uint64_t ck_dlyout1                   : 4;
2183	uint64_t ck_tune1                     : 1;
2184	uint64_t lv_mode                      : 1;
2185	uint64_t reserved_14_63               : 50;
2186#endif
2187	} cn63xxp1;
2188};
2189typedef union cvmx_dfm_phy_ctl cvmx_dfm_phy_ctl_t;
2190
2191/**
2192 * cvmx_dfm_reset_ctl
2193 *
2194 * Specify the RSL base addresses for the block
2195 *
2196 *
2197 * Notes:
2198 * DDR3RST - DDR3 DRAM parts have a new RESET#
2199 * pin that wasn't present in DDR2 parts. The
2200 * DDR3RST CSR field controls the assertion of
2201 * the new 63xx pin that attaches to RESET#.
2202 * When DDR3RST is set, 63xx asserts RESET#.
2203 * When DDR3RST is clear, 63xx de-asserts
2204 * RESET#.
2205 *
2206 * DDR3RST is set on a cold reset. Warm and
2207 * soft chip resets do not affect the DDR3RST
2208 * value. Outside of cold reset, only software
2209 * CSR writes change the DDR3RST value.
2210 */
2211union cvmx_dfm_reset_ctl
2212{
2213	uint64_t u64;
2214	struct cvmx_dfm_reset_ctl_s
2215	{
2216#if __BYTE_ORDER == __BIG_ENDIAN
2217	uint64_t reserved_4_63                : 60;
2218	uint64_t ddr3psv                      : 1;  /**< Must be zero */
2219	uint64_t ddr3psoft                    : 1;  /**< Must be zero */
2220	uint64_t ddr3pwarm                    : 1;  /**< Must be zero */
2221	uint64_t ddr3rst                      : 1;  /**< Memory Reset
2222                                                         0 = Reset asserted
2223                                                         1 = Reset de-asserted */
2224#else
2225	uint64_t ddr3rst                      : 1;
2226	uint64_t ddr3pwarm                    : 1;
2227	uint64_t ddr3psoft                    : 1;
2228	uint64_t ddr3psv                      : 1;
2229	uint64_t reserved_4_63                : 60;
2230#endif
2231	} s;
2232	struct cvmx_dfm_reset_ctl_s           cn63xx;
2233	struct cvmx_dfm_reset_ctl_s           cn63xxp1;
2234};
2235typedef union cvmx_dfm_reset_ctl cvmx_dfm_reset_ctl_t;
2236
2237/**
2238 * cvmx_dfm_rlevel_ctl
2239 */
2240union cvmx_dfm_rlevel_ctl
2241{
2242	uint64_t u64;
2243	struct cvmx_dfm_rlevel_ctl_s
2244	{
2245#if __BYTE_ORDER == __BIG_ENDIAN
2246	uint64_t reserved_22_63               : 42;
2247	uint64_t delay_unload_3               : 1;  /**< When set, unload the PHY silo one cycle later
2248                                                         during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 3
2249                                                         DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
2250	uint64_t delay_unload_2               : 1;  /**< When set, unload the PHY silo one cycle later
2251                                                         during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 2
2252                                                         DELAY_UNLOAD_2 should normally not be set. */
2253	uint64_t delay_unload_1               : 1;  /**< When set, unload the PHY silo one cycle later
2254                                                         during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 1
2255                                                         DELAY_UNLOAD_1 should normally not be set. */
2256	uint64_t delay_unload_0               : 1;  /**< When set, unload the PHY silo one cycle later
2257                                                         during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 0
2258                                                         DELAY_UNLOAD_0 should normally not be set. */
2259	uint64_t bitmask                      : 8;  /**< Mask to select bit lanes on which read-leveling
2260                                                         feedback is returned when OR_DIS is set to 1 */
2261	uint64_t or_dis                       : 1;  /**< Disable or'ing of bits in a byte lane when computing
2262                                                         the read-leveling bitmask
2263                                                         OR_DIS should normally not be set. */
2264	uint64_t offset_en                    : 1;  /**< Use DFM_RLEVEL_CTL[OFFSET] to calibrate read
2265                                                         level deskew settings */
2266	uint64_t offset                       : 4;  /**< Pick final_setting-offset (if set) for the read level
2267                                                         deskew setting instead of the middle of the largest
2268                                                         contiguous sequence of 1's in the bitmask */
2269	uint64_t byte                         : 4;  /**< 0 <= BYTE <= 1
2270                                                         Byte index for which bitmask results are saved
2271                                                         in DFM_RLEVEL_DBG */
2272#else
2273	uint64_t byte                         : 4;
2274	uint64_t offset                       : 4;
2275	uint64_t offset_en                    : 1;
2276	uint64_t or_dis                       : 1;
2277	uint64_t bitmask                      : 8;
2278	uint64_t delay_unload_0               : 1;
2279	uint64_t delay_unload_1               : 1;
2280	uint64_t delay_unload_2               : 1;
2281	uint64_t delay_unload_3               : 1;
2282	uint64_t reserved_22_63               : 42;
2283#endif
2284	} s;
2285	struct cvmx_dfm_rlevel_ctl_s          cn63xx;
2286	struct cvmx_dfm_rlevel_ctl_cn63xxp1
2287	{
2288#if __BYTE_ORDER == __BIG_ENDIAN
2289	uint64_t reserved_9_63                : 55;
2290	uint64_t offset_en                    : 1;  /**< Use DFM_RLEVEL_CTL[OFFSET] to calibrate read
2291                                                         level deskew settings */
2292	uint64_t offset                       : 4;  /**< Pick final_setting-offset (if set) for the read level
2293                                                         deskew setting instead of the middle of the largest
2294                                                         contiguous sequence of 1's in the bitmask */
2295	uint64_t byte                         : 4;  /**< 0 <= BYTE <= 1
2296                                                         Byte index for which bitmask results are saved
2297                                                         in DFM_RLEVEL_DBG */
2298#else
2299	uint64_t byte                         : 4;
2300	uint64_t offset                       : 4;
2301	uint64_t offset_en                    : 1;
2302	uint64_t reserved_9_63                : 55;
2303#endif
2304	} cn63xxp1;
2305};
2306typedef union cvmx_dfm_rlevel_ctl cvmx_dfm_rlevel_ctl_t;
2307
2308/**
2309 * cvmx_dfm_rlevel_dbg
2310 *
2311 * Notes:
2312 * A given read of DFM_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
2313 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
2314 * DFM_RLEVEL_CTL[BYTE] selects the particular byte.
2315 * To get these pass/fail results for a different rank, you must run the hardware read-leveling
2316 * again. For example, it is possible to get the BITMASK results for every byte of every rank
2317 * if you run read-leveling separately for each rank, probing DFM_RLEVEL_DBG between each
2318 * read-leveling.
2319 */
2320union cvmx_dfm_rlevel_dbg
2321{
2322	uint64_t u64;
2323	struct cvmx_dfm_rlevel_dbg_s
2324	{
2325#if __BYTE_ORDER == __BIG_ENDIAN
2326	uint64_t bitmask                      : 64; /**< Bitmask generated during deskew settings sweep
2327                                                         BITMASK[n]=0 means deskew setting n failed
2328                                                         BITMASK[n]=1 means deskew setting n passed
2329                                                         for 0 <= n <= 63 */
2330#else
2331	uint64_t bitmask                      : 64;
2332#endif
2333	} s;
2334	struct cvmx_dfm_rlevel_dbg_s          cn63xx;
2335	struct cvmx_dfm_rlevel_dbg_s          cn63xxp1;
2336};
2337typedef union cvmx_dfm_rlevel_dbg cvmx_dfm_rlevel_dbg_t;
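
/* Illustrative sketch (not part of the auto-generated definitions): fetch the
 * pass/fail BITMASK for one byte of the last rank that was read-leveled, per
 * the notes above (DFM_RLEVEL_CTL[BYTE] selects the byte, 0 or 1 for DFM).
 * Assumes the CVMX_DFM_RLEVEL_CTL and CVMX_DFM_RLEVEL_DBG address macros
 * defined earlier in this file and the cvmx_read_csr()/cvmx_write_csr()
 * accessors from cvmx.h; the helper name is hypothetical. */
static inline uint64_t cvmx_dfm_example_rlevel_bitmask(int byte)
{
	cvmx_dfm_rlevel_ctl_t rlevel_ctl;
	cvmx_dfm_rlevel_dbg_t rlevel_dbg;

	rlevel_ctl.u64 = cvmx_read_csr(CVMX_DFM_RLEVEL_CTL);
	rlevel_ctl.s.byte = byte & 0x1;		/* 0 <= BYTE <= 1 */
	cvmx_write_csr(CVMX_DFM_RLEVEL_CTL, rlevel_ctl.u64);

	rlevel_dbg.u64 = cvmx_read_csr(CVMX_DFM_RLEVEL_DBG);
	return rlevel_dbg.s.bitmask;		/* bit n set => deskew setting n passed */
}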
2338
2339/**
2340 * cvmx_dfm_rlevel_rank#
2341 *
2342 * Notes:
2343 * This is TWO CSRs per DFM, one for each rank.
2344 *
2345 * Deskew setting is measured in units of 1/4 FCLK, so the above BYTE* values can range over 16 FCLKs.
2346 *
2347 * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
2348 * If HW is unable to find a match per DFM_RLEVEL_CTL[OFFSET_EN] and DFM_RLEVEL_CTL[OFFSET], then HW will set DFM_RLEVEL_RANKn[BYTE*<5:0>]
2349 * to 0.
2350 *
2351 * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
2352 *
2353 * SW initiates a HW read-leveling sequence by programming DFM_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1 in DFM_CONFIG.
2354 * See DFM_RLEVEL_CTL.
2355 */
2356union cvmx_dfm_rlevel_rankx
2357{
2358	uint64_t u64;
2359	struct cvmx_dfm_rlevel_rankx_s
2360	{
2361#if __BYTE_ORDER == __BIG_ENDIAN
2362	uint64_t reserved_56_63               : 8;
2363	uint64_t status                       : 2;  /**< Indicates status of the read-leveling and where
2364                                                         the BYTE* programmings in <11:0> came from:
2365                                                         0 = BYTE* values are their reset value
2366                                                         1 = BYTE* values were set via a CSR write to this register
2367                                                         2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
2368                                                         3 = BYTE* values came from a complete read-leveling sequence */
2369	uint64_t reserved_12_53               : 42;
2370	uint64_t byte1                        : 6;  /**< Deskew setting */
2371	uint64_t byte0                        : 6;  /**< Deskew setting */
2372#else
2373	uint64_t byte0                        : 6;
2374	uint64_t byte1                        : 6;
2375	uint64_t reserved_12_53               : 42;
2376	uint64_t status                       : 2;
2377	uint64_t reserved_56_63               : 8;
2378#endif
2379	} s;
2380	struct cvmx_dfm_rlevel_rankx_s        cn63xx;
2381	struct cvmx_dfm_rlevel_rankx_s        cn63xxp1;
2382};
2383typedef union cvmx_dfm_rlevel_rankx cvmx_dfm_rlevel_rankx_t;
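
/* Illustrative sketch (not part of the auto-generated definitions): start a HW
 * read-leveling sequence for one rank and wait for completion, per the notes
 * above (program DFM_RLEVEL_CTL, then write SEQUENCE=1 with INIT_START=1 in
 * DFM_CONFIG, then poll for STATUS==3). Assumes the CVMX_DFM_CONFIG and
 * CVMX_DFM_RLEVEL_RANKX() address macros and a cvmx_dfm_config_t type with
 * sequence/init_start/rankmask members defined earlier in this file, plus the
 * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h; the helper name and
 * the one-hot RANKMASK usage are assumptions. */
static inline void cvmx_dfm_example_run_read_leveling(int rank)
{
	cvmx_dfm_config_t dfm_config;
	cvmx_dfm_rlevel_rankx_t rlevel_rank;

	dfm_config.u64 = cvmx_read_csr(CVMX_DFM_CONFIG);
	dfm_config.s.rankmask = 1 << rank;	/* level only the selected rank */
	dfm_config.s.sequence = 1;		/* read-leveling sequence */
	dfm_config.s.init_start = 1;		/* kick off the sequence */
	cvmx_write_csr(CVMX_DFM_CONFIG, dfm_config.u64);

	do {
		rlevel_rank.u64 = cvmx_read_csr(CVMX_DFM_RLEVEL_RANKX(rank));
	} while (rlevel_rank.s.status != 3);	/* 3 = values from a complete sequence */
}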
2384
2385/**
2386 * cvmx_dfm_rodt_mask
2387 *
2388 * DFM_RODT_MASK = DFM Read OnDieTermination mask
2389 * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
2390 * especially on a multi-rank system. DDR3 DQ/DM/DQS I/O's have built in
2391 * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
2392 * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
2393 * in that rank. System designers may prefer different combinations of ODT ON's for reads
2394 * into different ranks. Octeon supports full programmability by way of the mask register below.
2395 * Each Rank position has its own 8-bit programmable field.
2396 * When the controller does a read to that rank, it sets the 4 ODT pins to the MASK pins below.
2397 * For eg., When doing a read into Rank0, a system designer may desire to terminate the lines
2398 * with the resistor on Dimm0/Rank1. The mask RODT_D0_R0 would then be [00000010].
2399 * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
2400 * required, write 0 in this register. Note that, as per the DDR3 specifications, the ODT pin
2401 * for the rank that is being read should always be 0.
2402 *
2403 * Notes:
2404 * - Notice that when there is only one rank, all valid fields must be zero.  This is because there is no
2405 * "other" rank to terminate lines for.  Read ODT is meant for multirank systems.
2406 * - For a two rank system and a read op to rank0: use RODT_D0_R0<1> to terminate lines on rank1.
2407 * - For a two rank system and a read op to rank1: use RODT_D0_R1<0> to terminate lines on rank0.
2408 * - Therefore, when a given RANK is selected, the RODT mask for that RANK is used.
2409 *
2410 * DFM always reads 128-bit words independently via one read CAS operation per word.
2411 * When a RODT mask bit is set, DFM asserts the OCTEON ODT output
2412 * pin(s) starting (CL - CWL) CK's after the read CAS operation. Then, OCTEON
2413 * normally continues to assert the ODT output pin(s) for 5+DFM_CONTROL[RODT_BPRCH] more CK's
2414 * - for a total of 6+DFM_CONTROL[RODT_BPRCH] CK's for the entire 128-bit read -
2415 * satisfying the 6 CK DDR3 ODTH8 requirements.
2416 *
2417 * But it is possible for OCTEON to issue two 128-bit reads separated by as few as
2418 * RtR = 4 or 5 (6 if DFM_CONTROL[RODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
2419 * for the RODT mask of the first 128-bit read for RtR CK's, then asserts
2420 * the ODT output pin(s) for the RODT mask of the second 128-bit read for 6+DFM_CONTROL[RODT_BPRCH] CK's
2421 * (or less if a third 128-bit read follows within 4 or 5 (or 6) CK's of this second 128-bit read).
2422 * Note that it may be necessary to force DFM to space back-to-back 128-bit reads
2423 * to different ranks apart by at least 6+DFM_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
2424 */
2425union cvmx_dfm_rodt_mask
2426{
2427	uint64_t u64;
2428	struct cvmx_dfm_rodt_mask_s
2429	{
2430#if __BYTE_ORDER == __BIG_ENDIAN
2431	uint64_t rodt_d3_r1                   : 8;  /**< Must be zero. */
2432	uint64_t rodt_d3_r0                   : 8;  /**< Must be zero. */
2433	uint64_t rodt_d2_r1                   : 8;  /**< Must be zero. */
2434	uint64_t rodt_d2_r0                   : 8;  /**< Must be zero. */
2435	uint64_t rodt_d1_r1                   : 8;  /**< Must be zero. */
2436	uint64_t rodt_d1_r0                   : 8;  /**< Must be zero. */
2437	uint64_t rodt_d0_r1                   : 8;  /**< Read ODT mask RANK1
2438                                                         RODT_D0_R1<7:1> must be zero in all cases.
2439                                                         RODT_D0_R1<0> must also be zero if RANK_ENA is not set. */
2440	uint64_t rodt_d0_r0                   : 8;  /**< Read ODT mask RANK0
2441                                                         RODT_D0_R0<7:2,0> must be zero in all cases.
2442                                                         RODT_D0_R0<1> must also be zero if RANK_ENA is not set. */
2443#else
2444	uint64_t rodt_d0_r0                   : 8;
2445	uint64_t rodt_d0_r1                   : 8;
2446	uint64_t rodt_d1_r0                   : 8;
2447	uint64_t rodt_d1_r1                   : 8;
2448	uint64_t rodt_d2_r0                   : 8;
2449	uint64_t rodt_d2_r1                   : 8;
2450	uint64_t rodt_d3_r0                   : 8;
2451	uint64_t rodt_d3_r1                   : 8;
2452#endif
2453	} s;
2454	struct cvmx_dfm_rodt_mask_s           cn63xx;
2455	struct cvmx_dfm_rodt_mask_s           cn63xxp1;
2456};
2457typedef union cvmx_dfm_rodt_mask cvmx_dfm_rodt_mask_t;
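
/* Illustrative sketch (not part of the auto-generated definitions): the
 * two-rank read-ODT setup described in the notes above. Reads to rank 0
 * terminate on rank 1 (RODT_D0_R0<1>) and reads to rank 1 terminate on rank 0
 * (RODT_D0_R1<0>); all other fields stay zero. Assumes the CVMX_DFM_RODT_MASK
 * address macro defined earlier in this file and the cvmx_write_csr() accessor
 * from cvmx.h; the helper name is hypothetical. */
static inline void cvmx_dfm_example_set_two_rank_rodt(void)
{
	cvmx_dfm_rodt_mask_t rodt_mask;

	rodt_mask.u64 = 0;
	rodt_mask.s.rodt_d0_r0 = 1 << 1;	/* read to rank 0: terminate on rank 1 */
	rodt_mask.s.rodt_d0_r1 = 1 << 0;	/* read to rank 1: terminate on rank 0 */
	cvmx_write_csr(CVMX_DFM_RODT_MASK, rodt_mask.u64);
}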
2458
2459/**
2460 * cvmx_dfm_slot_ctl0
2461 *
2462 * DFM_SLOT_CTL0 = DFM Slot Control0
2463 * This register is an assortment of control fields needed by the memory controller.
2464 *
2465 * Notes:
2466 * HW will update this register if SW has not previously written to it and when any of DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn, DFM_CONTROL and
2467 * DFM_MODEREG_PARAMS0 change. Ideally, this register should only be read after DFM has been initialized and DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn
2468 * have valid data.
2469 * R2W_INIT has 1 extra CK cycle built in for odt settling/channel turnaround time.
2470 */
2471union cvmx_dfm_slot_ctl0
2472{
2473	uint64_t u64;
2474	struct cvmx_dfm_slot_ctl0_s
2475	{
2476#if __BYTE_ORDER == __BIG_ENDIAN
2477	uint64_t reserved_24_63               : 40;
2478	uint64_t w2w_init                     : 6;  /**< Write-to-write spacing control
2479                                                         for back to back accesses to the same rank and dimm */
2480	uint64_t w2r_init                     : 6;  /**< Write-to-read spacing control
2481                                                         for back to back accesses to the same rank and dimm */
2482	uint64_t r2w_init                     : 6;  /**< Read-to-write spacing control
2483                                                         for back to back accesses to the same rank and dimm */
2484	uint64_t r2r_init                     : 6;  /**< Read-to-read spacing control
2485                                                         for back to back accesses to the same rank and dimm */
2486#else
2487	uint64_t r2r_init                     : 6;
2488	uint64_t r2w_init                     : 6;
2489	uint64_t w2r_init                     : 6;
2490	uint64_t w2w_init                     : 6;
2491	uint64_t reserved_24_63               : 40;
2492#endif
2493	} s;
2494	struct cvmx_dfm_slot_ctl0_s           cn63xx;
2495	struct cvmx_dfm_slot_ctl0_s           cn63xxp1;
2496};
2497typedef union cvmx_dfm_slot_ctl0 cvmx_dfm_slot_ctl0_t;
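
/* Illustrative sketch (not part of the auto-generated definitions): read back a
 * hardware-computed same-rank spacing value after DFM initialization, as the
 * notes above recommend; DFM_SLOT_CTL1 can be read the same way for the
 * cross-rank values. Assumes the CVMX_DFM_SLOT_CTL0 address macro defined
 * earlier in this file and the cvmx_read_csr() accessor from cvmx.h; the
 * helper name is hypothetical. */
static inline int cvmx_dfm_example_get_r2w_init(void)
{
	cvmx_dfm_slot_ctl0_t slot_ctl0;

	slot_ctl0.u64 = cvmx_read_csr(CVMX_DFM_SLOT_CTL0);
	/* R2W_INIT already includes 1 extra CK for ODT settling/turnaround. */
	return slot_ctl0.s.r2w_init;
}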
2498
2499/**
2500 * cvmx_dfm_slot_ctl1
2501 *
2502 * DFM_SLOT_CTL1 = DFM Slot Control1
2503 * This register is an assortment of control fields needed by the memory controller.
2504 *
2505 * Notes:
2506 * HW will update this register if SW has not previously written to it and when any of DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn, DFM_CONTROL and
2507 * DFM_MODEREG_PARAMS0 change. Ideally, this register should only be read after DFM has been initialized and DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn
2508 * have valid data.
2509 * R2W_XRANK_INIT, W2R_XRANK_INIT have 1 extra CK cycle built in for odt settling/channel turnaround time.
2510 */
2511union cvmx_dfm_slot_ctl1
2512{
2513	uint64_t u64;
2514	struct cvmx_dfm_slot_ctl1_s
2515	{
2516#if __BYTE_ORDER == __BIG_ENDIAN
2517	uint64_t reserved_24_63               : 40;
2518	uint64_t w2w_xrank_init               : 6;  /**< Write-to-write spacing control
2519                                                         for back to back accesses across ranks of the same dimm */
2520	uint64_t w2r_xrank_init               : 6;  /**< Write-to-read spacing control
2521                                                         for back to back accesses across ranks of the same dimm */
2522	uint64_t r2w_xrank_init               : 6;  /**< Read-to-write spacing control
2523                                                         for back to back accesses across ranks of the same dimm */
2524	uint64_t r2r_xrank_init               : 6;  /**< Read-to-read spacing control
2525                                                         for back to back accesses across ranks of the same dimm */
2526#else
2527	uint64_t r2r_xrank_init               : 6;
2528	uint64_t r2w_xrank_init               : 6;
2529	uint64_t w2r_xrank_init               : 6;
2530	uint64_t w2w_xrank_init               : 6;
2531	uint64_t reserved_24_63               : 40;
2532#endif
2533	} s;
2534	struct cvmx_dfm_slot_ctl1_s           cn63xx;
2535	struct cvmx_dfm_slot_ctl1_s           cn63xxp1;
2536};
2537typedef union cvmx_dfm_slot_ctl1 cvmx_dfm_slot_ctl1_t;
2538
2539/**
2540 * cvmx_dfm_timing_params0
2541 */
2542union cvmx_dfm_timing_params0
2543{
2544	uint64_t u64;
2545	struct cvmx_dfm_timing_params0_s
2546	{
2547#if __BYTE_ORDER == __BIG_ENDIAN
2548	uint64_t reserved_47_63               : 17;
2549	uint64_t trp_ext                      : 1;  /**< Indicates tRP constraints.
2550                                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
2551                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
2552                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
2553                                                         is the DDR clock frequency (not data rate).
2554                                                         TYP tRP=10-15ns
2555                                                         TYP tRTP=max(4nCK, 7.5ns) */
2556	uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
2557                                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
2558                                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
2559                                                         is the DDR clock frequency (not data rate).
2560                                                         TYP=max(5nCK, 10ns) */
2561	uint64_t trp                          : 4;  /**< Indicates tRP constraints.
2562                                                         Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
2563                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
2564                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
2565                                                         is the DDR clock frequency (not data rate).
2566                                                         TYP tRP=10-15ns
2567                                                         TYP tRTP=max(4nCK, 7.5ns) */
2568	uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
2569                                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
2570                                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
2571                                                         is the DDR clock frequency (not data rate).
2572                                                         TYP=2 (equivalent to 512) */
2573	uint64_t tdllk                        : 4;  /**< Indicates tDLLk constraints.
2574                                                         Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
2575                                                         where tDLLk is from the DDR3 spec, and tCYC(ns)
2576                                                         is the DDR clock frequency (not data rate).
2577                                                         TYP=2 (equivalent to 512)
2578                                                         This parameter is used in self-refresh exit
2579                                                         and assumed to be greater than tRFC */
2580	uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
2581                                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
2582                                                         where tMOD is from the DDR3 spec, and tCYC(ns)
2583                                                         is the DDR clock frequency (not data rate).
2584                                                         TYP=max(12nCK, 15ns) */
2585	uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
2586                                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
2587                                                         where tMRD is from the DDR3 spec, and tCYC(ns)
2588                                                         is the DDR clock frequency (not data rate).
2589                                                         TYP=4nCK */
2590	uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
2591                                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
2592                                                         where tXPR is from the DDR3 spec, and tCYC(ns)
2593                                                         is the DDR clock frequency (not data rate).
2594                                                         TYP=max(5nCK, tRFC+10ns) */
2595	uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
2596                                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
2597                                                         where tCKE is from the DDR3 spec, and tCYC(ns)
2598                                                         is the DDR clock frequency (not data rate).
2599                                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
2600	uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
2601                                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
2602                                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
2603                                                         is the DDR clock frequency (not data rate).
2604                                                         TYP=4 (equivalent to 64) */
2605	uint64_t tckeon                       : 10; /**< Reserved. Should be written to zero. */
2606#else
2607	uint64_t tckeon                       : 10;
2608	uint64_t tzqcs                        : 4;
2609	uint64_t tcke                         : 4;
2610	uint64_t txpr                         : 4;
2611	uint64_t tmrd                         : 4;
2612	uint64_t tmod                         : 4;
2613	uint64_t tdllk                        : 4;
2614	uint64_t tzqinit                      : 4;
2615	uint64_t trp                          : 4;
2616	uint64_t tcksre                       : 4;
2617	uint64_t trp_ext                      : 1;
2618	uint64_t reserved_47_63               : 17;
2619#endif
2620	} s;
2621	struct cvmx_dfm_timing_params0_cn63xx
2622	{
2623#if __BYTE_ORDER == __BIG_ENDIAN
2624	uint64_t reserved_47_63               : 17;
2625	uint64_t trp_ext                      : 1;  /**< Indicates tRP constraints.
2626                                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
2627                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
2628                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
2629                                                         is the DDR clock frequency (not data rate).
2630                                                         TYP tRP=10-15ns
2631                                                         TYP tRTP=max(4nCK, 7.5ns) */
2632	uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
2633                                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
2634                                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
2635                                                         is the DDR clock frequency (not data rate).
2636                                                         TYP=max(5nCK, 10ns) */
2637	uint64_t trp                          : 4;  /**< Indicates tRP constraints.
2638                                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
2639                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
2640                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
2641                                                         is the DDR clock frequency (not data rate).
2642                                                         TYP tRP=10-15ns
2643                                                         TYP tRTP=max(4nCK, 7.5ns) */
2644	uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
2645                                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
2646                                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
2647                                                         is the DDR clock frequency (not data rate).
2648                                                         TYP=2 (equivalent to 512) */
2649	uint64_t tdllk                        : 4;  /**< Indicates tDLLk constraints.
2650                                                         Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
2651                                                         where tDLLk is from the DDR3 spec, and tCYC(ns)
2652                                                         is the DDR clock frequency (not data rate).
2653                                                         TYP=2 (equivalent to 512)
2654                                                         This parameter is used in self-refresh exit
2655                                                         and assumed to be greater than tRFC */
2656	uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
2657                                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
2658                                                         where tMOD is from the DDR3 spec, and tCYC(ns)
2659                                                         is the DDR clock frequency (not data rate).
2660                                                         TYP=max(12nCK, 15ns) */
2661	uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
2662                                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
2663                                                         where tMRD is from the DDR3 spec, and tCYC(ns)
2664                                                         is the DDR clock frequency (not data rate).
2665                                                         TYP=4nCK */
2666	uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
2667                                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
2668                                                         where tXPR is from the DDR3 spec, and tCYC(ns)
2669                                                         is the DDR clock frequency (not data rate).
2670                                                         TYP=max(5nCK, tRFC+10ns) */
2671	uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
2672                                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
2673                                                         where tCKE is from the DDR3 spec, and tCYC(ns)
2674                                                         is the DDR clock frequency (not data rate).
2675                                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
2676	uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
2677                                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
2678                                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
2679                                                         is the DDR clock frequency (not data rate).
2680                                                         TYP=4 (equivalent to 64) */
2681	uint64_t reserved_0_9                 : 10;
2682#else
2683	uint64_t reserved_0_9                 : 10;
2684	uint64_t tzqcs                        : 4;
2685	uint64_t tcke                         : 4;
2686	uint64_t txpr                         : 4;
2687	uint64_t tmrd                         : 4;
2688	uint64_t tmod                         : 4;
2689	uint64_t tdllk                        : 4;
2690	uint64_t tzqinit                      : 4;
2691	uint64_t trp                          : 4;
2692	uint64_t tcksre                       : 4;
2693	uint64_t trp_ext                      : 1;
2694	uint64_t reserved_47_63               : 17;
2695#endif
2696	} cn63xx;
2697	struct cvmx_dfm_timing_params0_cn63xxp1
2698	{
2699#if __BYTE_ORDER == __BIG_ENDIAN
2700	uint64_t reserved_46_63               : 18;
2701	uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
2702                                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
2703                                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
2704                                                         is the DDR clock frequency (not data rate).
2705                                                         TYP=max(5nCK, 10ns) */
2706	uint64_t trp                          : 4;  /**< Indicates tRP constraints.
2707                                                         Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
2708                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
2709                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
2710                                                         is the DDR clock frequency (not data rate).
2711                                                         TYP tRP=10-15ns
2712                                                         TYP tRTP=max(4nCK, 7.5ns) */
2713	uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
2714                                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
2715                                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
2716                                                         is the DDR clock frequency (not data rate).
2717                                                         TYP=2 (equivalent to 512) */
2718	uint64_t tdllk                        : 4;  /**< Indicates tDLLk constraints.
2719                                                         Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
2720                                                         where tDLLk is from the DDR3 spec, and tCYC(ns)
2721                                                         is the DDR clock frequency (not data rate).
2722                                                         TYP=2 (equivalent to 512)
2723                                                         This parameter is used in self-refresh exit
2724                                                         and assumed to be greater than tRFC */
2725	uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
2726                                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
2727                                                         where tMOD is from the DDR3 spec, and tCYC(ns)
2728                                                         is the DDR clock frequency (not data rate).
2729                                                         TYP=max(12nCK, 15ns) */
2730	uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
2731                                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
2732                                                         where tMRD is from the DDR3 spec, and tCYC(ns)
2733                                                         is the DDR clock frequency (not data rate).
2734                                                         TYP=4nCK */
2735	uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
2736                                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
2737                                                         where tXPR is from the DDR3 spec, and tCYC(ns)
2738                                                         is the DDR clock frequency (not data rate).
2739                                                         TYP=max(5nCK, tRFC+10ns) */
2740	uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
2741                                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
2742                                                         where tCKE is from the DDR3 spec, and tCYC(ns)
2743                                                         is the DDR clock frequency (not data rate).
2744                                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
2745	uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
2746                                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
2747                                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
2748                                                         is the DDR clock frequency (not data rate).
2749                                                         TYP=4 (equivalent to 64) */
2750	uint64_t tckeon                       : 10; /**< Reserved. Should be written to zero. */
2751#else
2752	uint64_t tckeon                       : 10;
2753	uint64_t tzqcs                        : 4;
2754	uint64_t tcke                         : 4;
2755	uint64_t txpr                         : 4;
2756	uint64_t tmrd                         : 4;
2757	uint64_t tmod                         : 4;
2758	uint64_t tdllk                        : 4;
2759	uint64_t tzqinit                      : 4;
2760	uint64_t trp                          : 4;
2761	uint64_t tcksre                       : 4;
2762	uint64_t reserved_46_63               : 18;
2763#endif
2764	} cn63xxp1;
2765};
2766typedef union cvmx_dfm_timing_params0 cvmx_dfm_timing_params0_t;
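
/*
 * Illustrative sketch (not part of the auto-generated CSR definitions
 * above): one way the RNDUP[...] formulas documented for
 * DFM_TIMING_PARAMS0 might be evaluated in software.  RNDUP is a ceiling
 * division.  The clock period and DDR3 timings used here (400 MHz DFM
 * clock, tRP = 13.125 ns, tRTP = 10 ns, tZQCS = 160 ns) are assumptions
 * chosen only for the example.
 */
static inline uint64_t cvmx_dfm_example_rndup(uint64_t num_ps, uint64_t den_ps)
{
	/* RNDUP[x/y] == ceiling(x/y) for positive integers */
	return (num_ps + den_ps - 1) / den_ps;
}

static inline uint64_t cvmx_dfm_example_timing_params0(void)
{
	const uint64_t tcyc_ps  = 2500;    /* assumed DDR clock period (400 MHz)  */
	const uint64_t trp_ps   = 13125;   /* assumed tRP                         */
	const uint64_t trtp_ps  = 10000;   /* assumed tRTP = max(4nCK, 7.5ns)     */
	const uint64_t tzqcs_ps = 160000;  /* assumed tZQCS = 64 nCK              */
	cvmx_dfm_timing_params0_t params;

	params.u64 = 0;
	/* TRP = RNDUP[tRP/tCYC] + (RNDUP[tRTP/tCYC]-4) - 1  ->  6 + 0 - 1 = 5 */
	params.s.trp   = cvmx_dfm_example_rndup(trp_ps, tcyc_ps)
			 + (cvmx_dfm_example_rndup(trtp_ps, tcyc_ps) - 4) - 1;
	/* TZQCS = RNDUP[tZQCS/(16*tCYC)]  ->  the typical value of 4 (64 tCYC) */
	params.s.tzqcs = cvmx_dfm_example_rndup(tzqcs_ps, 16 * tcyc_ps);
	return params.u64;
}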
2767
2768/**
2769 * cvmx_dfm_timing_params1
2770 */
2771union cvmx_dfm_timing_params1
2772{
2773	uint64_t u64;
2774	struct cvmx_dfm_timing_params1_s
2775	{
2776#if __BYTE_ORDER == __BIG_ENDIAN
2777	uint64_t reserved_47_63               : 17;
2778	uint64_t tras_ext                     : 1;  /**< Indicates tRAS constraints.
2779                                                         Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
2780                                                         where tRAS is from the DDR3 spec, and tCYC(ns)
2781                                                         is the DDR clock frequency (not data rate).
2782                                                         TYP=35ns-9*tREFI
2783                                                             - 000000: RESERVED
2784                                                             - 000001: 2 tCYC
2785                                                             - 000010: 3 tCYC
2786                                                             - ...
2787                                                             - 111111: 64 tCYC */
2788	uint64_t txpdll                       : 5;  /**< Indicates tXPDLL constraints.
2789                                                         Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
2790                                                         where tXPDLL is from the DDR3 spec, and tCYC(ns)
2791                                                         is the DDR clock frequency (not data rate).
2792                                                         TYP=max(10nCK, 24ns) */
2793	uint64_t tfaw                         : 5;  /**< Indicates tFAW constraints.
2794                                                         Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
2795                                                         where tFAW is from the DDR3 spec, and tCYC(ns)
2796                                                         is the DDR clock frequency (not data rate).
2797                                                         TYP=30-40ns */
2798	uint64_t twldqsen                     : 4;  /**< Indicates tWLDQSEN constraints.
2799                                                         Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
2800                                                         where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
2801                                                         is the DDR clock frequency (not data rate).
2802                                                         TYP=max(25nCK) */
2803	uint64_t twlmrd                       : 4;  /**< Indicates tWLMRD constraints.
2804                                                         Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
2805                                                         where tWLMRD is from the DDR3 spec, and tCYC(ns)
2806                                                         is the DDR clock frequency (not data rate).
2807                                                         TYP=max(40nCK) */
2808	uint64_t txp                          : 3;  /**< Indicates tXP constraints.
2809                                                         Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
2810                                                         where tXP is from the DDR3 spec, and tCYC(ns)
2811                                                         is the DDR clock frequency (not data rate).
2812                                                         TYP=max(3nCK, 7.5ns) */
2813	uint64_t trrd                         : 3;  /**< Indicates tRRD constraints.
2814                                                         Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
2815                                                         where tRRD is from the DDR3 spec, and tCYC(ns)
2816                                                         is the DDR clock frequency (not data rate).
2817                                                         TYP=max(4nCK, 10ns)
2818                                                            - 000: RESERVED
2819                                                            - 001: 3 tCYC
2820                                                            - ...
2821                                                            - 110: 8 tCYC
2822                                                            - 111: 9 tCYC */
2823	uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
2824                                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
2825                                                         where tRFC is from the DDR3 spec, and tCYC(ns)
2826                                                         is the DDR clock frequency (not data rate).
2827                                                         TYP=90-350ns
2828                                                              - 00000: RESERVED
2829                                                              - 00001: 8 tCYC
2830                                                              - 00010: 16 tCYC
2831                                                              - 00011: 24 tCYC
2832                                                              - 00100: 32 tCYC
2833                                                              - ...
2834                                                              - 11110: 240 tCYC
2835                                                              - 11111: 248 tCYC */
2836	uint64_t twtr                         : 4;  /**< Indicates tWTR constraints.
2837                                                         Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
2838                                                         where tWTR is from the DDR3 spec, and tCYC(ns)
2839                                                         is the DDR clock frequency (not data rate).
2840                                                         TYP=max(4nCK, 7.5ns)
2841                                                             - 0000: RESERVED
2842                                                             - 0001: 2
2843                                                             - ...
2844                                                             - 0111: 8
2845                                                             - 1000-1111: RESERVED */
2846	uint64_t trcd                         : 4;  /**< Indicates tRCD constraints.
2847                                                         Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
2848                                                         where tRCD is from the DDR3 spec, and tCYC(ns)
2849                                                         is the DDR clock frequency (not data rate).
2850                                                         TYP=10-15ns
2851                                                             - 0000: RESERVED
2852                                                             - 0001: 2 (2 is the smallest value allowed)
2853                                                             - 0010: 2
2854                                                             - ...
2855                                                             - 1001: 9
2856                                                             - 1010-1111: RESERVED
2857                                                         In 2T mode, make this register TRCD-1, not going
2858                                                         below 2. */
2859	uint64_t tras                         : 5;  /**< Indicates tRAS constraints.
2860                                                         Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
2861                                                         where tRAS is from the DDR3 spec, and tCYC(ns)
2862                                                         is the DDR clock frequency (not data rate).
2863                                                         TYP=35ns-9*tREFI
2864                                                             - 00000: RESERVED
2865                                                             - 00001: 2 tCYC
2866                                                             - 00010: 3 tCYC
2867                                                             - ...
2868                                                             - 11111: 32 tCYC */
2869	uint64_t tmprr                        : 4;  /**< Indicates tMPRR constraints.
2870                                                         Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
2871                                                         where tMPRR is from the DDR3 spec, and tCYC(ns)
2872                                                         is the DDR clock frequency (not data rate).
2873                                                         TYP=1nCK */
2874#else
2875	uint64_t tmprr                        : 4;
2876	uint64_t tras                         : 5;
2877	uint64_t trcd                         : 4;
2878	uint64_t twtr                         : 4;
2879	uint64_t trfc                         : 5;
2880	uint64_t trrd                         : 3;
2881	uint64_t txp                          : 3;
2882	uint64_t twlmrd                       : 4;
2883	uint64_t twldqsen                     : 4;
2884	uint64_t tfaw                         : 5;
2885	uint64_t txpdll                       : 5;
2886	uint64_t tras_ext                     : 1;
2887	uint64_t reserved_47_63               : 17;
2888#endif
2889	} s;
2890	struct cvmx_dfm_timing_params1_s      cn63xx;
2891	struct cvmx_dfm_timing_params1_cn63xxp1
2892	{
2893#if __BYTE_ORDER == __BIG_ENDIAN
2894	uint64_t reserved_46_63               : 18;
2895	uint64_t txpdll                       : 5;  /**< Indicates tXPDLL constraints.
2896                                                         Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
2897                                                         where tXPDLL is from the DDR3 spec, and tCYC(ns)
2898                                                         is the DDR clock frequency (not data rate).
2899                                                         TYP=max(10nCK, 24ns) */
2900	uint64_t tfaw                         : 5;  /**< Indicates tFAW constraints.
2901                                                         Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
2902                                                         where tFAW is from the DDR3 spec, and tCYC(ns)
2903                                                         is the DDR clock frequency (not data rate).
2904                                                         TYP=30-40ns */
2905	uint64_t twldqsen                     : 4;  /**< Indicates tWLDQSEN constraints.
2906                                                         Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
2907                                                         where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
2908                                                         is the DDR clock frequency (not data rate).
2909                                                         TYP=max(25nCK) */
2910	uint64_t twlmrd                       : 4;  /**< Indicates tWLMRD constraints.
2911                                                         Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
2912                                                         where tWLMRD is from the DDR3 spec, and tCYC(ns)
2913                                                         is the DDR clock frequency (not data rate).
2914                                                         TYP=max(40nCK) */
2915	uint64_t txp                          : 3;  /**< Indicates tXP constraints.
2916                                                         Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
2917                                                         where tXP is from the DDR3 spec, and tCYC(ns)
2918                                                         is the DDR clock frequency (not data rate).
2919                                                         TYP=max(3nCK, 7.5ns) */
2920	uint64_t trrd                         : 3;  /**< Indicates tRRD constraints.
2921                                                         Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
2922                                                         where tRRD is from the DDR3 spec, and tCYC(ns)
2923                                                         is the DDR clock frequency (not data rate).
2924                                                         TYP=max(4nCK, 10ns)
2925                                                            - 000: RESERVED
2926                                                            - 001: 3 tCYC
2927                                                            - ...
2928                                                            - 110: 8 tCYC
2929                                                            - 111: 9 tCYC */
2930	uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
2931                                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
2932                                                         where tRFC is from the DDR3 spec, and tCYC(ns)
2933                                                         is the DDR clock frequency (not data rate).
2934                                                         TYP=90-350ns
2935                                                              - 00000: RESERVED
2936                                                              - 00001: 8 tCYC
2937                                                              - 00010: 16 tCYC
2938                                                              - 00011: 24 tCYC
2939                                                              - 00100: 32 tCYC
2940                                                              - ...
2941                                                              - 11110: 240 tCYC
2942                                                              - 11111: 248 tCYC */
2943	uint64_t twtr                         : 4;  /**< Indicates tWTR constraints.
2944                                                         Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
2945                                                         where tWTR is from the DDR3 spec, and tCYC(ns)
2946                                                         is the DDR clock frequency (not data rate).
2947                                                         TYP=max(4nCK, 7.5ns)
2948                                                             - 0000: RESERVED
2949                                                             - 0001: 2
2950                                                             - ...
2951                                                             - 0111: 8
2952                                                             - 1000-1111: RESERVED */
2953	uint64_t trcd                         : 4;  /**< Indicates tRCD constraints.
2954                                                         Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
2955                                                         where tRCD is from the DDR3 spec, and tCYC(ns)
2956                                                         is the DDR clock frequency (not data rate).
2957                                                         TYP=10-15ns
2958                                                             - 0000: RESERVED
2959                                                             - 0001: 2 (2 is the smallest value allowed)
2960                                                             - 0010: 2
2961                                                             - ...
2962                                                             - 1001: 9
2963                                                             - 1010-1111: RESERVED
2964                                                         In 2T mode, make this register TRCD-1, not going
2965                                                         below 2. */
2966	uint64_t tras                         : 5;  /**< Indicates tRAS constraints.
2967                                                         Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
2968                                                         where tRAS is from the DDR3 spec, and tCYC(ns)
2969                                                         is the DDR clock frequency (not data rate).
2970                                                         TYP=35ns-9*tREFI
2971                                                             - 00000: RESERVED
2972                                                             - 00001: 2 tCYC
2973                                                             - 00010: 3 tCYC
2974                                                             - ...
2975                                                             - 11111: 32 tCYC */
2976	uint64_t tmprr                        : 4;  /**< Indicates tMPRR constraints.
2977                                                         Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
2978                                                         where tMPRR is from the DDR3 spec, and tCYC(ns)
2979                                                         is the DDR clock frequency (not data rate).
2980                                                         TYP=1nCK */
2981#else
2982	uint64_t tmprr                        : 4;
2983	uint64_t tras                         : 5;
2984	uint64_t trcd                         : 4;
2985	uint64_t twtr                         : 4;
2986	uint64_t trfc                         : 5;
2987	uint64_t trrd                         : 3;
2988	uint64_t txp                          : 3;
2989	uint64_t twlmrd                       : 4;
2990	uint64_t twldqsen                     : 4;
2991	uint64_t tfaw                         : 5;
2992	uint64_t txpdll                       : 5;
2993	uint64_t reserved_46_63               : 18;
2994#endif
2995	} cn63xxp1;
2996};
2997typedef union cvmx_dfm_timing_params1 cvmx_dfm_timing_params1_t;
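
/*
 * Illustrative sketch only: evaluating two of the DFM_TIMING_PARAMS1
 * formulas documented above with assumed values (tCYC = 2.5 ns,
 * tRCD = 13.125 ns, tFAW = 40 ns).  These numbers are example assumptions,
 * not values taken from this header.
 */
static inline uint64_t cvmx_dfm_example_timing_params1(void)
{
	const uint64_t tcyc_ps = 2500;   /* assumed DDR clock period (400 MHz) */
	cvmx_dfm_timing_params1_t params;

	params.u64 = 0;
	/* TRCD = RNDUP[tRCD/tCYC]      ->  RNDUP[13.125/2.5] = 6 */
	params.s.trcd = (13125 + tcyc_ps - 1) / tcyc_ps;
	/* TFAW = RNDUP[tFAW/(4*tCYC)]  ->  RNDUP[40/10]      = 4 */
	params.s.tfaw = (40000 + 4 * tcyc_ps - 1) / (4 * tcyc_ps);
	return params.u64;
}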
2998
2999/**
3000 * cvmx_dfm_wlevel_ctl
3001 */
3002union cvmx_dfm_wlevel_ctl
3003{
3004	uint64_t u64;
3005	struct cvmx_dfm_wlevel_ctl_s
3006	{
3007#if __BYTE_ORDER == __BIG_ENDIAN
3008	uint64_t reserved_22_63               : 42;
3009	uint64_t rtt_nom                      : 3;  /**< RTT_NOM
3010                                                         DFM writes a decoded value to MR1[Rtt_Nom] of the rank during
3011                                                         write leveling. Per JEDEC DDR3 specifications,
3012                                                         only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6)
3013                                                         are allowed during write leveling with output buffer enabled.
3014                                                         000 : DFM writes 001 (RZQ/4)   to MR1[Rtt_Nom]
3015                                                         001 : DFM writes 010 (RZQ/2)   to MR1[Rtt_Nom]
3016                                                         010 : DFM writes 011 (RZQ/6)   to MR1[Rtt_Nom]
3017                                                         011 : DFM writes 100 (RZQ/12)  to MR1[Rtt_Nom]
3018                                                         100 : DFM writes 101 (RZQ/8)   to MR1[Rtt_Nom]
3019                                                         101 : DFM writes 110 (Rsvd)    to MR1[Rtt_Nom]
3020                                                         110 : DFM writes 111 (Rsvd)    to MR1[Rtt_Nom]
3021                                                         111 : DFM writes 000 (Disabled) to MR1[Rtt_Nom] */
3022	uint64_t bitmask                      : 8;  /**< Mask to select bit lanes on which write-leveling
3023                                                         feedback is returned when OR_DIS is set to 1 */
3024	uint64_t or_dis                       : 1;  /**< Disable or'ing of bits in a byte lane when computing
3025                                                         the write-leveling bitmask */
3026	uint64_t sset                         : 1;  /**< Run write-leveling on the current setting only. */
3027	uint64_t lanemask                     : 9;  /**< One-hot mask to select byte lane to be leveled by
3028                                                         the write-leveling sequence
3029                                                         Used with x16 parts where the upper and lower byte
3030                                                         lanes need to be leveled independently
3031                                                         LANEMASK<8:2> must be zero. */
3032#else
3033	uint64_t lanemask                     : 9;
3034	uint64_t sset                         : 1;
3035	uint64_t or_dis                       : 1;
3036	uint64_t bitmask                      : 8;
3037	uint64_t rtt_nom                      : 3;
3038	uint64_t reserved_22_63               : 42;
3039#endif
3040	} s;
3041	struct cvmx_dfm_wlevel_ctl_s          cn63xx;
3042	struct cvmx_dfm_wlevel_ctl_cn63xxp1
3043	{
3044#if __BYTE_ORDER == __BIG_ENDIAN
3045	uint64_t reserved_10_63               : 54;
3046	uint64_t sset                         : 1;  /**< Run write-leveling on the current setting only. */
3047	uint64_t lanemask                     : 9;  /**< One-hot mask to select byte lane to be leveled by
3048                                                         the write-leveling sequence
3049                                                         Used with x16 parts where the upper and lower byte
3050                                                         lanes need to be leveled independently
3051                                                         LANEMASK<8:2> must be zero. */
3052#else
3053	uint64_t lanemask                     : 9;
3054	uint64_t sset                         : 1;
3055	uint64_t reserved_10_63               : 54;
3056#endif
3057	} cn63xxp1;
3058};
3059typedef union cvmx_dfm_wlevel_ctl cvmx_dfm_wlevel_ctl_t;
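
/*
 * Illustrative sketch: programming DFM_WLEVEL_CTL before a write-leveling
 * sequence, selecting RZQ/4 for Rtt_Nom and leveling one byte lane per pass
 * (LANEMASK is one-hot and LANEMASK<8:2> must stay zero).  cvmx_read_csr()
 * and cvmx_write_csr() are the generic CVMX CSR accessors from the
 * surrounding SDK, and CVMX_DFM_WLEVEL_CTL is the address macro defined
 * earlier in this header; their use here is only an example.
 */
static inline void cvmx_dfm_example_wlevel_ctl(void)
{
	cvmx_dfm_wlevel_ctl_t ctl;

	ctl.u64 = cvmx_read_csr(CVMX_DFM_WLEVEL_CTL);
	ctl.s.rtt_nom  = 0;    /* DFM writes 001 (RZQ/4) to MR1[Rtt_Nom]  */
	ctl.s.sset     = 0;    /* sweep all deskew settings, not just one */
	ctl.s.lanemask = 0x1;  /* one-hot: level byte lane 0 on this pass */
	cvmx_write_csr(CVMX_DFM_WLEVEL_CTL, ctl.u64);
}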
3060
3061/**
3062 * cvmx_dfm_wlevel_dbg
3063 *
3064 * Notes:
3065 * A given write of DFM_WLEVEL_DBG, followed by a read, returns the write-leveling pass/fail results for all possible
3066 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled.
3067 * DFM_WLEVEL_DBG[BYTE] selects the particular byte.
3068 * To get these pass/fail results for another different rank, you must run the hardware write-leveling
3069 * again. For example, it is possible to get the BITMASK results for every byte of every rank
3070 * if you run write-leveling separately for each rank, probing DFM_WLEVEL_DBG between each
3071 * write-leveling.
3072 */
3073union cvmx_dfm_wlevel_dbg
3074{
3075	uint64_t u64;
3076	struct cvmx_dfm_wlevel_dbg_s
3077	{
3078#if __BYTE_ORDER == __BIG_ENDIAN
3079	uint64_t reserved_12_63               : 52;
3080	uint64_t bitmask                      : 8;  /**< Bitmask generated during deskew settings sweep
3081                                                         if DFM_WLEVEL_CTL[SSET]=0
3082                                                           BITMASK[n]=0 means deskew setting n failed
3083                                                           BITMASK[n]=1 means deskew setting n passed
3084                                                           for 0 <= n <= 7
3085                                                           BITMASK contains the first 8 results of the total 16
3086                                                           collected by DFM during the write-leveling sequence
3087                                                         else if DFM_WLEVEL_CTL[SSET]=1
3088                                                           BITMASK[0]=0 means curr deskew setting failed
3089                                                           BITMASK[0]=1 means curr deskew setting passed */
3090	uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8 */
3091#else
3092	uint64_t byte                         : 4;
3093	uint64_t bitmask                      : 8;
3094	uint64_t reserved_12_63               : 52;
3095#endif
3096	} s;
3097	struct cvmx_dfm_wlevel_dbg_s          cn63xx;
3098	struct cvmx_dfm_wlevel_dbg_s          cn63xxp1;
3099};
3100typedef union cvmx_dfm_wlevel_dbg cvmx_dfm_wlevel_dbg_t;
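
/*
 * Illustrative sketch: selecting a byte lane in DFM_WLEVEL_DBG and reading
 * back the pass/fail BITMASK for that lane, per the notes above.  The
 * CVMX_DFM_WLEVEL_DBG address macro is defined earlier in this header and
 * cvmx_read_csr()/cvmx_write_csr() come from the surrounding CVMX SDK.
 */
static inline uint64_t cvmx_dfm_example_wlevel_bitmask(int byte_lane)
{
	cvmx_dfm_wlevel_dbg_t dbg;

	dbg.u64 = 0;
	dbg.s.byte = byte_lane;            /* 0 <= BYTE <= 8 */
	cvmx_write_csr(CVMX_DFM_WLEVEL_DBG, dbg.u64);
	dbg.u64 = cvmx_read_csr(CVMX_DFM_WLEVEL_DBG);
	return dbg.s.bitmask;              /* bit n set => deskew setting n passed */
}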
3101
3102/**
3103 * cvmx_dfm_wlevel_rank#
3104 *
3105 * Notes:
3106 * There are TWO CSRs per DFM, one per rank (front bunk/back bunk).
3107 *
3108 * The deskew setting is measured in units of 1/8 FCLK, so the BYTE* values below can range over 4 FCLKs.
3109 *
3110 * Assuming DFM_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and
3111 * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3
3112 * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits.
3113 *
3114 * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
3115 *
3116 * SW initiates a HW write-leveling sequence by programming DFM_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in DFM_CONFIG.
3117 * DFM will then step through and accumulate write-leveling results for 8 unique delay settings (twice), starting at a delay of
3118 * DFM_WLEVEL_RANKn[BYTE*<4:3>]*8 CK, increasing by 1/8 CK each setting. HW will then set DFM_WLEVEL_RANKn[BYTE*<2:0>] to indicate the
3119 * first write-leveling result of '1' that followed a result of '0' during the sequence by searching for a '1100' pattern in the generated
3120 * bitmask, except that DFM will always write DFM_WLEVEL_RANKn[BYTE*<0>]=0. If HW is unable to find a match for a '1100' pattern, then HW will
3121 * set DFM_WLEVEL_RANKn[BYTE*<2:0>] to 4.
3122 * See DFM_WLEVEL_CTL.
3123 */
3124union cvmx_dfm_wlevel_rankx
3125{
3126	uint64_t u64;
3127	struct cvmx_dfm_wlevel_rankx_s
3128	{
3129#if __BYTE_ORDER == __BIG_ENDIAN
3130	uint64_t reserved_47_63               : 17;
3131	uint64_t status                       : 2;  /**< Indicates status of the write-leveling and where
3132                                                         the BYTE* programmings in <44:0> came from:
3133                                                         0 = BYTE* values are their reset value
3134                                                         1 = BYTE* values were set via a CSR write to this register
3135                                                         2 = write-leveling sequence currently in progress (BYTE* values are unpredictable)
3136                                                         3 = BYTE* values came from a complete write-leveling sequence, irrespective of
3137                                                             which lanes are masked via DFM_WLEVEL_CTL[LANEMASK] */
3138	uint64_t reserved_10_44               : 35;
3139	uint64_t byte1                        : 5;  /**< Deskew setting
3140                                                         Bit 0 of BYTE1 must be zero during normal operation */
3141	uint64_t byte0                        : 5;  /**< Deskew setting
3142                                                         Bit 0 of BYTE0 must be zero during normal operation */
3143#else
3144	uint64_t byte0                        : 5;
3145	uint64_t byte1                        : 5;
3146	uint64_t reserved_10_44               : 35;
3147	uint64_t status                       : 2;
3148	uint64_t reserved_47_63               : 17;
3149#endif
3150	} s;
3151	struct cvmx_dfm_wlevel_rankx_s        cn63xx;
3152	struct cvmx_dfm_wlevel_rankx_s        cn63xxp1;
3153};
3154typedef union cvmx_dfm_wlevel_rankx cvmx_dfm_wlevel_rankx_t;
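
/*
 * Illustrative sketch of the software step described in the notes above:
 * before starting hardware write-leveling (SEQUENCE=6 via DFM_CONFIG),
 * software seeds the coarse starting delay in BYTE*<4:3> of the rank's
 * DFM_WLEVEL_RANKn CSR; hardware later fills in BYTE*<2:0>.  Writing the
 * seeded value to the CSR and kicking off the sequence are left to the
 * caller.
 */
static inline void cvmx_dfm_example_seed_wlevel_rank(cvmx_dfm_wlevel_rankx_t *wl)
{
	/* BYTE*<4:3> = 01 starts each byte's 8-step sweep one CK into the
	 * deskew range; bit 0 must remain zero during normal operation. */
	wl->s.byte0 = 1 << 3;
	wl->s.byte1 = 1 << 3;
}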
3155
3156/**
3157 * cvmx_dfm_wodt_mask
3158 *
3159 * DFM_WODT_MASK = DFM Write OnDieTermination mask
3160 * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
3161 * especially on a multi-rank system. DDR3 DQ/DM/DQS I/O's have built in
3162 * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
3163 * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
3164 * in that rank. System designers may prefer different combinations of ODT ON's for writes
3165 * into different ranks. Octeon supports full programmability by way of the mask register below.
3166 * Each Rank position has its own 8-bit programmable field.
3167 * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK pins below.
3168 * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines
3169 * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010].
3170 * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
3171 * required, write 0 in this register.
3172 *
3173 * Notes:
3174 * - DFM_WODT_MASK functions a little differently than DFM_RODT_MASK.  While, in DFM_RODT_MASK, the other
3175 * rank(s) are ODT-ed, in DFM_WODT_MASK, the rank in which the write CAS is issued can be ODT-ed as well.
3176 * - For a two-rank system and a write op to rank0: use WODT_D0_R0<1:0> to terminate lines on rank1 and/or rank0.
3177 * - For a two-rank system and a write op to rank1: use WODT_D0_R1<1:0> to terminate lines on rank1 and/or rank0.
3178 * - When a given RANK is selected, the WODT mask for that RANK is used.
3179 *
3180 * DFM always writes 128-bit words independently via one write CAS operation per word.
3181 * When a WODT mask bit is set, DFM asserts the OCTEON ODT output pin(s) starting the same cycle
3182 * as the write CAS operation. Then, OCTEON normally continues to assert the ODT output pin(s) for five
3183 * more cycles - for a total of 6 cycles for the entire word write - satisfying the 6 cycle DDR3
3184 * ODTH8 requirements. But it is possible for DFM to issue two word writes separated by as few
3185 * as WtW = 4 or 5 cycles. In that case, DFM asserts the ODT output pin(s) for the WODT mask of the
3186 * first word write for WtW cycles, then asserts the ODT output pin(s) for the WODT mask of the
3187 * second write for 6 cycles (or less if a third word write follows within 4 or 5
3188 * cycles of this second word write). Note that it may be necessary to force DFM to space back-to-back
3189 * word writes to different ranks apart by at least 6 cycles to prevent DDR3 ODTH8 violations.
3190 */
3191union cvmx_dfm_wodt_mask
3192{
3193	uint64_t u64;
3194	struct cvmx_dfm_wodt_mask_s
3195	{
3196#if __BYTE_ORDER == __BIG_ENDIAN
3197	uint64_t wodt_d3_r1                   : 8;  /**< Not used by DFM. */
3198	uint64_t wodt_d3_r0                   : 8;  /**< Not used by DFM. */
3199	uint64_t wodt_d2_r1                   : 8;  /**< Not used by DFM. */
3200	uint64_t wodt_d2_r0                   : 8;  /**< Not used by DFM. */
3201	uint64_t wodt_d1_r1                   : 8;  /**< Not used by DFM. */
3202	uint64_t wodt_d1_r0                   : 8;  /**< Not used by DFM. */
3203	uint64_t wodt_d0_r1                   : 8;  /**< Write ODT mask RANK1
3204                                                         WODT_D0_R1<7:2> not used by DFM.
3205                                                         WODT_D0_R1<1:0> is also not used by DFM when RANK_ENA is not set. */
3206	uint64_t wodt_d0_r0                   : 8;  /**< Write ODT mask RANK0
3207                                                         WODT_D0_R0<7:2> not used by DFM. */
3208#else
3209	uint64_t wodt_d0_r0                   : 8;
3210	uint64_t wodt_d0_r1                   : 8;
3211	uint64_t wodt_d1_r0                   : 8;
3212	uint64_t wodt_d1_r1                   : 8;
3213	uint64_t wodt_d2_r0                   : 8;
3214	uint64_t wodt_d2_r1                   : 8;
3215	uint64_t wodt_d3_r0                   : 8;
3216	uint64_t wodt_d3_r1                   : 8;
3217#endif
3218	} s;
3219	struct cvmx_dfm_wodt_mask_s           cn63xx;
3220	struct cvmx_dfm_wodt_mask_s           cn63xxp1;
3221};
3222typedef union cvmx_dfm_wodt_mask cvmx_dfm_wodt_mask_t;
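
/*
 * Illustrative sketch of the WODT example described above: for a two-rank
 * system, terminate on Rank1 when writing Rank0 and on Rank0 when writing
 * Rank1.  The exact policy is a board-level decision; CVMX_DFM_WODT_MASK is
 * the address macro defined earlier in this header and cvmx_write_csr()
 * comes from the surrounding CVMX SDK.
 */
static inline void cvmx_dfm_example_wodt_mask(void)
{
	cvmx_dfm_wodt_mask_t mask;

	mask.u64 = 0;
	mask.s.wodt_d0_r0 = 0x2;  /* write to Rank0 -> assert ODT on Rank1 */
	mask.s.wodt_d0_r1 = 0x1;  /* write to Rank1 -> assert ODT on Rank0 */
	cvmx_write_csr(CVMX_DFM_WODT_MASK, mask.u64);
}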
3223
3224#endif
3225