/* cvmx-spxx-defs.h revision 215976 */
/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/ 39 40 41/** 42 * cvmx-spxx-defs.h 43 * 44 * Configuration and status register (CSR) type definitions for 45 * Octeon spxx. 46 * 47 * This file is auto generated. Do not edit. 48 * 49 * <hr>$Revision$<hr> 50 * 51 */ 52#ifndef __CVMX_SPXX_TYPEDEFS_H__ 53#define __CVMX_SPXX_TYPEDEFS_H__ 54 55#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 56static inline uint64_t CVMX_SPXX_BCKPRS_CNT(unsigned long block_id) 57{ 58 if (!( 59 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 60 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 61 cvmx_warn("CVMX_SPXX_BCKPRS_CNT(%lu) is invalid on this chip\n", block_id); 62 return CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull; 63} 64#else 65#define CVMX_SPXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull) 66#endif 67#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 68static inline uint64_t CVMX_SPXX_BIST_STAT(unsigned long block_id) 69{ 70 if (!( 71 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 72 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 73 cvmx_warn("CVMX_SPXX_BIST_STAT(%lu) is invalid on this chip\n", block_id); 74 return CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull; 75} 76#else 77#define CVMX_SPXX_BIST_STAT(block_id) (CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull) 78#endif 79#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 80static inline uint64_t CVMX_SPXX_CLK_CTL(unsigned long block_id) 81{ 82 if (!( 83 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 84 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 85 cvmx_warn("CVMX_SPXX_CLK_CTL(%lu) is invalid on this chip\n", block_id); 86 return CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull; 87} 88#else 89#define CVMX_SPXX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull) 90#endif 91#if 
CVMX_ENABLE_CSR_ADDRESS_CHECKING 92static inline uint64_t CVMX_SPXX_CLK_STAT(unsigned long block_id) 93{ 94 if (!( 95 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 96 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 97 cvmx_warn("CVMX_SPXX_CLK_STAT(%lu) is invalid on this chip\n", block_id); 98 return CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull; 99} 100#else 101#define CVMX_SPXX_CLK_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull) 102#endif 103#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 104static inline uint64_t CVMX_SPXX_DBG_DESKEW_CTL(unsigned long block_id) 105{ 106 if (!( 107 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 108 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 109 cvmx_warn("CVMX_SPXX_DBG_DESKEW_CTL(%lu) is invalid on this chip\n", block_id); 110 return CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull; 111} 112#else 113#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull) 114#endif 115#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 116static inline uint64_t CVMX_SPXX_DBG_DESKEW_STATE(unsigned long block_id) 117{ 118 if (!( 119 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 120 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 121 cvmx_warn("CVMX_SPXX_DBG_DESKEW_STATE(%lu) is invalid on this chip\n", block_id); 122 return CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull; 123} 124#else 125#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) (CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull) 126#endif 127#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 128static inline uint64_t CVMX_SPXX_DRV_CTL(unsigned long block_id) 129{ 130 if (!( 131 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 132 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 133 cvmx_warn("CVMX_SPXX_DRV_CTL(%lu) is invalid on this chip\n", block_id); 134 
return CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull; 135} 136#else 137#define CVMX_SPXX_DRV_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull) 138#endif 139#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 140static inline uint64_t CVMX_SPXX_ERR_CTL(unsigned long block_id) 141{ 142 if (!( 143 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 144 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 145 cvmx_warn("CVMX_SPXX_ERR_CTL(%lu) is invalid on this chip\n", block_id); 146 return CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull; 147} 148#else 149#define CVMX_SPXX_ERR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull) 150#endif 151#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 152static inline uint64_t CVMX_SPXX_INT_DAT(unsigned long block_id) 153{ 154 if (!( 155 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 156 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 157 cvmx_warn("CVMX_SPXX_INT_DAT(%lu) is invalid on this chip\n", block_id); 158 return CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull; 159} 160#else 161#define CVMX_SPXX_INT_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull) 162#endif 163#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 164static inline uint64_t CVMX_SPXX_INT_MSK(unsigned long block_id) 165{ 166 if (!( 167 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 168 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 169 cvmx_warn("CVMX_SPXX_INT_MSK(%lu) is invalid on this chip\n", block_id); 170 return CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull; 171} 172#else 173#define CVMX_SPXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull) 174#endif 175#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 176static inline uint64_t CVMX_SPXX_INT_REG(unsigned long block_id) 177{ 178 if (!( 179 
(OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 180 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 181 cvmx_warn("CVMX_SPXX_INT_REG(%lu) is invalid on this chip\n", block_id); 182 return CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull; 183} 184#else 185#define CVMX_SPXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull) 186#endif 187#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 188static inline uint64_t CVMX_SPXX_INT_SYNC(unsigned long block_id) 189{ 190 if (!( 191 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 192 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 193 cvmx_warn("CVMX_SPXX_INT_SYNC(%lu) is invalid on this chip\n", block_id); 194 return CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull; 195} 196#else 197#define CVMX_SPXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull) 198#endif 199#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 200static inline uint64_t CVMX_SPXX_TPA_ACC(unsigned long block_id) 201{ 202 if (!( 203 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 204 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 205 cvmx_warn("CVMX_SPXX_TPA_ACC(%lu) is invalid on this chip\n", block_id); 206 return CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull; 207} 208#else 209#define CVMX_SPXX_TPA_ACC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull) 210#endif 211#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 212static inline uint64_t CVMX_SPXX_TPA_MAX(unsigned long block_id) 213{ 214 if (!( 215 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 216 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 217 cvmx_warn("CVMX_SPXX_TPA_MAX(%lu) is invalid on this chip\n", block_id); 218 return CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull; 219} 220#else 221#define CVMX_SPXX_TPA_MAX(block_id) 
(CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull) 222#endif 223#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 224static inline uint64_t CVMX_SPXX_TPA_SEL(unsigned long block_id) 225{ 226 if (!( 227 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 228 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 229 cvmx_warn("CVMX_SPXX_TPA_SEL(%lu) is invalid on this chip\n", block_id); 230 return CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull; 231} 232#else 233#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull) 234#endif 235#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 236static inline uint64_t CVMX_SPXX_TRN4_CTL(unsigned long block_id) 237{ 238 if (!( 239 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) || 240 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))))) 241 cvmx_warn("CVMX_SPXX_TRN4_CTL(%lu) is invalid on this chip\n", block_id); 242 return CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull; 243} 244#else 245#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull) 246#endif 247 248/** 249 * cvmx_spx#_bckprs_cnt 250 */ 251union cvmx_spxx_bckprs_cnt 252{ 253 uint64_t u64; 254 struct cvmx_spxx_bckprs_cnt_s 255 { 256#if __BYTE_ORDER == __BIG_ENDIAN 257 uint64_t reserved_32_63 : 32; 258 uint64_t cnt : 32; /**< Counts the number of core clock cycles in which 259 the SPI-4.2 receiver receives data once the TPA 260 for a particular port has been deasserted. The 261 desired port to watch can be selected with the 262 SPX_TPA_SEL[PRTSEL] field. CNT can be cleared by 263 writing all 1s to it. 
*/ 264#else 265 uint64_t cnt : 32; 266 uint64_t reserved_32_63 : 32; 267#endif 268 } s; 269 struct cvmx_spxx_bckprs_cnt_s cn38xx; 270 struct cvmx_spxx_bckprs_cnt_s cn38xxp2; 271 struct cvmx_spxx_bckprs_cnt_s cn58xx; 272 struct cvmx_spxx_bckprs_cnt_s cn58xxp1; 273}; 274typedef union cvmx_spxx_bckprs_cnt cvmx_spxx_bckprs_cnt_t; 275 276/** 277 * cvmx_spx#_bist_stat 278 * 279 * Notes: 280 * Bist results encoding 281 * - 0: good (or bist in progress/never run) 282 * - 1: bad 283 */ 284union cvmx_spxx_bist_stat 285{ 286 uint64_t u64; 287 struct cvmx_spxx_bist_stat_s 288 { 289#if __BYTE_ORDER == __BIG_ENDIAN 290 uint64_t reserved_3_63 : 61; 291 uint64_t stat2 : 1; /**< Bist Results/No Repair (Tx calendar table) 292 (spx.stx.cal.calendar) */ 293 uint64_t stat1 : 1; /**< Bist Results/No Repair (Rx calendar table) 294 (spx.srx.spi4.cal.calendar) */ 295 uint64_t stat0 : 1; /**< Bist Results/No Repair (Spi4 receive datapath FIFO) 296 (spx.srx.spi4.dat.dpr) */ 297#else 298 uint64_t stat0 : 1; 299 uint64_t stat1 : 1; 300 uint64_t stat2 : 1; 301 uint64_t reserved_3_63 : 61; 302#endif 303 } s; 304 struct cvmx_spxx_bist_stat_s cn38xx; 305 struct cvmx_spxx_bist_stat_s cn38xxp2; 306 struct cvmx_spxx_bist_stat_s cn58xx; 307 struct cvmx_spxx_bist_stat_s cn58xxp1; 308}; 309typedef union cvmx_spxx_bist_stat cvmx_spxx_bist_stat_t; 310 311/** 312 * cvmx_spx#_clk_ctl 313 * 314 * Notes: 315 * * SRXDLCK 316 * When asserted, this bit locks the Spi4 receive DLLs. This bit also 317 * acts as the Spi4 receiver reset and must be asserted before the 318 * training sequences are used to initialize the interface. This bit 319 * only applies to the receiver interface. 320 * 321 * * RCVTRN 322 * Once the SRXDLCK bit is asserted and the DLLs have locked and the 323 * system has been programmed, software should assert this bit in order 324 * to start looking for valid training sequence and synchronize the 325 * interface. This bit only applies to the receiver interface. 
326 * 327 * * DRPTRN 328 * The Spi4 receiver can either convert training packets into NOPs or 329 * drop them entirely. Dropping ticks allows the interface to deskew 330 * periodically if the dclk and eclk ratios are close. This bit only 331 * applies to the receiver interface. 332 * 333 * * SNDTRN 334 * When software sets this bit, it indicates that the Spi4 transmit 335 * interface has been setup and has seen the calendare status. Once the 336 * transmitter begins sending training data, the receiving device is free 337 * to start traversing the calendar table to synch the link. 338 * 339 * * STATRCV 340 * This bit determines which status clock edge to sample the status 341 * channel in Spi4 mode. Since the status channel is in the opposite 342 * direction to the datapath, the STATRCV actually effects the 343 * transmitter/TX block. 344 * 345 * * STATDRV 346 * This bit determines which status clock edge to drive the status 347 * channel in Spi4 mode. Since the status channel is in the opposite 348 * direction to the datapath, the STATDRV actually effects the 349 * receiver/RX block. 350 * 351 * * RUNBIST 352 * RUNBIST will beginning BIST/BISR in all the SPX compilied memories. 353 * These memories are... 354 * 355 * * spx.srx.spi4.dat.dpr // FIFO Spi4 to IMX 356 * * spx.stx.cal.calendar // Spi4 TX calendar table 357 * * spx.srx.spi4.cal.calendar // Spi4 RX calendar table 358 * 359 * RUNBIST must never be asserted when the interface is enabled. 360 * Furthmore, setting RUNBIST at any other time is destructive and can 361 * cause data and configuration corruption. The entire interface must be 362 * reconfigured when this bit is set. 363 * 364 * * CLKDLY 365 * CLKDLY should be kept at its reset value during normal operation. This 366 * register controls the SPI4.2 static clock positioning which normally only is 367 * set to the non-reset value in quarter clocking schemes. 
In this mode, the 368 * delay window is not large enough for slow clock freq, therefore clock and 369 * data must be statically positioned with CSRs. By changing the clock position 370 * relative to the data bits, we give the system a wider window. 371 * 372 * * SEETRN 373 * In systems in which no training data is sent to N2 or N2 cannot 374 * correctly sample the training data, software may pulse this bit by 375 * writing a '1' followed by a '0' in order to correctly set the 376 * receivers state. The receive data bus should be idle at this time 377 * (only NOPs on the bus). If N2 cannot see at least on training 378 * sequence, the data bus will not send any data to the core. The 379 * interface will hang. 380 */ 381union cvmx_spxx_clk_ctl 382{ 383 uint64_t u64; 384 struct cvmx_spxx_clk_ctl_s 385 { 386#if __BYTE_ORDER == __BIG_ENDIAN 387 uint64_t reserved_17_63 : 47; 388 uint64_t seetrn : 1; /**< Force the Spi4 receive into seeing a traing 389 sequence */ 390 uint64_t reserved_12_15 : 4; 391 uint64_t clkdly : 5; /**< Set the spx__clkdly lines to this value to 392 control the delay on the incoming dclk 393 (spx__clkdly) */ 394 uint64_t runbist : 1; /**< Write this bit to begin BIST testing in SPX */ 395 uint64_t statdrv : 1; /**< Spi4 status channel drive mode 396 - 1: Drive STAT on posedge of SCLK 397 - 0: Drive STAT on negedge of SCLK */ 398 uint64_t statrcv : 1; /**< Spi4 status channel sample mode 399 - 1: Sample STAT on posedge of SCLK 400 - 0: Sample STAT on negedge of SCLK */ 401 uint64_t sndtrn : 1; /**< Start sending training patterns on the Spi4 402 Tx Interface */ 403 uint64_t drptrn : 1; /**< Drop blocks of training packets */ 404 uint64_t rcvtrn : 1; /**< Write this bit once the DLL is locked to sync 405 on the training seqeunce */ 406 uint64_t srxdlck : 1; /**< Write this bit to lock the Spi4 receive DLL */ 407#else 408 uint64_t srxdlck : 1; 409 uint64_t rcvtrn : 1; 410 uint64_t drptrn : 1; 411 uint64_t sndtrn : 1; 412 uint64_t statrcv : 1; 413 
uint64_t statdrv : 1; 414 uint64_t runbist : 1; 415 uint64_t clkdly : 5; 416 uint64_t reserved_12_15 : 4; 417 uint64_t seetrn : 1; 418 uint64_t reserved_17_63 : 47; 419#endif 420 } s; 421 struct cvmx_spxx_clk_ctl_s cn38xx; 422 struct cvmx_spxx_clk_ctl_s cn38xxp2; 423 struct cvmx_spxx_clk_ctl_s cn58xx; 424 struct cvmx_spxx_clk_ctl_s cn58xxp1; 425}; 426typedef union cvmx_spxx_clk_ctl cvmx_spxx_clk_ctl_t; 427 428/** 429 * cvmx_spx#_clk_stat 430 */ 431union cvmx_spxx_clk_stat 432{ 433 uint64_t u64; 434 struct cvmx_spxx_clk_stat_s 435 { 436#if __BYTE_ORDER == __BIG_ENDIAN 437 uint64_t reserved_11_63 : 53; 438 uint64_t stxcal : 1; /**< The transistion from Sync to Calendar on status 439 channel */ 440 uint64_t reserved_9_9 : 1; 441 uint64_t srxtrn : 1; /**< Saw a good data training sequence */ 442 uint64_t s4clk1 : 1; /**< Saw '1' on Spi4 transmit status forward clk input */ 443 uint64_t s4clk0 : 1; /**< Saw '0' on Spi4 transmit status forward clk input */ 444 uint64_t d4clk1 : 1; /**< Saw '1' on Spi4 receive data forward clk input */ 445 uint64_t d4clk0 : 1; /**< Saw '0' on Spi4 receive data forward clk input */ 446 uint64_t reserved_0_3 : 4; 447#else 448 uint64_t reserved_0_3 : 4; 449 uint64_t d4clk0 : 1; 450 uint64_t d4clk1 : 1; 451 uint64_t s4clk0 : 1; 452 uint64_t s4clk1 : 1; 453 uint64_t srxtrn : 1; 454 uint64_t reserved_9_9 : 1; 455 uint64_t stxcal : 1; 456 uint64_t reserved_11_63 : 53; 457#endif 458 } s; 459 struct cvmx_spxx_clk_stat_s cn38xx; 460 struct cvmx_spxx_clk_stat_s cn38xxp2; 461 struct cvmx_spxx_clk_stat_s cn58xx; 462 struct cvmx_spxx_clk_stat_s cn58xxp1; 463}; 464typedef union cvmx_spxx_clk_stat cvmx_spxx_clk_stat_t; 465 466/** 467 * cvmx_spx#_dbg_deskew_ctl 468 * 469 * Notes: 470 * These bits are meant as a backdoor to control Spi4 per-bit deskew. See 471 * that Spec for more details. 472 * 473 * The basic idea is to allow software to disable the auto-deskew widgets 474 * and make any adjustments by hand. 
These steps should only be taken 475 * once the RCVTRN bit is set and before any real traffic is sent on the 476 * Spi4 bus. Great care should be taken when messing with these bits as 477 * improper programmings can cause catestrophic or intermitent problems. 478 * 479 * The params we have to test are the MUX tap selects and the XCV delay 480 * tap selects. 481 * 482 * For the muxes, we can set each tap to a random value and then read 483 * back the taps. To write... 484 * 485 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set 486 * SPXX_DBG_DESKEW_CTL[OFFSET] = mux tap value (2-bits) 487 * SPXX_DBG_DESKEW_CTL[MUX] = go bit 488 * 489 * Notice this can all happen with a single CSR write. To read, first 490 * set the bit you to look at with the SPXX_DBG_DESKEW_CTL[BITSEL], then 491 * simply read SPXX_DBG_DESKEW_STATE[MUXSEL]... 492 * 493 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set 494 * SPXX_DBG_DESKEW_STATE[MUXSEL] = 2-bit value 495 * 496 * For the xcv delay taps, the CSR controls increment and decrement the 497 * 5-bit count value in the XCV. This is a saturating counter, so it 498 * will not wrap when decrementing below zero or incrementing above 31. 499 * 500 * To write... 501 * 502 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set 503 * SPXX_DBG_DESKEW_CTL[OFFSET] = tap value increment or decrement amount (5-bits) 504 * SPXX_DBG_DESKEW_CTL[INC|DEC] = go bit 505 * 506 * These values are copied in SPX, so that they can be read back by 507 * software by a similar mechanism to the MUX selects... 508 * 509 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set 510 * SPXX_DBG_DESKEW_STATE[OFFSET] = 5-bit value 511 * 512 * In addition, there is a reset bit that sets all the state back to the 513 * default/starting value of 0x10. 514 * 515 * SPXX_DBG_DESKEW_CTL[CLRDLY] = 1 516 * 517 * SINGLE STEP TRAINING MODE (WILMA) 518 * Debug feature that will enable the user to single-step the debug 519 * logic to watch initial movement and trends by putting the training 520 * machine in single step mode. 
521 * 522 * * SPX*_DBG_DESKEW_CTL[SSTEP] 523 * This will put the training control logic into single step mode. We 524 * will not deskew in this scenario and will require the TX device to 525 * send continuous training sequences. 526 * 527 * It is required that SRX*_COM_CTL[INF_EN] be clear so that suspect 528 * data does not flow into the chip. 529 * 530 * Deasserting SPX*_DBG_DESKEW_CTL[SSTEP] will attempt to deskew as per 531 * the normal definition. Single step mode is for debug only. Special 532 * care must be given to correctly deskew the interface if normal 533 * operation is desired. 534 * 535 * * SPX*_DBG_DESKEW_CTL[SSTEP_GO] 536 * Each write of '1' to SSTEP_GO will go through a single training 537 * iteration and will perform... 538 * 539 * - DLL update, if SPX*_DBG_DESKEW_CTL[DLLDIS] is clear 540 * - coarse update, if SPX*_TRN4_CTL[MUX_EN] is set 541 * - single fine update, if SPX*_TRN4_CTL[MACRO_EN] is set and an edge 542 * was detected after walked +/- SPX*_TRN4_CTL[MAXDIST] taps. 543 * 544 * Writes to this register have no effect if the interface is not in 545 * SSTEP mode (SPX*_DBG_DESKEW_CTL[SSTEP]). 546 * 547 * The WILMA mode will be cleared at the final state transition, so 548 * that software can set SPX*_DBG_DESKEW_CTL[SSTEP] and 549 * SPX*_DBG_DESKEW_CTL[SSTEP_GO] before setting SPX*_CLK_CTL[RCVTRN] 550 * and the machine will go through the initial iteration and stop - 551 * waiting for another SPX*_DBG_DESKEW_CTL[SSTEP_GO] or an interface 552 * enable. 553 * 554 * * SPX*_DBG_DESKEW_CTL[FALL8] 555 * Determines how many pattern matches are required during training 556 * operations to fallout of training and begin processing the normal data 557 * stream. The default value is 10 pattern matches. The pattern that is 558 * used is dependent on the SPX*_DBG_DESKEW_CTL[FALLNOP] CSR which 559 * determines between non-training packets (the default) and NOPs. 
560 * 561 * * SPX*_DBG_DESKEW_CTL[FALLNOP] 562 * Determines the pattern that is required during training operations to 563 * fallout of training and begin processing the normal data stream. The 564 * default value is to match against non-training data. Setting this 565 * bit, changes the behavior to watch for NOPs packet instead. 566 * 567 * This bit should not be changed dynamically while the link is 568 * operational. 569 */ 570union cvmx_spxx_dbg_deskew_ctl 571{ 572 uint64_t u64; 573 struct cvmx_spxx_dbg_deskew_ctl_s 574 { 575#if __BYTE_ORDER == __BIG_ENDIAN 576 uint64_t reserved_30_63 : 34; 577 uint64_t fallnop : 1; /**< Training fallout on NOP matches instead of 578 non-training matches. 579 (spx_csr__spi4_fallout_nop) */ 580 uint64_t fall8 : 1; /**< Training fallout at 8 pattern matches instead of 10 581 (spx_csr__spi4_fallout_8_match) */ 582 uint64_t reserved_26_27 : 2; 583 uint64_t sstep_go : 1; /**< Single Step Training Sequence 584 (spx_csr__spi4_single_step_go) */ 585 uint64_t sstep : 1; /**< Single Step Training Mode 586 (spx_csr__spi4_single_step_mode) */ 587 uint64_t reserved_22_23 : 2; 588 uint64_t clrdly : 1; /**< Resets the offset control in the XCV 589 (spx_csr__spi4_dll_clr_dly) */ 590 uint64_t dec : 1; /**< Decrement the offset by OFFSET for the Spi4 591 bit selected by BITSEL 592 (spx_csr__spi4_dbg_trn_dec) */ 593 uint64_t inc : 1; /**< Increment the offset by OFFSET for the Spi4 594 bit selected by BITSEL 595 (spx_csr__spi4_dbg_trn_inc) */ 596 uint64_t mux : 1; /**< Set the mux select tap for the Spi4 bit 597 selected by BITSEL 598 (spx_csr__spi4_dbg_trn_mux) */ 599 uint64_t offset : 5; /**< Adds or subtracts (Based on INC or DEC) the 600 offset to Spi4 bit BITSEL. 
601 (spx_csr__spi4_dbg_trn_offset) */ 602 uint64_t bitsel : 5; /**< Select the Spi4 CTL or DAT bit 603 15-0 : Spi4 DAT[15:0] 604 16 : Spi4 CTL 605 - 31-17: Invalid 606 (spx_csr__spi4_dbg_trn_bitsel) */ 607 uint64_t offdly : 6; /**< Set the spx__offset lines to this value when 608 not in macro sequence 609 (spx_csr__spi4_mac_offdly) */ 610 uint64_t dllfrc : 1; /**< Force the Spi4 RX DLL to update 611 (spx_csr__spi4_dll_force) */ 612 uint64_t dlldis : 1; /**< Disable sending the update signal to the Spi4 613 RX DLL when set 614 (spx_csr__spi4_dll_trn_en) */ 615#else 616 uint64_t dlldis : 1; 617 uint64_t dllfrc : 1; 618 uint64_t offdly : 6; 619 uint64_t bitsel : 5; 620 uint64_t offset : 5; 621 uint64_t mux : 1; 622 uint64_t inc : 1; 623 uint64_t dec : 1; 624 uint64_t clrdly : 1; 625 uint64_t reserved_22_23 : 2; 626 uint64_t sstep : 1; 627 uint64_t sstep_go : 1; 628 uint64_t reserved_26_27 : 2; 629 uint64_t fall8 : 1; 630 uint64_t fallnop : 1; 631 uint64_t reserved_30_63 : 34; 632#endif 633 } s; 634 struct cvmx_spxx_dbg_deskew_ctl_s cn38xx; 635 struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2; 636 struct cvmx_spxx_dbg_deskew_ctl_s cn58xx; 637 struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1; 638}; 639typedef union cvmx_spxx_dbg_deskew_ctl cvmx_spxx_dbg_deskew_ctl_t; 640 641/** 642 * cvmx_spx#_dbg_deskew_state 643 * 644 * Notes: 645 * These bits are meant as a backdoor to control Spi4 per-bit deskew. See 646 * that Spec for more details. 
647 */ 648union cvmx_spxx_dbg_deskew_state 649{ 650 uint64_t u64; 651 struct cvmx_spxx_dbg_deskew_state_s 652 { 653#if __BYTE_ORDER == __BIG_ENDIAN 654 uint64_t reserved_9_63 : 55; 655 uint64_t testres : 1; /**< Training Test Mode Result 656 (srx_spi4__test_mode_result) */ 657 uint64_t unxterm : 1; /**< Unexpected training terminiation 658 (srx_spi4__top_unxexp_trn_term) */ 659 uint64_t muxsel : 2; /**< The mux select value of the bit selected by 660 SPX_DBG_DESKEW_CTL[BITSEL] 661 (srx_spi4__trn_mux_sel) */ 662 uint64_t offset : 5; /**< The counter value of the bit selected by 663 SPX_DBG_DESKEW_CTL[BITSEL] 664 (srx_spi4__xcv_tap_select) */ 665#else 666 uint64_t offset : 5; 667 uint64_t muxsel : 2; 668 uint64_t unxterm : 1; 669 uint64_t testres : 1; 670 uint64_t reserved_9_63 : 55; 671#endif 672 } s; 673 struct cvmx_spxx_dbg_deskew_state_s cn38xx; 674 struct cvmx_spxx_dbg_deskew_state_s cn38xxp2; 675 struct cvmx_spxx_dbg_deskew_state_s cn58xx; 676 struct cvmx_spxx_dbg_deskew_state_s cn58xxp1; 677}; 678typedef union cvmx_spxx_dbg_deskew_state cvmx_spxx_dbg_deskew_state_t; 679 680/** 681 * cvmx_spx#_drv_ctl 682 * 683 * Notes: 684 * These bits all come from Duke - he will provide documentation and 685 * explanation. I'll just butcher it. 
686 */ 687union cvmx_spxx_drv_ctl 688{ 689 uint64_t u64; 690 struct cvmx_spxx_drv_ctl_s 691 { 692#if __BYTE_ORDER == __BIG_ENDIAN 693 uint64_t reserved_0_63 : 64; 694#else 695 uint64_t reserved_0_63 : 64; 696#endif 697 } s; 698 struct cvmx_spxx_drv_ctl_cn38xx 699 { 700#if __BYTE_ORDER == __BIG_ENDIAN 701 uint64_t reserved_16_63 : 48; 702 uint64_t stx4ncmp : 4; /**< Duke (spx__spi4_tx_nctl_comp) */ 703 uint64_t stx4pcmp : 4; /**< Duke (spx__spi4_tx_pctl_comp) */ 704 uint64_t srx4cmp : 8; /**< Duke (spx__spi4_rx_rctl_comp) */ 705#else 706 uint64_t srx4cmp : 8; 707 uint64_t stx4pcmp : 4; 708 uint64_t stx4ncmp : 4; 709 uint64_t reserved_16_63 : 48; 710#endif 711 } cn38xx; 712 struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2; 713 struct cvmx_spxx_drv_ctl_cn58xx 714 { 715#if __BYTE_ORDER == __BIG_ENDIAN 716 uint64_t reserved_24_63 : 40; 717 uint64_t stx4ncmp : 4; /**< Duke (spx__spi4_tx_nctl_comp) */ 718 uint64_t stx4pcmp : 4; /**< Duke (spx__spi4_tx_pctl_comp) */ 719 uint64_t reserved_10_15 : 6; 720 uint64_t srx4cmp : 10; /**< Duke (spx__spi4_rx_rctl_comp) */ 721#else 722 uint64_t srx4cmp : 10; 723 uint64_t reserved_10_15 : 6; 724 uint64_t stx4pcmp : 4; 725 uint64_t stx4ncmp : 4; 726 uint64_t reserved_24_63 : 40; 727#endif 728 } cn58xx; 729 struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1; 730}; 731typedef union cvmx_spxx_drv_ctl cvmx_spxx_drv_ctl_t; 732 733/** 734 * cvmx_spx#_err_ctl 735 * 736 * SPX_ERR_CTL - Spi error control register 737 * 738 * 739 * Notes: 740 * * DIPPAY, DIPCLS, PRTNXA 741 * These bits control whether or not the packet's ERR bit is set when any of 742 * the these error is detected. If the corresponding error's bit is clear, 743 * the packet ERR will be set. If the error bit is set, the SPX will simply 744 * pass through the ERR bit without modifying it in anyway - the error bit 745 * may or may not have been set by the transmitter device. 
 */
/* SPX_ERR_CTL - error handling policy: which Spi4 error conditions mark the
   affected packet's ERR bit, and the DIP4 threshold for declaring loss of sync. */
union cvmx_spxx_err_ctl
{
	uint64_t u64;
	struct cvmx_spxx_err_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t prtnxa                       : 1;  /**< Spi4 - set the ERR bit on packets in which the
                                                         port is out-of-range */
	uint64_t dipcls                       : 1;  /**< Spi4 DIPERR on closing control words cause the
                                                         ERR bit to be set */
	uint64_t dippay                       : 1;  /**< Spi4 DIPERR on payload control words cause the
                                                         ERR bit to be set */
	uint64_t reserved_4_5                 : 2;
	uint64_t errcnt                       : 4;  /**< Number of Dip4 errors before bringing down the
                                                         interface */
#else
	uint64_t errcnt                       : 4;
	uint64_t reserved_4_5                 : 2;
	uint64_t dippay                       : 1;
	uint64_t dipcls                       : 1;
	uint64_t prtnxa                       : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	struct cvmx_spxx_err_ctl_s            cn38xx;
	struct cvmx_spxx_err_ctl_s            cn38xxp2;
	struct cvmx_spxx_err_ctl_s            cn58xx;
	struct cvmx_spxx_err_ctl_s            cn58xxp1;
};
typedef union cvmx_spxx_err_ctl cvmx_spxx_err_ctl_t;

/**
 * cvmx_spx#_int_dat
 *
 * SPX_INT_DAT - Interrupt Data Register
 *
 *
 * Notes:
 * Note: The SPX_INT_DAT[MUL] bit is set when multiple errors have been
 * detected that would set any of the data fields: PRT, RSVOP, and CALBNK.
 *
 * The following errors will cause MUL to assert for PRT conflicts.
 * - ABNORM
 * - APERR
 * - DPERR
 *
 * The following errors will cause MUL to assert for RSVOP conflicts.
 * - RSVERR
 *
 * The following errors will cause MUL to assert for CALBNK conflicts.
 * - CALERR
 *
 * The following errors will cause MUL to assert if multiple interrupts are
 * asserted.
 * - TPAOVR
 *
 * The MUL bit will be cleared once all outstanding errors have been
 * cleared by software (not just MUL errors - all errors).
 */
union cvmx_spxx_int_dat
{
	uint64_t u64;
	struct cvmx_spxx_int_dat_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t mul                          : 1;  /**< Multiple errors have occurred */
	uint64_t reserved_14_30               : 17;
	uint64_t calbnk                       : 2;  /**< Spi4 Calendar table parity error bank */
	uint64_t rsvop                        : 4;  /**< Spi4 reserved control word */
	uint64_t prt                          : 8;  /**< Port associated with error */
#else
	uint64_t prt                          : 8;
	uint64_t rsvop                        : 4;
	uint64_t calbnk                       : 2;
	uint64_t reserved_14_30               : 17;
	uint64_t mul                          : 1;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_spxx_int_dat_s            cn38xx;
	struct cvmx_spxx_int_dat_s            cn38xxp2;
	struct cvmx_spxx_int_dat_s            cn58xx;
	struct cvmx_spxx_int_dat_s            cn58xxp1;
};
typedef union cvmx_spxx_int_dat cvmx_spxx_int_dat_t;

/**
 * cvmx_spx#_int_msk
 *
 * SPX_INT_MSK - Interrupt Mask Register
 *
 */
union cvmx_spxx_int_msk
{
	uint64_t u64;
	struct cvmx_spxx_int_msk_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_12_63               : 52;
	uint64_t calerr                       : 1;  /**< Spi4 Calendar table parity error */
	uint64_t syncerr                      : 1;  /**< Consecutive Spi4 DIP4 errors have exceeded
                                                         SPX_ERR_CTL[ERRCNT] */
	uint64_t diperr                       : 1;  /**< Spi4 DIP4 error */
	uint64_t tpaovr                       : 1;  /**< Selected port has hit TPA overflow */
	uint64_t rsverr                       : 1;  /**< Spi4 reserved control word detected */
	uint64_t drwnng                       : 1;  /**< Spi4 receive FIFO drowning/overflow */
	uint64_t clserr                       : 1;  /**< Spi4 packet closed on non-16B alignment without EOP */
	uint64_t spiovr                       : 1;  /**< Spi async FIFO overflow (Spi3 or Spi4) */
	uint64_t reserved_2_3                 : 2;
	uint64_t abnorm                       : 1;  /**< Abnormal packet termination (ERR bit) */
	uint64_t prtnxa                       : 1;  /**< Port out of range */
#else
	uint64_t prtnxa                       : 1;
	uint64_t abnorm                       : 1;
	uint64_t reserved_2_3                 : 2;
	uint64_t spiovr                       : 1;
	uint64_t clserr                       : 1;
	uint64_t drwnng                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t tpaovr                       : 1;
	uint64_t diperr                       : 1;
	uint64_t syncerr                      : 1;
	uint64_t calerr                       : 1;
	uint64_t reserved_12_63               : 52;
#endif
	} s;
	struct cvmx_spxx_int_msk_s            cn38xx;
	struct cvmx_spxx_int_msk_s            cn38xxp2;
	struct cvmx_spxx_int_msk_s            cn58xx;
	struct cvmx_spxx_int_msk_s            cn58xxp1;
};
typedef union cvmx_spxx_int_msk cvmx_spxx_int_msk_t;

/**
 * cvmx_spx#_int_reg
 *
 * SPX_INT_REG - Interrupt Register
 *
 *
 * Notes:
 * * PRTNXA
 * This error indicates that the port on the Spi bus was not a valid port
 * for the system. Spi4 accesses occur on payload control bit-times. The
 * SRX can be configured with the exact number of ports available (by
 * SRX_COM_CTL[PRTS] register). Any Spi access to anything outside the range
 * of 0 .. (SRX_COM_CTL[PRTS] - 1) is considered an error. The offending
 * port is logged in SPX_INT_DAT[PRT] if there are no pending interrupts in
 * SPX_INT_REG that require SPX_INT_DAT[PRT].
 *
 * SRX will not drop the packet with the bogus port address. Instead, the
 * port will be mapped into the supported port range. The remapped address
 * is simply...
 *
 * Address = [ interfaceId, ADR[3:0] ]
 *
 * If the SPX detects that a PRTNXA error has occurred, the packet will
 * have its ERR bit set (or'ed in with the ERR bit from the transmitter)
 * if the SPX_ERR_CTL[PRTNXA] bit is clear.
 *
 * In Spi4 mode, SPX will generate an interrupt for every 8B data burst
 * associated with the invalid address. The SPX_INT_DAT[MUL] bit will never
 * be set.
 *
 * * ABNORM
 * This bit simply indicates that a given packet had abnormal termination.
 * In Spi4 mode, this means that packet completed with an EOPS[1:0] code of
 * 2'b01. This error can also be thought of as the application specific
 * error (as mentioned in the Spi4 spec).
 * The offending port is logged in
 * SPX_INT_DAT[PRT] if there are no pending interrupts in SPX_INT_REG that
 * require SPX_INT_DAT[PRT].
 *
 * The ABNORM error is only raised when the ERR bit that comes from the
 * Spi interface is set. It will never assert if any internal condition
 * causes the ERR bit to assert (e.g. PRTNXA or DPERR).
 *
 * * SPIOVR
 * This error indicates that the FIFOs that manage the async crossing from
 * the Spi clocks to the core clock domains have overflowed. This is a
 * fatal error and can cause much data/control corruption since ticks will
 * be dropped and reordered. This is purely a function of clock ratios and
 * correct system ratios should make this an impossible condition.
 *
 * * CLSERR
 * This is a Spi4 error that indicates that a given data transfer burst
 * that did not terminate with an EOP, did not end with the 16B alignment
 * as per the Spi4 spec. The offending port cannot be logged since the
 * block does not know the stream terminated until the port switches.
 * At that time, that packet has already been pushed down the pipe.
 *
 * The CLSERR bit does not actually check the Spi4 burst - just how data
 * is accumulated for the downstream logic. Bursts that are separated by
 * idles or training will still be merged into accumulated transfers and
 * will not fire the CLSERR condition. The checker is really checking
 * non-8B aligned, non-EOP data ticks that are sent downstream. These
 * ticks are what will really mess up the core.
 *
 * This is an expensive fix, so we'll probably let it ride. We never
 * claim to check Spi4 protocol anyway.
 *
 * * DRWNNG
 * This error indicates that the Spi4 FIFO that services the GMX has
 * overflowed. Like the SPIOVR error condition, correct system ratios
 * should make this an impossible condition.
 *
 * * RSVERR
 * This Spi4 error indicates that the Spi4 receiver has seen a reserve
 * control packet. A reserve control packet is an invalid combination
 * of bits on DAT[15:12]. Basically this is DAT[15] == 1'b0 and DAT[12]
 * == 1'b1 (an SOP without a payload command). The RSVERR indicates an
 * error has occurred and SPX_INT_DAT[RSVOP] holds the first reserved
 * opcode and will be set if there are no pending interrupts in
 * SPX_INT_REG that require SPX_INT_DAT[RSVOP].
 *
 * * TPAOVR
 * This bit indicates that the TPA Watcher has flagged an event. See the
 * TPA Watcher for a more detailed discussion.
 *
 * * DIPERR
 * This bit indicates that the Spi4 receiver has encountered a DIP4
 * miscompare on the datapath. A DIPERR can occur in an IDLE or a
 * control word that frames a data burst. If the DIPERR occurs on a
 * framing word there are three cases.
 *
 * 1) DIPERR occurs at the end of a data burst. The previous packet is
 * marked with the ERR bit to be processed later if
 * SPX_ERR_CTL[DIPCLS] is clear.
 * 2) DIPERR occurs on a payload word. The subsequent packet is marked
 * with the ERR bit to be processed later if SPX_ERR_CTL[DIPPAY] is
 * clear.
 * 3) DIPERR occurs on a control word that closes one packet and is a
 * payload for another packet. In this case, both packets will have
 * their ERR bit marked depending on the respective values of
 * SPX_ERR_CTL[DIPCLS] and SPX_ERR_CTL[DIPPAY] as discussed above.
 *
 * * SYNCERR
 * This bit indicates that the Spi4 receiver has encountered
 * SPX_ERR_CTL[ERRCNT] consecutive Spi4 DIP4 errors and the interface
 * should be synched.
 *
 * * CALERR
 * This bit indicates that the Spi4 calendar table encountered a parity
 * error. This error bit is associated with the calendar table on the RX
 * interface - the interface that receives the Spi databus. Parity errors
 * can occur during normal operation when the calendar table is constantly
 * being read for the port information, or during initialization time, when
 * the user has access. Since the calendar table is split into two banks,
 * SPX_INT_DAT[CALBNK] indicates which banks have taken a parity error.
 * CALBNK[1] indicates the error occurred in the upper bank, while CALBNK[0]
 * indicates that the error occurred in the lower bank. SPX_INT_DAT[CALBNK]
 * will be set if there are no pending interrupts in SPX_INT_REG that
 * require SPX_INT_DAT[CALBNK].
 *
 * * SPF
 * This bit indicates that a Spi fatal error has occurred. A fatal error
 * is defined as any error condition for which the corresponding
 * SPX_INT_SYNC bit is set. Therefore, conservative systems can halt the
 * interface on any error condition although this is not strictly
 * necessary. Some errors are much more fatal in nature than others.
 *
 * PRTNXA, SPIOVR, CLSERR, DRWNNG, DIPERR, CALERR, and SYNCERR are examples
 * of fatal errors for different reasons - usually because multiple port
 * streams could be affected. ABNORM, RSVERR, and TPAOVR are conditions
 * that are contained to a single packet which allows the interface to drop
 * a single packet and remain up and stable.
 */
union cvmx_spxx_int_reg
{
	uint64_t u64;
	struct cvmx_spxx_int_reg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t spf                          : 1;  /**< Spi interface down */
	uint64_t reserved_12_30               : 19;
	uint64_t calerr                       : 1;  /**< Spi4 Calendar table parity error */
	uint64_t syncerr                      : 1;  /**< Consecutive Spi4 DIP4 errors have exceeded
                                                         SPX_ERR_CTL[ERRCNT] */
	uint64_t diperr                       : 1;  /**< Spi4 DIP4 error */
	uint64_t tpaovr                       : 1;  /**< Selected port has hit TPA overflow */
	uint64_t rsverr                       : 1;  /**< Spi4 reserved control word detected */
	uint64_t drwnng                       : 1;  /**< Spi4 receive FIFO drowning/overflow */
	uint64_t clserr                       : 1;  /**< Spi4 packet closed on non-16B alignment without EOP */
	uint64_t spiovr                       : 1;  /**< Spi async FIFO overflow */
	uint64_t reserved_2_3                 : 2;
	uint64_t abnorm                       : 1;  /**< Abnormal packet termination (ERR bit) */
	uint64_t prtnxa                       : 1;  /**< Port out of range */
#else
	uint64_t prtnxa                       : 1;
	uint64_t abnorm                       : 1;
	uint64_t reserved_2_3                 : 2;
	uint64_t spiovr                       : 1;
	uint64_t clserr                       : 1;
	uint64_t drwnng                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t tpaovr                       : 1;
	uint64_t diperr                       : 1;
	uint64_t syncerr                      : 1;
	uint64_t calerr                       : 1;
	uint64_t reserved_12_30               : 19;
	uint64_t spf                          : 1;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_spxx_int_reg_s            cn38xx;
	struct cvmx_spxx_int_reg_s            cn38xxp2;
	struct cvmx_spxx_int_reg_s            cn58xx;
	struct cvmx_spxx_int_reg_s            cn58xxp1;
};
typedef union cvmx_spxx_int_reg cvmx_spxx_int_reg_t;

/**
 * cvmx_spx#_int_sync
 *
 * SPX_INT_SYNC - Interrupt Sync Register
 *
 *
 * Notes:
 * This mask set indicates which exception condition should cause the
 * SPX_INT_REG[SPF] bit to assert
 *
 * It is recommended that software set the PRTNXA, SPIOVR, CLSERR, DRWNNG,
 * DIPERR, CALERR, and SYNCERR errors as synchronization events. Software is
 * free to synchronize the bus on other conditions, but this is the minimum
 * recommended set.
 */
union cvmx_spxx_int_sync
{
	uint64_t u64;
	struct cvmx_spxx_int_sync_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_12_63               : 52;
	uint64_t calerr                       : 1;  /**< Spi4 Calendar table parity error */
	uint64_t syncerr                      : 1;  /**< Consecutive Spi4 DIP4 errors have exceeded
                                                         SPX_ERR_CTL[ERRCNT] */
	uint64_t diperr                       : 1;  /**< Spi4 DIP4 error */
	uint64_t tpaovr                       : 1;  /**< Selected port has hit TPA overflow */
	uint64_t rsverr                       : 1;  /**< Spi4 reserved control word detected */
	uint64_t drwnng                       : 1;  /**< Spi4 receive FIFO drowning/overflow */
	uint64_t clserr                       : 1;  /**< Spi4 packet closed on non-16B alignment without EOP */
	uint64_t spiovr                       : 1;  /**< Spi async FIFO overflow (Spi3 or Spi4) */
	uint64_t reserved_2_3                 : 2;
	uint64_t abnorm                       : 1;  /**< Abnormal packet termination (ERR bit) */
	uint64_t prtnxa                       : 1;  /**< Port out of range */
#else
	uint64_t prtnxa                       : 1;
	uint64_t abnorm                       : 1;
	uint64_t reserved_2_3                 : 2;
	uint64_t spiovr                       : 1;
	uint64_t clserr                       : 1;
	uint64_t drwnng                       : 1;
	uint64_t rsverr                       : 1;
	uint64_t tpaovr                       : 1;
	uint64_t diperr                       : 1;
	uint64_t syncerr                      : 1;
	uint64_t calerr                       : 1;
	uint64_t reserved_12_63               : 52;
#endif
	} s;
	struct cvmx_spxx_int_sync_s           cn38xx;
	struct cvmx_spxx_int_sync_s           cn38xxp2;
	struct cvmx_spxx_int_sync_s           cn58xx;
	struct cvmx_spxx_int_sync_s           cn58xxp1;
};
typedef union cvmx_spxx_int_sync cvmx_spxx_int_sync_t;

/**
 * cvmx_spx#_tpa_acc
 *
 * SPX_TPA_ACC - TPA watcher byte accumulator
 *
 *
 * Notes:
 * This field allows the user to access the TPA watcher accumulator counter.
 * This register reflects the number of bytes sent to IMX once the port
 * specified by SPX_TPA_SEL[PRTSEL] has lost its TPA. The SPX_INT_REG[TPAOVR]
 * bit is asserted when CNT >= SPX_TPA_MAX[MAX]. The CNT will continue to
 * increment until the TPA for the port is asserted. At that point the CNT
 * value is frozen until software clears the interrupt bit.
 */
union cvmx_spxx_tpa_acc
{
	uint64_t u64;
	struct cvmx_spxx_tpa_acc_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t cnt                          : 32; /**< TPA watcher accumulate count */
#else
	uint64_t cnt                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_spxx_tpa_acc_s            cn38xx;
	struct cvmx_spxx_tpa_acc_s            cn38xxp2;
	struct cvmx_spxx_tpa_acc_s            cn58xx;
	struct cvmx_spxx_tpa_acc_s            cn58xxp1;
};
typedef union cvmx_spxx_tpa_acc cvmx_spxx_tpa_acc_t;

/**
 * cvmx_spx#_tpa_max
 *
 * SPX_TPA_MAX - TPA watcher assertion threshold
 *
 *
 * Notes:
 * The TPA watcher has the ability to notify the system with an interrupt when
 * too much data has been received on loss of TPA. The user sets the
 * SPX_TPA_MAX[MAX] register and when the watcher has accumulated that many
 * ticks, then the interrupt is conditionally raised (based on interrupt mask
 * bits). This feature will be disabled if the programmed count is zero.
 */
union cvmx_spxx_tpa_max
{
	uint64_t u64;
	struct cvmx_spxx_tpa_max_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_32_63               : 32;
	uint64_t max                          : 32; /**< TPA watcher TPA threshold */
#else
	uint64_t max                          : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_spxx_tpa_max_s            cn38xx;
	struct cvmx_spxx_tpa_max_s            cn38xxp2;
	struct cvmx_spxx_tpa_max_s            cn58xx;
	struct cvmx_spxx_tpa_max_s            cn58xxp1;
};
typedef union cvmx_spxx_tpa_max cvmx_spxx_tpa_max_t;

/**
 * cvmx_spx#_tpa_sel
 *
 * SPX_TPA_SEL - TPA watcher port selector
 *
 *
 * Notes:
 * The TPA Watcher is primarily a debug vehicle used to help initial bringup
 * of a system. The TPA watcher counts bytes that roll in from the Spi
 * interface. The user programs the Spi port to watch using
 * SPX_TPA_SEL[PRTSEL]. Once the TPA is deasserted for that port, the watcher
 * begins to count the data ticks that have been delivered to the inbound
 * datapath (and eventually to the IOB). The result is that we can derive
 * turn-around times of the other device by watching how much data was sent
 * after a loss of TPA through the SPX_TPA_ACC[CNT] register. An optional
 * interrupt may be raised as well. See SPX_TPA_MAX for further information.
 *
 * TPA's can be deasserted for a number of reasons...
 *
 * 1) IPD indicates backpressure
 * 2) The GMX inbound FIFO is filling up and should BP
 * 3) User has put an override on the TPA wires
 */
union cvmx_spxx_tpa_sel
{
	uint64_t u64;
	struct cvmx_spxx_tpa_sel_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t prtsel                       : 4;  /**< TPA watcher port select */
#else
	uint64_t prtsel                       : 4;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_spxx_tpa_sel_s            cn38xx;
	struct cvmx_spxx_tpa_sel_s            cn38xxp2;
	struct cvmx_spxx_tpa_sel_s            cn58xx;
	struct cvmx_spxx_tpa_sel_s            cn58xxp1;
};
typedef union cvmx_spxx_tpa_sel cvmx_spxx_tpa_sel_t;

/**
 * cvmx_spx#_trn4_ctl
 *
 * Notes:
 * These bits are controls for the Spi4 RX bit deskew logic. See that Spec
 * for further details.
 *
 * * BOOT_BIT
 * On the initial training synchronization sequence, the hardware has the
 * BOOT_BIT set which means that it will continuously perform macro
 * operations. Once the BOOT_BIT is cleared, the macro machine will finish
 * the macro operation it is working on and then return to the idle state.
 * Subsequent training sequences will only go through a single macro
 * operation in order to do slight deskews.
 *
 * * JITTER
 * Minimum value is 1. This parameter must be set for Spi4 mode using
 * auto-bit deskew. Regardless of the original intent, this field must be
 * set non-zero for deskew to function correctly.
 *
 * The thought is the JITTER range is no longer required since the macro
 * machine was enhanced to understand about edge direction. Originally
 * these bits were intended to compensate for clock jitter.
 *
 * dly: this is the intrinsic delay of each delay element
 * tap currently, it is 70ps-110ps.
 * jitter: amount of jitter we expect in the system (~200ps)
 * j: number of taps to account for jitter
 *
 * j = ((jitter / dly) + 1)
 *
 * * TRNTEST
 * This mode is used to test systems to make sure that the bit deskew
 * parameters have been correctly setup. After configuration, software can
 * set the TRNTEST mode bit. This should be done before SRX_COM_CTL[ST_EN]
 * is set such that we can be sure that the TX device is simply sending
 * continuous training patterns.
 *
 * The test mode samples every incoming bit-time and makes sure that it is
 * either a training control or a training data packet. If any other data
 * is observed, then SPX_DBG_DESKEW_STATE[TESTRES] will assert signaling a
 * test failure.
 *
 * Software must clear TRNTEST before training is terminated.
 *
 * * Example Spi4 RX init flow...
 *
 * 1) set the CLKDLY lines (SPXX_CLK_CTL[CLKDLY])
 * - these bits must be set before the DLL can successfully lock
 *
 * 2) set the SRXDLCK (SPXX_CLK_CTL[SRXDLCK])
 * - this is the DLL lock bit which also acts as a block reset
 *
 * 3) wait for the DLLs lock
 *
 * 4) set any desired fields in SPXX_DBG_DESKEW_CTL
 * - This register has only one field that most users will care about.
 * When set, DLLDIS will disable sending update pulses to the Spi4 RX
 * DLLs. This pulse allows the DLL to adjust to clock variations over
 * time. In general, it is desired behavior.
 *
 * 5) set fields in SPXX_TRN4_CTL
 * - These fields deal with the MUX training sequence
 * * MUX_EN
 * This is the enable bit for the mux select. The MUX select will
 * run in the training sequence between the DLL and the Macro
 * sequence when enabled. Once the MUX selects are selected, the
 * entire macro sequence must be rerun. The expectation is that
 * this is only run at boot time and this bit is cleared at/around
 * step \#8.
 * - These fields deal with the Macro training sequence
 * * MACRO_EN
 * This is the enable bit for the macro sequence. Macro sequences
 * will run after the DLL and MUX training sequences. Each macro
 * sequence can move the offset by one value.
 * * MAXDIST
 * This is how far we will search for an edge. Example...
 *
 * dly: this is the intrinsic delay of each delay element
 * tap currently, it is 70ps-110ps.
 * U: bit time period in time units.
 *
 * MAXDIST = MIN(16, ((bit_time / 2) / dly)
 *
 * Each MAXDIST iteration consists of an edge detect in the early
 * and late (+/-) directions in an attempt to center the data. This
 * requires two training transitions, the control/data and
 * data/control transitions which comprise a training sequence.
 * Therefore, the number of training sequences required for a single
 * macro operation is simply MAXDIST.
 *
 * 6) set the RCVTRN go bit (SPXX_CLK_CTL[RCVTRN])
 * - this bit synchs on the first valid complete training cycle and
 * starts to process the training packets
 *
 * 6b) This is where software could manually set the controls as opposed to
 * letting the hardware do it. See the SPXX_DBG_DESKEW_CTL register
 * description for more detail.
 *
 * 7) the TX device must continue to send training packets for the initial
 * time period.
 * - this can be determined by...
 *
 * DLL: one training sequence for the DLL adjustment (regardless of enable/disable)
 * MUX: one training sequence for the Flop MUX taps (regardless of enable/disable)
 * INIT_SEQUENCES: max number of taps that we must move
 *
 * INIT_SEQUENCES = MIN(16, ((bit_time / 2) / dly))
 *
 * INIT_TRN = DLL + MUX + ROUNDUP((INIT_SEQUENCES * (MAXDIST + 2)))
 *
 *
 * - software can either wait a fixed amount of time based on the clock
 * frequencies or poll the SPXX_CLK_STAT[SRXTRN] register. Each
 * assertion of SRXTRN means that at least one training sequence has
 * been received. Software can poll, clear, and repeat on this bit to
 * eventually count all required transitions.
 *
 * int cnt = 0;
 * while (cnt < INIT_TRN) [
 * if (SPXX_CLK_STAT[SRXTRN]) [
 * cnt++;
 * SPXX_CLK_STAT[SRXTRN] = 0;
 * ]
 * ]
 *
 * - subsequent training sequences will normally move the taps only
 * one position, so the ALPHA equation becomes...
 *
 * MAC = (MAXDIST == 0) ? 1 : ROUNDUP((1 * (MAXDIST + 2))) + 1
 *
 * ALPHA = DLL + MUX + MAC
 *
 * ergo, MAXDIST simplifies to...
 *
 * ALPHA = (MAXDIST == 0) ? 3 : MAXDIST + 5
 *
 * DLL and MUX and MAC will always require at least a training sequence
 * each - even if disabled. If the macro sequence is enabled, an
 * additional training sequence at the end is necessary. The extra
 * sequence allows for all training state to be cleared before resuming
 * normal operation.
 *
 * 8) after the receiver gets enough training sequences in order to achieve
 * deskew lock, set SPXX_TRN4_CTL[CLR_BOOT]
 * - this disables the continuous macro sequences and puts it into one
 * macro sequence per training operation
 * - optionally, the machine can choose to fall out of training if
 * enough NOPs follow the training operation (require at least 32 NOPs
 * to follow the training sequence).
 *
 * There must be at least MAXDIST + 3 training sequences after the
 * SPXX_TRN4_CTL[CLR_BOOT] is set or sufficient NOPs from the TX device.
 *
 * 9) the TX device continues to send training sequences until the RX
 * device sends a calendar transition. This is controlled by
 * SRXX_COM_CTL[ST_EN]. Other restrictions require other Spi parameters
 * (e.g. the calendar table) to be setup before this bit can be enabled.
 * Once the entire interface is properly programmed, software writes
 * SRXX_COM_CTL[INF_EN]. At this point, the Spi4 packets will begin to
 * be sent into the N2K core and processed by the chip.
 */
union cvmx_spxx_trn4_ctl
{
	uint64_t u64;
	struct cvmx_spxx_trn4_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_13_63               : 51;
	uint64_t trntest                      : 1;  /**< Training Test Mode
                                                         This bit is only for initial bringup
                                                         (spx_csr__spi4_trn_test_mode) */
	uint64_t jitter                       : 3;  /**< Accounts for jitter when the macro sequence is
                                                         locking. The value is how many consecutive
                                                         transitions before declaring an edge. Minimum
                                                         value is 1. This parameter must be set for Spi4
                                                         mode using auto-bit deskew.
                                                         (spx_csr__spi4_mac_jitter) */
	uint64_t clr_boot                     : 1;  /**< Clear the macro boot sequence mode bit
                                                         (spx_csr__spi4_mac_clr_boot) */
	uint64_t set_boot                     : 1;  /**< Enable the macro boot sequence mode bit
                                                         (spx_csr__spi4_mac_set_boot) */
	uint64_t maxdist                      : 5;  /**< This field defines how far from center the
                                                         deskew logic will search in a single macro
                                                         sequence (spx_csr__spi4_mac_iters) */
	uint64_t macro_en                     : 1;  /**< Allow the macro sequence to center the sample
                                                         point in the data window through hardware
                                                         (spx_csr__spi4_mac_trn_en) */
	uint64_t mux_en                       : 1;  /**< Enable the hardware machine that selects the
                                                         proper coarse FLOP selects
                                                         (spx_csr__spi4_mux_trn_en) */
#else
	uint64_t mux_en                       : 1;
	uint64_t macro_en                     : 1;
	uint64_t maxdist                      : 5;
	uint64_t set_boot                     : 1;
	uint64_t clr_boot                     : 1;
	uint64_t jitter                       : 3;
	uint64_t trntest                      : 1;
	uint64_t reserved_13_63               : 51;
#endif
	} s;
	struct cvmx_spxx_trn4_ctl_s           cn38xx;
	struct cvmx_spxx_trn4_ctl_s           cn38xxp2;
	struct cvmx_spxx_trn4_ctl_s           cn58xx;
	struct cvmx_spxx_trn4_ctl_s           cn58xxp1;
};
typedef union cvmx_spxx_trn4_ctl cvmx_spxx_trn4_ctl_t;

#endif