1/***********************license start*************** 2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Networks nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
38 ***********************license end**************************************/ 39 40 41/** 42 * cvmx-l2c-defs.h 43 * 44 * Configuration and status register (CSR) type definitions for 45 * Octeon l2c. 46 * 47 * This file is auto generated. Do not edit. 48 * 49 * <hr>$Revision$<hr> 50 * 51 */ 52#ifndef __CVMX_L2C_TYPEDEFS_H__ 53#define __CVMX_L2C_TYPEDEFS_H__ 54 55#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 56#define CVMX_L2C_BIG_CTL CVMX_L2C_BIG_CTL_FUNC() 57static inline uint64_t CVMX_L2C_BIG_CTL_FUNC(void) 58{ 59 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 60 cvmx_warn("CVMX_L2C_BIG_CTL not supported on this chip\n"); 61 return CVMX_ADD_IO_SEG(0x0001180080800030ull); 62} 63#else 64#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull)) 65#endif 66#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 67#define CVMX_L2C_BST CVMX_L2C_BST_FUNC() 68static inline uint64_t CVMX_L2C_BST_FUNC(void) 69{ 70 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 71 cvmx_warn("CVMX_L2C_BST not supported on this chip\n"); 72 return CVMX_ADD_IO_SEG(0x00011800808007F8ull); 73} 74#else 75#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull)) 76#endif 77#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 78#define CVMX_L2C_BST0 CVMX_L2C_BST0_FUNC() 79static inline uint64_t CVMX_L2C_BST0_FUNC(void) 80{ 81 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 82 cvmx_warn("CVMX_L2C_BST0 not supported on this chip\n"); 83 return CVMX_ADD_IO_SEG(0x00011800800007F8ull); 84} 85#else 86#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull)) 87#endif 88#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 89#define CVMX_L2C_BST1 CVMX_L2C_BST1_FUNC() 90static inline uint64_t CVMX_L2C_BST1_FUNC(void) 91{ 92 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 93 cvmx_warn("CVMX_L2C_BST1 not supported on this chip\n"); 94 return CVMX_ADD_IO_SEG(0x00011800800007F0ull); 95} 96#else 97#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull)) 98#endif 99#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 100#define 
CVMX_L2C_BST2 CVMX_L2C_BST2_FUNC() 101static inline uint64_t CVMX_L2C_BST2_FUNC(void) 102{ 103 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 104 cvmx_warn("CVMX_L2C_BST2 not supported on this chip\n"); 105 return CVMX_ADD_IO_SEG(0x00011800800007E8ull); 106} 107#else 108#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull)) 109#endif 110#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 111static inline uint64_t CVMX_L2C_BST_MEMX(unsigned long block_id) 112{ 113 if (!( 114 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 115 cvmx_warn("CVMX_L2C_BST_MEMX(%lu) is invalid on this chip\n", block_id); 116 return CVMX_ADD_IO_SEG(0x0001180080C007F8ull); 117} 118#else 119#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull)) 120#endif 121#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 122static inline uint64_t CVMX_L2C_BST_TDTX(unsigned long block_id) 123{ 124 if (!( 125 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 126 cvmx_warn("CVMX_L2C_BST_TDTX(%lu) is invalid on this chip\n", block_id); 127 return CVMX_ADD_IO_SEG(0x0001180080A007F0ull); 128} 129#else 130#define CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull)) 131#endif 132#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 133static inline uint64_t CVMX_L2C_BST_TTGX(unsigned long block_id) 134{ 135 if (!( 136 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 137 cvmx_warn("CVMX_L2C_BST_TTGX(%lu) is invalid on this chip\n", block_id); 138 return CVMX_ADD_IO_SEG(0x0001180080A007F8ull); 139} 140#else 141#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull)) 142#endif 143#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 144#define CVMX_L2C_CFG CVMX_L2C_CFG_FUNC() 145static inline uint64_t CVMX_L2C_CFG_FUNC(void) 146{ 147 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 148 cvmx_warn("CVMX_L2C_CFG not supported on this chip\n"); 149 return CVMX_ADD_IO_SEG(0x0001180080000000ull); 150} 151#else 152#define CVMX_L2C_CFG 
(CVMX_ADD_IO_SEG(0x0001180080000000ull)) 153#endif 154#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 155static inline uint64_t CVMX_L2C_COP0_MAPX(unsigned long offset) 156{ 157 if (!( 158 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1535) || ((offset >= 16128) && (offset <= 16383)))))) 159 cvmx_warn("CVMX_L2C_COP0_MAPX(%lu) is invalid on this chip\n", offset); 160 return CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8; 161} 162#else 163#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8) 164#endif 165#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 166#define CVMX_L2C_CTL CVMX_L2C_CTL_FUNC() 167static inline uint64_t CVMX_L2C_CTL_FUNC(void) 168{ 169 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 170 cvmx_warn("CVMX_L2C_CTL not supported on this chip\n"); 171 return CVMX_ADD_IO_SEG(0x0001180080800000ull); 172} 173#else 174#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull)) 175#endif 176#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 177#define CVMX_L2C_DBG CVMX_L2C_DBG_FUNC() 178static inline uint64_t CVMX_L2C_DBG_FUNC(void) 179{ 180 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 181 cvmx_warn("CVMX_L2C_DBG not supported on this chip\n"); 182 return CVMX_ADD_IO_SEG(0x0001180080000030ull); 183} 184#else 185#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull)) 186#endif 187#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 188#define CVMX_L2C_DUT CVMX_L2C_DUT_FUNC() 189static inline uint64_t CVMX_L2C_DUT_FUNC(void) 190{ 191 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 192 cvmx_warn("CVMX_L2C_DUT not supported on this chip\n"); 193 return CVMX_ADD_IO_SEG(0x0001180080000050ull); 194} 195#else 196#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull)) 197#endif 198#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 199static inline uint64_t CVMX_L2C_DUT_MAPX(unsigned long offset) 200{ 201 if (!( 202 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1535))))) 203 cvmx_warn("CVMX_L2C_DUT_MAPX(%lu) is 
invalid on this chip\n", offset); 204 return CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 2047) * 8; 205} 206#else 207#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 2047) * 8) 208#endif 209#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 210static inline uint64_t CVMX_L2C_ERR_TDTX(unsigned long block_id) 211{ 212 if (!( 213 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 214 cvmx_warn("CVMX_L2C_ERR_TDTX(%lu) is invalid on this chip\n", block_id); 215 return CVMX_ADD_IO_SEG(0x0001180080A007E0ull); 216} 217#else 218#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull)) 219#endif 220#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 221static inline uint64_t CVMX_L2C_ERR_TTGX(unsigned long block_id) 222{ 223 if (!( 224 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 225 cvmx_warn("CVMX_L2C_ERR_TTGX(%lu) is invalid on this chip\n", block_id); 226 return CVMX_ADD_IO_SEG(0x0001180080A007E8ull); 227} 228#else 229#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull)) 230#endif 231#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 232static inline uint64_t CVMX_L2C_ERR_VBFX(unsigned long block_id) 233{ 234 if (!( 235 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 236 cvmx_warn("CVMX_L2C_ERR_VBFX(%lu) is invalid on this chip\n", block_id); 237 return CVMX_ADD_IO_SEG(0x0001180080C007F0ull); 238} 239#else 240#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull)) 241#endif 242#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 243#define CVMX_L2C_ERR_XMC CVMX_L2C_ERR_XMC_FUNC() 244static inline uint64_t CVMX_L2C_ERR_XMC_FUNC(void) 245{ 246 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 247 cvmx_warn("CVMX_L2C_ERR_XMC not supported on this chip\n"); 248 return CVMX_ADD_IO_SEG(0x00011800808007D8ull); 249} 250#else 251#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull)) 252#endif 253#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 254#define CVMX_L2C_GRPWRR0 CVMX_L2C_GRPWRR0_FUNC() 255static inline 
uint64_t CVMX_L2C_GRPWRR0_FUNC(void) 256{ 257 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 258 cvmx_warn("CVMX_L2C_GRPWRR0 not supported on this chip\n"); 259 return CVMX_ADD_IO_SEG(0x00011800800000C8ull); 260} 261#else 262#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull)) 263#endif 264#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 265#define CVMX_L2C_GRPWRR1 CVMX_L2C_GRPWRR1_FUNC() 266static inline uint64_t CVMX_L2C_GRPWRR1_FUNC(void) 267{ 268 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 269 cvmx_warn("CVMX_L2C_GRPWRR1 not supported on this chip\n"); 270 return CVMX_ADD_IO_SEG(0x00011800800000D0ull); 271} 272#else 273#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull)) 274#endif 275#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 276#define CVMX_L2C_INT_EN CVMX_L2C_INT_EN_FUNC() 277static inline uint64_t CVMX_L2C_INT_EN_FUNC(void) 278{ 279 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 280 cvmx_warn("CVMX_L2C_INT_EN not supported on this chip\n"); 281 return CVMX_ADD_IO_SEG(0x0001180080000100ull); 282} 283#else 284#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull)) 285#endif 286#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 287#define CVMX_L2C_INT_ENA CVMX_L2C_INT_ENA_FUNC() 288static inline uint64_t CVMX_L2C_INT_ENA_FUNC(void) 289{ 290 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 291 cvmx_warn("CVMX_L2C_INT_ENA not supported on this chip\n"); 292 return CVMX_ADD_IO_SEG(0x0001180080800020ull); 293} 294#else 295#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull)) 296#endif 297#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 298#define CVMX_L2C_INT_REG CVMX_L2C_INT_REG_FUNC() 299static inline uint64_t CVMX_L2C_INT_REG_FUNC(void) 300{ 301 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 302 cvmx_warn("CVMX_L2C_INT_REG not supported on this chip\n"); 303 return CVMX_ADD_IO_SEG(0x0001180080800018ull); 304} 305#else 306#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull)) 307#endif 
308#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 309#define CVMX_L2C_INT_STAT CVMX_L2C_INT_STAT_FUNC() 310static inline uint64_t CVMX_L2C_INT_STAT_FUNC(void) 311{ 312 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 313 cvmx_warn("CVMX_L2C_INT_STAT not supported on this chip\n"); 314 return CVMX_ADD_IO_SEG(0x00011800800000F8ull); 315} 316#else 317#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull)) 318#endif 319#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 320static inline uint64_t CVMX_L2C_IOCX_PFC(unsigned long block_id) 321{ 322 if (!( 323 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 324 cvmx_warn("CVMX_L2C_IOCX_PFC(%lu) is invalid on this chip\n", block_id); 325 return CVMX_ADD_IO_SEG(0x0001180080800420ull); 326} 327#else 328#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull)) 329#endif 330#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 331static inline uint64_t CVMX_L2C_IORX_PFC(unsigned long block_id) 332{ 333 if (!( 334 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 335 cvmx_warn("CVMX_L2C_IORX_PFC(%lu) is invalid on this chip\n", block_id); 336 return CVMX_ADD_IO_SEG(0x0001180080800428ull); 337} 338#else 339#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull)) 340#endif 341#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 342#define CVMX_L2C_LCKBASE CVMX_L2C_LCKBASE_FUNC() 343static inline uint64_t CVMX_L2C_LCKBASE_FUNC(void) 344{ 345 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 346 cvmx_warn("CVMX_L2C_LCKBASE not supported on this chip\n"); 347 return CVMX_ADD_IO_SEG(0x0001180080000058ull); 348} 349#else 350#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull)) 351#endif 352#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 353#define CVMX_L2C_LCKOFF CVMX_L2C_LCKOFF_FUNC() 354static inline uint64_t CVMX_L2C_LCKOFF_FUNC(void) 355{ 356 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 357 cvmx_warn("CVMX_L2C_LCKOFF not supported on this chip\n"); 
358 return CVMX_ADD_IO_SEG(0x0001180080000060ull); 359} 360#else 361#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull)) 362#endif 363#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 364#define CVMX_L2C_LFB0 CVMX_L2C_LFB0_FUNC() 365static inline uint64_t CVMX_L2C_LFB0_FUNC(void) 366{ 367 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 368 cvmx_warn("CVMX_L2C_LFB0 not supported on this chip\n"); 369 return CVMX_ADD_IO_SEG(0x0001180080000038ull); 370} 371#else 372#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull)) 373#endif 374#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 375#define CVMX_L2C_LFB1 CVMX_L2C_LFB1_FUNC() 376static inline uint64_t CVMX_L2C_LFB1_FUNC(void) 377{ 378 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 379 cvmx_warn("CVMX_L2C_LFB1 not supported on this chip\n"); 380 return CVMX_ADD_IO_SEG(0x0001180080000040ull); 381} 382#else 383#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull)) 384#endif 385#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 386#define CVMX_L2C_LFB2 CVMX_L2C_LFB2_FUNC() 387static inline uint64_t CVMX_L2C_LFB2_FUNC(void) 388{ 389 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 390 cvmx_warn("CVMX_L2C_LFB2 not supported on this chip\n"); 391 return CVMX_ADD_IO_SEG(0x0001180080000048ull); 392} 393#else 394#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull)) 395#endif 396#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 397#define CVMX_L2C_LFB3 CVMX_L2C_LFB3_FUNC() 398static inline uint64_t CVMX_L2C_LFB3_FUNC(void) 399{ 400 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 401 cvmx_warn("CVMX_L2C_LFB3 not supported on this chip\n"); 402 return CVMX_ADD_IO_SEG(0x00011800800000B8ull); 403} 404#else 405#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull)) 406#endif 407#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 408#define CVMX_L2C_OOB CVMX_L2C_OOB_FUNC() 409static inline uint64_t CVMX_L2C_OOB_FUNC(void) 410{ 411 if 
(!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 412 cvmx_warn("CVMX_L2C_OOB not supported on this chip\n"); 413 return CVMX_ADD_IO_SEG(0x00011800800000D8ull); 414} 415#else 416#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull)) 417#endif 418#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 419#define CVMX_L2C_OOB1 CVMX_L2C_OOB1_FUNC() 420static inline uint64_t CVMX_L2C_OOB1_FUNC(void) 421{ 422 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 423 cvmx_warn("CVMX_L2C_OOB1 not supported on this chip\n"); 424 return CVMX_ADD_IO_SEG(0x00011800800000E0ull); 425} 426#else 427#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull)) 428#endif 429#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 430#define CVMX_L2C_OOB2 CVMX_L2C_OOB2_FUNC() 431static inline uint64_t CVMX_L2C_OOB2_FUNC(void) 432{ 433 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 434 cvmx_warn("CVMX_L2C_OOB2 not supported on this chip\n"); 435 return CVMX_ADD_IO_SEG(0x00011800800000E8ull); 436} 437#else 438#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull)) 439#endif 440#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 441#define CVMX_L2C_OOB3 CVMX_L2C_OOB3_FUNC() 442static inline uint64_t CVMX_L2C_OOB3_FUNC(void) 443{ 444 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 445 cvmx_warn("CVMX_L2C_OOB3 not supported on this chip\n"); 446 return CVMX_ADD_IO_SEG(0x00011800800000F0ull); 447} 448#else 449#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull)) 450#endif 451#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0) 452#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1) 453#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2) 454#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3) 455#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 456#define CVMX_L2C_PFCTL CVMX_L2C_PFCTL_FUNC() 457static inline uint64_t CVMX_L2C_PFCTL_FUNC(void) 458{ 459 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 460 cvmx_warn("CVMX_L2C_PFCTL not supported on this chip\n"); 461 return 
CVMX_ADD_IO_SEG(0x0001180080000090ull); 462} 463#else 464#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull)) 465#endif 466#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 467static inline uint64_t CVMX_L2C_PFCX(unsigned long offset) 468{ 469 if (!( 470 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) || 471 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) || 472 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) || 473 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) || 474 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) || 475 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) || 476 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))))) 477 cvmx_warn("CVMX_L2C_PFCX(%lu) is invalid on this chip\n", offset); 478 return CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8; 479} 480#else 481#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8) 482#endif 483#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 484#define CVMX_L2C_PPGRP CVMX_L2C_PPGRP_FUNC() 485static inline uint64_t CVMX_L2C_PPGRP_FUNC(void) 486{ 487 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 488 cvmx_warn("CVMX_L2C_PPGRP not supported on this chip\n"); 489 return CVMX_ADD_IO_SEG(0x00011800800000C0ull); 490} 491#else 492#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull)) 493#endif 494#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 495static inline uint64_t CVMX_L2C_QOS_IOBX(unsigned long block_id) 496{ 497 if (!( 498 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 499 cvmx_warn("CVMX_L2C_QOS_IOBX(%lu) is invalid on this chip\n", block_id); 500 return CVMX_ADD_IO_SEG(0x0001180080880200ull); 501} 502#else 503#define CVMX_L2C_QOS_IOBX(block_id) (CVMX_ADD_IO_SEG(0x0001180080880200ull)) 504#endif 505#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 506static inline uint64_t CVMX_L2C_QOS_PPX(unsigned long offset) 507{ 508 if (!( 509 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))))) 510 cvmx_warn("CVMX_L2C_QOS_PPX(%lu) is invalid on this 
chip\n", offset); 511 return CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 7) * 8; 512} 513#else 514#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 7) * 8) 515#endif 516#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 517#define CVMX_L2C_QOS_WGT CVMX_L2C_QOS_WGT_FUNC() 518static inline uint64_t CVMX_L2C_QOS_WGT_FUNC(void) 519{ 520 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 521 cvmx_warn("CVMX_L2C_QOS_WGT not supported on this chip\n"); 522 return CVMX_ADD_IO_SEG(0x0001180080800008ull); 523} 524#else 525#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull)) 526#endif 527#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 528static inline uint64_t CVMX_L2C_RSCX_PFC(unsigned long block_id) 529{ 530 if (!( 531 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 532 cvmx_warn("CVMX_L2C_RSCX_PFC(%lu) is invalid on this chip\n", block_id); 533 return CVMX_ADD_IO_SEG(0x0001180080800410ull); 534} 535#else 536#define CVMX_L2C_RSCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800410ull)) 537#endif 538#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 539static inline uint64_t CVMX_L2C_RSDX_PFC(unsigned long block_id) 540{ 541 if (!( 542 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 543 cvmx_warn("CVMX_L2C_RSDX_PFC(%lu) is invalid on this chip\n", block_id); 544 return CVMX_ADD_IO_SEG(0x0001180080800418ull); 545} 546#else 547#define CVMX_L2C_RSDX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800418ull)) 548#endif 549#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 550#define CVMX_L2C_SPAR0 CVMX_L2C_SPAR0_FUNC() 551static inline uint64_t CVMX_L2C_SPAR0_FUNC(void) 552{ 553 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 554 cvmx_warn("CVMX_L2C_SPAR0 not supported on this chip\n"); 555 return CVMX_ADD_IO_SEG(0x0001180080000068ull); 556} 557#else 558#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull)) 559#endif 560#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 561#define CVMX_L2C_SPAR1 CVMX_L2C_SPAR1_FUNC() 562static inline uint64_t 
CVMX_L2C_SPAR1_FUNC(void) 563{ 564 if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) 565 cvmx_warn("CVMX_L2C_SPAR1 not supported on this chip\n"); 566 return CVMX_ADD_IO_SEG(0x0001180080000070ull); 567} 568#else 569#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull)) 570#endif 571#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 572#define CVMX_L2C_SPAR2 CVMX_L2C_SPAR2_FUNC() 573static inline uint64_t CVMX_L2C_SPAR2_FUNC(void) 574{ 575 if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) 576 cvmx_warn("CVMX_L2C_SPAR2 not supported on this chip\n"); 577 return CVMX_ADD_IO_SEG(0x0001180080000078ull); 578} 579#else 580#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull)) 581#endif 582#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 583#define CVMX_L2C_SPAR3 CVMX_L2C_SPAR3_FUNC() 584static inline uint64_t CVMX_L2C_SPAR3_FUNC(void) 585{ 586 if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) 587 cvmx_warn("CVMX_L2C_SPAR3 not supported on this chip\n"); 588 return CVMX_ADD_IO_SEG(0x0001180080000080ull); 589} 590#else 591#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull)) 592#endif 593#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 594#define CVMX_L2C_SPAR4 CVMX_L2C_SPAR4_FUNC() 595static inline uint64_t CVMX_L2C_SPAR4_FUNC(void) 596{ 597 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 598 cvmx_warn("CVMX_L2C_SPAR4 not supported on this chip\n"); 599 return CVMX_ADD_IO_SEG(0x0001180080000088ull); 600} 601#else 602#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull)) 603#endif 604#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 605static inline uint64_t CVMX_L2C_TADX_ECC0(unsigned long block_id) 606{ 607 if (!( 608 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 609 cvmx_warn("CVMX_L2C_TADX_ECC0(%lu) is invalid on this chip\n", block_id); 610 return CVMX_ADD_IO_SEG(0x0001180080A00018ull); 611} 612#else 613#define 
CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull)) 614#endif 615#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 616static inline uint64_t CVMX_L2C_TADX_ECC1(unsigned long block_id) 617{ 618 if (!( 619 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 620 cvmx_warn("CVMX_L2C_TADX_ECC1(%lu) is invalid on this chip\n", block_id); 621 return CVMX_ADD_IO_SEG(0x0001180080A00020ull); 622} 623#else 624#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull)) 625#endif 626#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 627static inline uint64_t CVMX_L2C_TADX_IEN(unsigned long block_id) 628{ 629 if (!( 630 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 631 cvmx_warn("CVMX_L2C_TADX_IEN(%lu) is invalid on this chip\n", block_id); 632 return CVMX_ADD_IO_SEG(0x0001180080A00000ull); 633} 634#else 635#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull)) 636#endif 637#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 638static inline uint64_t CVMX_L2C_TADX_INT(unsigned long block_id) 639{ 640 if (!( 641 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 642 cvmx_warn("CVMX_L2C_TADX_INT(%lu) is invalid on this chip\n", block_id); 643 return CVMX_ADD_IO_SEG(0x0001180080A00028ull); 644} 645#else 646#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull)) 647#endif 648#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 649static inline uint64_t CVMX_L2C_TADX_PFC0(unsigned long block_id) 650{ 651 if (!( 652 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 653 cvmx_warn("CVMX_L2C_TADX_PFC0(%lu) is invalid on this chip\n", block_id); 654 return CVMX_ADD_IO_SEG(0x0001180080A00400ull); 655} 656#else 657#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull)) 658#endif 659#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 660static inline uint64_t CVMX_L2C_TADX_PFC1(unsigned long block_id) 661{ 662 if (!( 663 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 664 cvmx_warn("CVMX_L2C_TADX_PFC1(%lu) is invalid on this 
chip\n", block_id); 665 return CVMX_ADD_IO_SEG(0x0001180080A00408ull); 666} 667#else 668#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull)) 669#endif 670#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 671static inline uint64_t CVMX_L2C_TADX_PFC2(unsigned long block_id) 672{ 673 if (!( 674 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 675 cvmx_warn("CVMX_L2C_TADX_PFC2(%lu) is invalid on this chip\n", block_id); 676 return CVMX_ADD_IO_SEG(0x0001180080A00410ull); 677} 678#else 679#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull)) 680#endif 681#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 682static inline uint64_t CVMX_L2C_TADX_PFC3(unsigned long block_id) 683{ 684 if (!( 685 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 686 cvmx_warn("CVMX_L2C_TADX_PFC3(%lu) is invalid on this chip\n", block_id); 687 return CVMX_ADD_IO_SEG(0x0001180080A00418ull); 688} 689#else 690#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull)) 691#endif 692#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 693static inline uint64_t CVMX_L2C_TADX_PRF(unsigned long block_id) 694{ 695 if (!( 696 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 697 cvmx_warn("CVMX_L2C_TADX_PRF(%lu) is invalid on this chip\n", block_id); 698 return CVMX_ADD_IO_SEG(0x0001180080A00008ull); 699} 700#else 701#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull)) 702#endif 703#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 704static inline uint64_t CVMX_L2C_TADX_TAG(unsigned long block_id) 705{ 706 if (!( 707 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 708 cvmx_warn("CVMX_L2C_TADX_TAG(%lu) is invalid on this chip\n", block_id); 709 return CVMX_ADD_IO_SEG(0x0001180080A00010ull); 710} 711#else 712#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull)) 713#endif 714#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 715#define CVMX_L2C_VER_ID CVMX_L2C_VER_ID_FUNC() 716static inline uint64_t CVMX_L2C_VER_ID_FUNC(void) 717{ 718 
if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 719 cvmx_warn("CVMX_L2C_VER_ID not supported on this chip\n"); 720 return CVMX_ADD_IO_SEG(0x00011800808007E0ull); 721} 722#else 723#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull)) 724#endif 725#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 726#define CVMX_L2C_VER_IOB CVMX_L2C_VER_IOB_FUNC() 727static inline uint64_t CVMX_L2C_VER_IOB_FUNC(void) 728{ 729 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 730 cvmx_warn("CVMX_L2C_VER_IOB not supported on this chip\n"); 731 return CVMX_ADD_IO_SEG(0x00011800808007F0ull); 732} 733#else 734#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull)) 735#endif 736#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 737#define CVMX_L2C_VER_MSC CVMX_L2C_VER_MSC_FUNC() 738static inline uint64_t CVMX_L2C_VER_MSC_FUNC(void) 739{ 740 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 741 cvmx_warn("CVMX_L2C_VER_MSC not supported on this chip\n"); 742 return CVMX_ADD_IO_SEG(0x00011800808007D0ull); 743} 744#else 745#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull)) 746#endif 747#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 748#define CVMX_L2C_VER_PP CVMX_L2C_VER_PP_FUNC() 749static inline uint64_t CVMX_L2C_VER_PP_FUNC(void) 750{ 751 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX))) 752 cvmx_warn("CVMX_L2C_VER_PP not supported on this chip\n"); 753 return CVMX_ADD_IO_SEG(0x00011800808007E8ull); 754} 755#else 756#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull)) 757#endif 758#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 759static inline uint64_t CVMX_L2C_VIRTID_IOBX(unsigned long block_id) 760{ 761 if (!( 762 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))))) 763 cvmx_warn("CVMX_L2C_VIRTID_IOBX(%lu) is invalid on this chip\n", block_id); 764 return CVMX_ADD_IO_SEG(0x00011800808C0200ull); 765} 766#else 767#define CVMX_L2C_VIRTID_IOBX(block_id) (CVMX_ADD_IO_SEG(0x00011800808C0200ull)) 768#endif 769#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 770static inline uint64_t CVMX_L2C_VIRTID_PPX(unsigned long offset) 771{ 772 if (!( 773 
 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
        cvmx_warn("CVMX_L2C_VIRTID_PPX(%lu) is invalid on this chip\n", offset);
    return CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 7) * 8)
#endif

/* Address of L2C_VRT_CTL (virtualization control); CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_L2C_VRT_CTL CVMX_L2C_VRT_CTL_FUNC()
static inline uint64_t CVMX_L2C_VRT_CTL_FUNC(void)
{
    if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
        cvmx_warn("CVMX_L2C_VRT_CTL not supported on this chip\n");
    return CVMX_ADD_IO_SEG(0x0001180080800010ull);
}
#else
#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull))
#endif

/* Address of L2C_VRT_MEM(offset); 1024 entries, 8 bytes apart; CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_L2C_VRT_MEMX(unsigned long offset)
{
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1023)))))
        cvmx_warn("CVMX_L2C_VRT_MEMX(%lu) is invalid on this chip\n", offset);
    return CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8;
}
#else
#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8)
#endif

/* Address of L2C_WPAR_IOB(block_id); single instance (block_id 0); CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_L2C_WPAR_IOBX(unsigned long block_id)
{
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
        cvmx_warn("CVMX_L2C_WPAR_IOBX(%lu) is invalid on this chip\n", block_id);
    return CVMX_ADD_IO_SEG(0x0001180080840200ull);
}
#else
#define CVMX_L2C_WPAR_IOBX(block_id) (CVMX_ADD_IO_SEG(0x0001180080840200ull))
#endif

/* Address of L2C_WPAR_PP(offset); one per PP 0-5, 8 bytes apart; CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_L2C_WPAR_PPX(unsigned long offset)
{
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
        cvmx_warn("CVMX_L2C_WPAR_PPX(%lu) is invalid on this chip\n", offset);
    return CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 7) * 8)
#endif

/* Address of L2C_XMC(block_id)_PFC performance counter; single instance; CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_L2C_XMCX_PFC(unsigned long block_id)
{
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
        cvmx_warn("CVMX_L2C_XMCX_PFC(%lu) is invalid on this chip\n", block_id);
    return CVMX_ADD_IO_SEG(0x0001180080800400ull);
}
#else
#define CVMX_L2C_XMCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800400ull))
#endif

/* Address of L2C_XMC_CMD; CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_L2C_XMC_CMD CVMX_L2C_XMC_CMD_FUNC()
static inline uint64_t CVMX_L2C_XMC_CMD_FUNC(void)
{
    if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
        cvmx_warn("CVMX_L2C_XMC_CMD not supported on this chip\n");
    return CVMX_ADD_IO_SEG(0x0001180080800028ull);
}
#else
#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull))
#endif

/* Address of L2C_XMD(block_id)_PFC performance counter; single instance; CN63XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_L2C_XMDX_PFC(unsigned long block_id)
{
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
        cvmx_warn("CVMX_L2C_XMDX_PFC(%lu) is invalid on this chip\n", block_id);
    return CVMX_ADD_IO_SEG(0x0001180080800408ull);
}
#else
#define CVMX_L2C_XMDX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800408ull))
#endif

/**
 * cvmx_l2c_big_ctl
 *
 * L2C_BIG_CTL = L2C Big memory control register
 *
 *
 * Notes:
 * (1) BIGRD interrupts can occur during normal operation as the PP's are allowed to prefetch to
 *     non-existent memory locations.  Therefore, BIGRD is for informational purposes only.
 *
 * (2) When HOLEWR/BIGWR blocks a store L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB, and L2C_VER_MSC will be
 *     loaded just like a store which is blocked by VRTWR.  Additionally, L2C_ERR_XMC will be loaded.
 */
union cvmx_l2c_big_ctl
{
    uint64_t u64;
    struct cvmx_l2c_big_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_8_63                : 56;
    uint64_t maxdram                      : 4;  /**< Amount of configured DRAM
                                                     0 = reserved
                                                     1 = 512MB
                                                     2 = 1GB
                                                     3 = 2GB
                                                     4 = 4GB
                                                     5 = 8GB
                                                     6 = 16GB
                                                     7 = 32GB
                                                     8 = 64GB  (**reserved in 63xx**)
                                                     9 = 128GB (**reserved in 63xx**)
                                                     10-15 reserved
                                                     Violations of this limit causes
                                                     L2C to set L2C_INT_REG[BIGRD/BIGWR]. */
    uint64_t reserved_1_3                 : 3;
    uint64_t disable                      : 1;  /**< When set, disables the BIGWR/BIGRD logic completely
                                                     and reverts HOLEWR to 63xx pass 1.x behavior.
                                                     When clear, BIGWR and HOLEWR block stores in the
                                                     same manner as the VRT logic, and BIGRD is reported. */
#else
    uint64_t disable                      : 1;
    uint64_t reserved_1_3                 : 3;
    uint64_t maxdram                      : 4;
    uint64_t reserved_8_63                : 56;
#endif
    } s;
    struct cvmx_l2c_big_ctl_s             cn63xx;
};
typedef union cvmx_l2c_big_ctl cvmx_l2c_big_ctl_t;

/**
 * cvmx_l2c_bst
 *
 * L2C_BST = L2C BIST Status
 *
 */
union cvmx_l2c_bst
{
    uint64_t u64;
    struct cvmx_l2c_bst_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_38_63               : 26;
    uint64_t dutfl                        : 6;  /**< BIST failure status for PP0-5 DUT */
    uint64_t reserved_17_31               : 15;
    uint64_t ioccmdfl                     : 1;  /**< BIST failure status for IOCCMD */
    uint64_t reserved_13_15               : 3;
    uint64_t iocdatfl                     : 1;  /**< BIST failure status for IOCDAT */
    uint64_t reserved_9_11                : 3;
    uint64_t dutresfl                     : 1;  /**< BIST failure status for DUTRES */
    uint64_t reserved_5_7                 : 3;
    uint64_t vrtfl                        : 1;  /**< BIST failure status for VRT0 */
    uint64_t reserved_1_3                 : 3;
    uint64_t tdffl                        : 1;  /**< BIST failure status for TDF0 */
#else
    uint64_t tdffl                        : 1;
    uint64_t reserved_1_3                 : 3;
    uint64_t vrtfl                        : 1;
    uint64_t reserved_5_7                 : 3;
    uint64_t dutresfl                     : 1;
    uint64_t reserved_9_11                : 3;
    uint64_t iocdatfl                     : 1;
    uint64_t reserved_13_15               : 3;
    uint64_t ioccmdfl                     : 1;
    uint64_t reserved_17_31               : 15;
    uint64_t dutfl                        : 6;
    uint64_t reserved_38_63               : 26;
#endif
    } s;
    struct cvmx_l2c_bst_s                 cn63xx;
    struct cvmx_l2c_bst_s                 cn63xxp1;
};
typedef union cvmx_l2c_bst cvmx_l2c_bst_t;

/**
 * cvmx_l2c_bst0
 *
 * L2C_BST0 = L2C BIST 0 CTL/STAT
 *
 */
union cvmx_l2c_bst0
{
    uint64_t u64;
    struct cvmx_l2c_bst0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t dtbnk                        : 1;  /**< DuTag Bank#
                                                     When DT=1(BAD), this field provides additional information
                                                     about which DuTag Bank (0/1) failed. */
    uint64_t wlb_msk                      : 4;  /**< Bist Results for WLB-MSK RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t dtcnt                        : 13; /**< DuTag BiST Counter (used to help isolate the failure)
                                                     [12]:    i (0=FORWARD/1=REVERSE pass)
                                                     [11:10]: j (Pattern# 1 of 4)
                                                     [9:4]:   k (DT Index 1 of 64)
                                                     [3:0]:   l (DT# 1 of 16 DTs) */
    uint64_t dt                           : 1;  /**< Bist Results for DuTAG RAM(s)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t stin_msk                     : 1;  /**< Bist Results for STIN-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t wlb_dat                      : 4;  /**< Bist Results for WLB-DAT RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t wlb_dat                      : 4;
    uint64_t stin_msk                     : 1;
    uint64_t dt                           : 1;
    uint64_t dtcnt                        : 13;
    uint64_t wlb_msk                      : 4;
    uint64_t dtbnk                        : 1;
    uint64_t reserved_24_63               : 40;
#endif
    } s;
    struct cvmx_l2c_bst0_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_23_63               : 41;
    uint64_t wlb_msk                      : 4;  /**< Bist Results for WLB-MSK RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_15_18               : 4;
    uint64_t dtcnt                        : 9;  /**< DuTag BiST Counter (used to help isolate the failure)
                                                     [8]:   i (0=FORWARD/1=REVERSE pass)
                                                     [7:6]: j (Pattern# 1 of 4)
                                                     [5:0]: k (DT Index 1 of 64) */
    uint64_t dt                           : 1;  /**< Bist Results for DuTAG RAM(s)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_4_4                 : 1;
    uint64_t wlb_dat                      : 4;  /**< Bist Results for WLB-DAT RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t wlb_dat                      : 4;
    uint64_t reserved_4_4                 : 1;
    uint64_t dt                           : 1;
    uint64_t dtcnt                        : 9;
    uint64_t reserved_15_18               : 4;
    uint64_t wlb_msk                      : 4;
    uint64_t reserved_23_63               : 41;
#endif
    } cn30xx;
    struct cvmx_l2c_bst0_cn31xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_23_63               : 41;
    uint64_t wlb_msk                      : 4;  /**< Bist Results for WLB-MSK RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_16_18               : 3;
    uint64_t dtcnt                        : 10; /**< DuTag BiST Counter (used to help isolate the failure)
                                                     [9]:   i (0=FORWARD/1=REVERSE pass)
                                                     [8:7]: j (Pattern# 1 of 4)
                                                     [6:1]: k (DT Index 1 of 64)
                                                     [0]:   l (DT# 1 of 2 DTs) */
    uint64_t dt                           : 1;  /**< Bist Results for DuTAG RAM(s)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t stin_msk                     : 1;  /**< Bist Results for STIN-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t wlb_dat                      : 4;  /**< Bist Results for WLB-DAT RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t wlb_dat                      : 4;
    uint64_t stin_msk                     : 1;
    uint64_t dt                           : 1;
    uint64_t dtcnt                        : 10;
    uint64_t reserved_16_18               : 3;
    uint64_t wlb_msk                      : 4;
    uint64_t reserved_23_63               : 41;
#endif
    } cn31xx;
    struct cvmx_l2c_bst0_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63               : 45;
    uint64_t dtcnt                        : 13; /**< DuTag BiST Counter (used to help isolate the failure)
                                                     [12]:    i (0=FORWARD/1=REVERSE pass)
                                                     [11:10]: j (Pattern# 1 of 4)
                                                     [9:4]:   k (DT Index 1 of 64)
                                                     [3:0]:   l (DT# 1 of 16 DTs) */
    uint64_t dt                           : 1;  /**< Bist Results for DuTAG RAM(s)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t stin_msk                     : 1;  /**< Bist Results for STIN-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t wlb_dat                      : 4;  /**< Bist Results for WLB-DAT RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t wlb_dat                      : 4;
    uint64_t stin_msk                     : 1;
    uint64_t dt                           : 1;
    uint64_t dtcnt                        : 13;
    uint64_t reserved_19_63               : 45;
#endif
    } cn38xx;
    struct cvmx_l2c_bst0_cn38xx           cn38xxp2;
    struct cvmx_l2c_bst0_cn50xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t dtbnk                        : 1;  /**< DuTag Bank#
                                                     When DT=1(BAD), this field provides additional information
                                                     about which DuTag Bank (0/1) failed. */
    uint64_t wlb_msk                      : 4;  /**< Bist Results for WLB-MSK RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_16_18               : 3;
    uint64_t dtcnt                        : 10; /**< DuTag BiST Counter (used to help isolate the failure)
                                                     [9]:   i (0=FORWARD/1=REVERSE pass)
                                                     [8:7]: j (Pattern# 1 of 4)
                                                     [6:1]: k (DT Index 1 of 64)
                                                     [0]:   l (DT# 1 of 2 DTs) */
    uint64_t dt                           : 1;  /**< Bist Results for DuTAG RAM(s)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t stin_msk                     : 1;  /**< Bist Results for STIN-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t wlb_dat                      : 4;  /**< Bist Results for WLB-DAT RAM [DP0-3]
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t wlb_dat                      : 4;
    uint64_t stin_msk                     : 1;
    uint64_t dt                           : 1;
    uint64_t dtcnt                        : 10;
    uint64_t reserved_16_18               : 3;
    uint64_t wlb_msk                      : 4;
    uint64_t dtbnk                        : 1;
    uint64_t reserved_24_63               : 40;
#endif
    } cn50xx;
    struct cvmx_l2c_bst0_cn50xx           cn52xx;
    struct cvmx_l2c_bst0_cn50xx           cn52xxp1;
    struct cvmx_l2c_bst0_s                cn56xx;
    struct cvmx_l2c_bst0_s                cn56xxp1;
    struct cvmx_l2c_bst0_s                cn58xx;
    struct cvmx_l2c_bst0_s                cn58xxp1;
};
typedef union cvmx_l2c_bst0 cvmx_l2c_bst0_t;

/**
 * cvmx_l2c_bst1
 *
 * L2C_BST1 = L2C BIST 1 CTL/STAT
 *
 */
union cvmx_l2c_bst1
{
    uint64_t u64;
    struct cvmx_l2c_bst1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_9_63                : 55;
    uint64_t l2t                          : 9;  /**< Bist Results for L2T (USE+8SET RAMs)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t l2t                          : 9;
    uint64_t reserved_9_63                : 55;
#endif
    } s;
    struct cvmx_l2c_bst1_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t vwdf                         : 4;  /**< Bist Results for VWDF RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t lrf                          : 2;  /**< Bist Results for LRF RAMs (PLC+ILC)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vab_vwcf                     : 1;  /**< Bist Results for VAB VWCF_MEM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_5_8                 : 4;
    uint64_t l2t                          : 5;  /**< Bist Results for L2T (USE+4SET RAMs)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t l2t                          : 5;
    uint64_t reserved_5_8                 : 4;
    uint64_t vab_vwcf                     : 1;
    uint64_t lrf                          : 2;
    uint64_t vwdf                         : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn30xx;
    struct cvmx_l2c_bst1_cn30xx           cn31xx;
    struct cvmx_l2c_bst1_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t vwdf                         : 4;  /**< Bist Results for VWDF RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t lrf                          : 2;  /**< Bist Results for LRF RAMs (PLC+ILC)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vab_vwcf                     : 1;  /**< Bist Results for VAB VWCF_MEM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t l2t                          : 9;  /**< Bist Results for L2T (USE+8SET RAMs)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t l2t                          : 9;
    uint64_t vab_vwcf                     : 1;
    uint64_t lrf                          : 2;
    uint64_t vwdf                         : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn38xx;
    struct cvmx_l2c_bst1_cn38xx           cn38xxp2;
    struct cvmx_l2c_bst1_cn38xx           cn50xx;
    struct cvmx_l2c_bst1_cn52xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63               : 45;
    uint64_t plc2                         : 1;  /**< Bist Results for PLC2 RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t plc1                         : 1;  /**< Bist Results for PLC1 RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t plc0                         : 1;  /**< Bist Results for PLC0 RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vwdf                         : 4;  /**< Bist Results for VWDF RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_11_11               : 1;
    uint64_t ilc                          : 1;  /**< Bist Results for ILC RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vab_vwcf                     : 1;  /**< Bist Results for VAB VWCF_MEM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t l2t                          : 9;  /**< Bist Results for L2T (USE+8SET RAMs)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t l2t                          : 9;
    uint64_t vab_vwcf                     : 1;
    uint64_t ilc                          : 1;
    uint64_t reserved_11_11               : 1;
    uint64_t vwdf                         : 4;
    uint64_t plc0                         : 1;
    uint64_t plc1                         : 1;
    uint64_t plc2                         : 1;
    uint64_t reserved_19_63               : 45;
#endif
    } cn52xx;
    struct cvmx_l2c_bst1_cn52xx           cn52xxp1;
    struct cvmx_l2c_bst1_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t plc2                         : 1;  /**< Bist Results for LRF RAMs (ILC)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t plc1                         : 1;  /**< Bist Results for LRF RAMs (ILC)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t plc0                         : 1;  /**< Bist Results for LRF RAMs (ILC)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t ilc                          : 1;  /**< Bist Results for LRF RAMs (ILC)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vwdf1                        : 4;  /**< Bist Results for VWDF1 RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vwdf0                        : 4;  /**< Bist Results for VWDF0 RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t vab_vwcf1                    : 1;  /**< Bist Results for VAB VWCF1_MEM */
    uint64_t reserved_10_10               : 1;
    uint64_t vab_vwcf0                    : 1;  /**< Bist Results for VAB VWCF0_MEM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t l2t                          : 9;  /**< Bist Results for L2T (USE+8SET RAMs)
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t l2t                          : 9;
    uint64_t vab_vwcf0                    : 1;
    uint64_t reserved_10_10               : 1;
    uint64_t vab_vwcf1                    : 1;
    uint64_t vwdf0                        : 4;
    uint64_t vwdf1                        : 4;
    uint64_t ilc                          : 1;
    uint64_t plc0                         : 1;
    uint64_t plc1                         : 1;
    uint64_t plc2                         : 1;
    uint64_t reserved_24_63               : 40;
#endif
    } cn56xx;
    struct cvmx_l2c_bst1_cn56xx           cn56xxp1;
    struct cvmx_l2c_bst1_cn38xx           cn58xx;
    struct cvmx_l2c_bst1_cn38xx           cn58xxp1;
};
typedef union cvmx_l2c_bst1 cvmx_l2c_bst1_t;

/**
 * cvmx_l2c_bst2
 *
 * L2C_BST2 = L2C BIST 2 CTL/STAT
 *
 */
union cvmx_l2c_bst2
{
    uint64_t u64;
    struct cvmx_l2c_bst2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t mrb                          : 4;  /**< Bist Results for MRB RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_4_11                : 8;
    uint64_t ipcbst                       : 1;  /**< Bist Results for RFB IPC RAM
                                                     - 1: BAD */
    uint64_t picbst                       : 1;  /**< Bist Results for RFB PIC RAM
                                                     - 1: BAD */
    uint64_t xrdmsk                       : 1;  /**< Bist Results for RFB XRD-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t xrddat                       : 1;  /**< Bist Results for RFB XRD-DAT RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t xrddat                       : 1;
    uint64_t xrdmsk                       : 1;
    uint64_t picbst                       : 1;
    uint64_t ipcbst                       : 1;
    uint64_t reserved_4_11                : 8;
    uint64_t mrb                          : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } s;
    struct cvmx_l2c_bst2_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t mrb                          : 4;  /**< Bist Results for MRB RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t rmdf                         : 4;  /**< Bist Results for RMDF RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_4_7                 : 4;
    uint64_t ipcbst                       : 1;  /**< Bist Results for RFB IPC RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t reserved_2_2                 : 1;
    uint64_t xrdmsk                       : 1;  /**< Bist Results for RFB XRD-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t xrddat                       : 1;  /**< Bist Results for RFB XRD-DAT RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t xrddat                       : 1;
    uint64_t xrdmsk                       : 1;
    uint64_t reserved_2_2                 : 1;
    uint64_t ipcbst                       : 1;
    uint64_t reserved_4_7                 : 4;
    uint64_t rmdf                         : 4;
    uint64_t mrb                          : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn30xx;
    struct cvmx_l2c_bst2_cn30xx           cn31xx;
    struct cvmx_l2c_bst2_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t mrb                          : 4;  /**< Bist Results for MRB RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t rmdf                         : 4;  /**< Bist Results for RMDF RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t rhdf                         : 4;  /**< Bist Results for RHDF RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t ipcbst                       : 1;  /**< Bist Results for RFB IPC RAM
                                                     - 1: BAD */
    uint64_t picbst                       : 1;  /**< Bist Results for RFB PIC RAM
                                                     - 1: BAD */
    uint64_t xrdmsk                       : 1;  /**< Bist Results for RFB XRD-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t xrddat                       : 1;  /**< Bist Results for RFB XRD-DAT RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t xrddat                       : 1;
    uint64_t xrdmsk                       : 1;
    uint64_t picbst                       : 1;
    uint64_t ipcbst                       : 1;
    uint64_t rhdf                         : 4;
    uint64_t rmdf                         : 4;
    uint64_t mrb                          : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn38xx;
    struct cvmx_l2c_bst2_cn38xx           cn38xxp2;
    struct cvmx_l2c_bst2_cn30xx           cn50xx;
    struct cvmx_l2c_bst2_cn30xx           cn52xx;
    struct cvmx_l2c_bst2_cn30xx           cn52xxp1;
    struct cvmx_l2c_bst2_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63               : 48;
    uint64_t mrb                          : 4;  /**< Bist Results for MRB RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t rmdb                         : 4;  /**< Bist Results for RMDB RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t rhdb                         : 4;  /**< Bist Results for RHDB RAMs
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t ipcbst                       : 1;  /**< Bist Results for RFB IPC RAM
                                                     - 1: BAD */
    uint64_t picbst                       : 1;  /**< Bist Results for RFB PIC RAM
                                                     - 1: BAD */
    uint64_t xrdmsk                       : 1;  /**< Bist Results for RFB XRD-MSK RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
    uint64_t xrddat                       : 1;  /**< Bist Results for RFB XRD-DAT RAM
                                                     - 0: GOOD (or bist in progress/never run)
                                                     - 1: BAD */
#else
    uint64_t xrddat                       : 1;
    uint64_t xrdmsk                       : 1;
    uint64_t picbst                       : 1;
    uint64_t ipcbst                       : 1;
    uint64_t rhdb                         : 4;
    uint64_t rmdb                         : 4;
    uint64_t mrb                          : 4;
    uint64_t reserved_16_63               : 48;
#endif
    } cn56xx;
    struct cvmx_l2c_bst2_cn56xx           cn56xxp1;
    struct cvmx_l2c_bst2_cn56xx           cn58xx;
    struct cvmx_l2c_bst2_cn56xx           cn58xxp1;
};
typedef union cvmx_l2c_bst2 cvmx_l2c_bst2_t;

/**
 * cvmx_l2c_bst_mem#
 *
 * L2C_BST_MEM = L2C MEM BIST Status
 *
 *
 * Notes:
 * (1) CLEAR_BIST must be written to 1 before START_BIST is written to 1 using a separate CSR write.
 *
 * (2) CLEAR_BIST must not be changed after writing START_BIST to 1 until the BIST operation completes
 *     (indicated by START_BIST returning to 0) or operation is undefined.
 */
union cvmx_l2c_bst_memx
{
    uint64_t u64;
    struct cvmx_l2c_bst_memx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t start_bist                   : 1;  /**< When written to 1, starts BIST.  Will read 1 until
                                                     BIST is complete (see Note). */
    uint64_t clear_bist                   : 1;  /**< When BIST is triggered, run clear BIST (see Note) */
    uint64_t reserved_5_61                : 57;
    uint64_t rdffl                        : 1;  /**< BIST failure status for RDF */
    uint64_t vbffl                        : 4;  /**< BIST failure status for VBF0-3 */
#else
    uint64_t vbffl                        : 4;
    uint64_t rdffl                        : 1;
    uint64_t reserved_5_61                : 57;
    uint64_t clear_bist                   : 1;
    uint64_t start_bist                   : 1;
#endif
    } s;
    struct cvmx_l2c_bst_memx_s            cn63xx;
    struct cvmx_l2c_bst_memx_s            cn63xxp1;
};
typedef union cvmx_l2c_bst_memx cvmx_l2c_bst_memx_t;

/**
 * cvmx_l2c_bst_tdt#
 *
 * L2C_BST_TDT = L2C TAD DaTa BIST Status
 *
 */
union cvmx_l2c_bst_tdtx
{
    uint64_t u64;
    struct cvmx_l2c_bst_tdtx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63               : 32;
    uint64_t fbfrspfl                     : 8;  /**< BIST failure status for quad 0-7 FBF RSP read port */
    uint64_t sbffl                        : 8;  /**< BIST failure status for quad 0-7 SBF */
    uint64_t fbffl                        : 8;  /**< BIST failure status for quad 0-7 FBF */
    uint64_t l2dfl                        : 8;  /**< BIST failure status for quad 0-7 L2D */
#else
    uint64_t l2dfl                        : 8;
    uint64_t fbffl                        : 8;
    uint64_t sbffl                        : 8;
    uint64_t fbfrspfl                     : 8;
    uint64_t reserved_32_63               : 32;
#endif
    } s;
    struct cvmx_l2c_bst_tdtx_s            cn63xx;
    struct cvmx_l2c_bst_tdtx_cn63xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_24_63               : 40;
    uint64_t sbffl                        : 8;  /**< BIST failure status for quad 0-7 SBF */
    uint64_t fbffl                        : 8;  /**< BIST failure status for quad 0-7 FBF */
    uint64_t l2dfl                        : 8;  /**< BIST failure status for quad 0-7 L2D */
#else
    uint64_t l2dfl                        : 8;
    uint64_t fbffl                        : 8;
    uint64_t sbffl                        : 8;
    uint64_t reserved_24_63               : 40;
#endif
    } cn63xxp1;
};
typedef union cvmx_l2c_bst_tdtx cvmx_l2c_bst_tdtx_t;

/**
 * cvmx_l2c_bst_ttg#
 *
 * L2C_BST_TTG = L2C TAD TaG BIST Status
 *
 */
union cvmx_l2c_bst_ttgx
{
    uint64_t u64;
    struct cvmx_l2c_bst_ttgx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_17_63               : 47;
    uint64_t lrufl                        : 1;  /**< BIST failure status for tag LRU */
    uint64_t tagfl                        : 16; /**< BIST failure status for tag ways 0-15 */
#else
    uint64_t tagfl                        : 16;
    uint64_t lrufl                        : 1;
    uint64_t reserved_17_63               : 47;
#endif
    } s;
    struct cvmx_l2c_bst_ttgx_s            cn63xx;
    struct cvmx_l2c_bst_ttgx_s            cn63xxp1;
};
typedef union cvmx_l2c_bst_ttgx cvmx_l2c_bst_ttgx_t;

/**
 * cvmx_l2c_cfg
 *
 * Specify the RSL base addresses for the block
 *
 * L2C_CFG = L2C Configuration
 *
 * Description:
 */
union cvmx_l2c_cfg
{
    uint64_t u64;
    struct cvmx_l2c_cfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63               : 44;
    uint64_t bstrun                       : 1;  /**< L2 Data Store Bist Running
                                                     Indicates when the L2C HW Bist sequence(short or long) is
                                                     running. [L2C ECC Bist FSM is not in the RESET/DONE state] */
    uint64_t lbist                        : 1;  /**< L2C Data Store Long Bist Sequence
                                                     When the previous state was '0' and SW writes a '1',
                                                     the long bist sequence (enhanced 13N March) is performed.
                                                     SW can then read the L2C_CFG[BSTRUN] which will indicate
                                                     that the long bist sequence is running. When BSTRUN-=0,
                                                     the state of the L2D_BST[0-3] registers contain information
                                                     which reflects the status of the recent long bist sequence.
                                                     NOTE: SW must never write LBIST=0 while Long Bist is running
                                                     (ie: when BSTRUN=1 never write LBIST=0).
                                                     NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS
                                                     Fuse is blown. */
    uint64_t xor_bank                     : 1;  /**< L2C XOR Bank Bit
                                                     When both LMC's are enabled(DPRES1=1/DPRES0=1), this
                                                     bit determines how addresses are assigned to
                                                     LMC port(s).
                                                     XOR_BANK| LMC#
                                                     ----------+---------------------------------
                                                         0     | byte address[7]
                                                         1     | byte address[7] XOR byte address[12]
                                                     Example: If both LMC ports are enabled (DPRES1=1/DPRES0=1)
                                                     and XOR_BANK=1, then addr[7] XOR addr[12] is used to determine
                                                     which LMC Port# a reference is directed to. */
    uint64_t dpres1                       : 1;  /**< DDR1 Present/LMC1 Enable
                                                     When DPRES1 is set, LMC#1 is enabled(DDR1 pins at
                                                     the BOTTOM of the chip are active).
                                                     NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1),
                                                     see XOR_BANK bit to determine how a reference is
                                                     assigned to a DDR/LMC port. (Also, in dual-LMC configuration,
                                                     the address sent to the targeted LMC port is the
                                                     address shifted right by one).
                                                     NOTE: For power-savings, the DPRES1 is also used to
                                                     disable DDR1/LMC1 clocks. */
    uint64_t dpres0                       : 1;  /**< DDR0 Present/LMC0 Enable
                                                     When DPRES0 is set, LMC#0 is enabled(DDR0 pins at
                                                     the BOTTOM of the chip are active).
                                                     NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1),
                                                     see XOR_BANK bit to determine how a reference is
                                                     assigned to a DDR/LMC port. (Also, in dual-LMC configuration,
                                                     the address sent to the targeted LMC port is the
                                                     address shifted right by one).
                                                     NOTE: For power-savings, the DPRES0 is also used to
                                                     disable DDR0/LMC0 clocks. */
    uint64_t dfill_dis                    : 1;  /**< L2C Dual Fill Disable
                                                     When set, the L2C dual-fill performance feature is
                                                     disabled.
                                                     NOTE: This bit is only intended to evaluate the
                                                     effectiveness of the dual-fill feature. For OPTIMAL
                                                     performance, this bit should ALWAYS be zero. */
    uint64_t fpexp                        : 4;  /**< [CYA] Forward Progress Counter Exponent
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When FPEN is enabled and the LFB is empty, the
                                                     forward progress counter (FPCNT) is initialized to:
                                                        FPCNT[24:0] = 2^(9+FPEXP)
                                                     When the LFB is non-empty the FPCNT is decremented
                                                     (every eclk interval). If the FPCNT reaches zero,
                                                     the LFB no longer accepts new requests until either
                                                        a) all of the current LFB entries have completed
                                                           (to ensure forward progress).
                                                        b) FPEMPTY=0 and another forward progress count
                                                           interval timeout expires.
                                                     EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
                                                     (For eclk=500MHz(2ns), this would be ~4us). */
    uint64_t fpempty                      : 1;  /**< [CYA] Forward Progress Counter Empty
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When set, if the forward progress counter expires,
                                                     all new LFB-NQs are stopped UNTIL all current LFB
                                                     entries have completed.
                                                     When clear, if the forward progress counter expires,
                                                     all new LFB-NQs are stopped UNTIL either
                                                        a) all current LFB entries have completed.
                                                        b) another forward progress interval expires
                                                     NOTE: We may want to FREEZE/HANG the system when
                                                     we encounter an LFB entry cannot complete, and there
                                                     may be times when we want to allow further LFB-NQs
                                                     to be permitted to help in further analyzing the
                                                     source */
    uint64_t fpen                         : 1;  /**< [CYA] Forward Progress Counter Enable
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When set, enables the Forward Progress Counter to
                                                     prevent new LFB entries from enqueueing until ALL
                                                     current LFB entries have completed. */
    uint64_t idxalias                     : 1;  /**< L2C Index Alias Enable
                                                     When set, the L2 Tag/Data Store will alias the 11-bit
                                                     index with the low order 11-bits of the tag.
                                                        index[17:7] =  (tag[28:18] ^ index[17:7])
                                                     NOTE: This bit must only be modified at boot time,
                                                     when it can be guaranteed that no blocks have been
                                                     loaded into the L2 Cache.
                                                     The index aliasing is a performance enhancement feature
                                                     which reduces the L2 cache thrashing experienced for
                                                     regular stride references.
                                                     NOTE: The index alias is stored in the LFB and VAB, and
                                                     its effects are reversed for memory references (Victims,
                                                     STT-Misses and Read-Misses) */
    uint64_t mwf_crd                      : 4;  /**< MWF Credit Threshold: When the remaining MWF credits
                                                     become less than or equal to the MWF_CRD, the L2C will
                                                     assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
                                                     writes (victims) higher priority. */
    uint64_t rsp_arb_mode                 : 1;  /**< RSP Arbitration Mode:
                                                     - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
                                                     - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
                                                          RHCF(RdHit), STRSP(ST RSP w/ invalidate),
                                                          STRSC(ST RSP no invalidate)] */
    uint64_t rfb_arb_mode                 : 1;  /**< RFB Arbitration Mode:
                                                     - 0: Fixed Priority -
                                                          IOB->PP requests are higher priority than
                                                          PP->IOB requests
                                                     - 1: Round Robin -
                                                          I/O requests from PP and IOB are serviced in
                                                          round robin */
    uint64_t lrf_arb_mode                 : 1;  /**< RF Arbitration Mode:
                                                     - 0: Fixed Priority -
                                                          IOB memory requests are higher priority than PP
                                                          memory requests.
                                                     - 1: Round Robin -
                                                          Memory requests from PP and IOB are serviced in
                                                          round robin. */
#else
    uint64_t lrf_arb_mode                 : 1;
    uint64_t rfb_arb_mode                 : 1;
    uint64_t rsp_arb_mode                 : 1;
    uint64_t mwf_crd                      : 4;
    uint64_t idxalias                     : 1;
    uint64_t fpen                         : 1;
    uint64_t fpempty                      : 1;
    uint64_t fpexp                        : 4;
    uint64_t dfill_dis                    : 1;
    uint64_t dpres0                       : 1;
    uint64_t dpres1                       : 1;
    uint64_t xor_bank                     : 1;
    uint64_t lbist                        : 1;
    uint64_t bstrun                       : 1;
    uint64_t reserved_20_63               : 44;
#endif
    } s;
    struct cvmx_l2c_cfg_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_14_63               : 50;
    uint64_t fpexp                        : 4;  /**< [CYA] Forward Progress Counter Exponent
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When FPEN is enabled and the LFB is empty, the
                                                     forward progress counter (FPCNT) is initialized to:
                                                        FPCNT[24:0] = 2^(9+FPEXP)
                                                     When the LFB is non-empty the FPCNT is decremented
                                                     (every eclk interval). If the FPCNT reaches zero,
                                                     the LFB no longer accepts new requests until either
                                                        a) all of the current LFB entries have completed
                                                           (to ensure forward progress).
                                                        b) FPEMPTY=0 and another forward progress count
                                                           interval timeout expires.
                                                     EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
                                                     (For eclk=500MHz(2ns), this would be ~4us). */
    uint64_t fpempty                      : 1;  /**< [CYA] Forward Progress Counter Empty
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When set, if the forward progress counter expires,
                                                     all new LFB-NQs are stopped UNTIL all current LFB
                                                     entries have completed.
                                                     When clear, if the forward progress counter expires,
                                                     all new LFB-NQs are stopped UNTIL either
                                                        a) all current LFB entries have completed.
                                                        b) another forward progress interval expires
                                                     NOTE: We may want to FREEZE/HANG the system when
                                                     we encounter an LFB entry cannot complete, and there
                                                     may be times when we want to allow further LFB-NQs
                                                     to be permitted to help in further analyzing the
                                                     source */
    uint64_t fpen                         : 1;  /**< [CYA] Forward Progress Counter Enable
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When set, enables the Forward Progress Counter to
                                                     prevent new LFB entries from enqueueing until ALL
                                                     current LFB entries have completed. */
    uint64_t idxalias                     : 1;  /**< L2C Index Alias Enable
                                                     When set, the L2 Tag/Data Store will alias the 8-bit
                                                     index with the low order 8-bits of the tag.
                                                        index[14:7] =  (tag[22:15] ^ index[14:7])
                                                     NOTE: This bit must only be modified at boot time,
                                                     when it can be guaranteed that no blocks have been
                                                     loaded into the L2 Cache.
                                                     The index aliasing is a performance enhancement feature
                                                     which reduces the L2 cache thrashing experienced for
                                                     regular stride references.
                                                     NOTE: The index alias is stored in the LFB and VAB, and
                                                     its effects are reversed for memory references (Victims,
                                                     STT-Misses and Read-Misses) */
    uint64_t mwf_crd                      : 4;  /**< MWF Credit Threshold: When the remaining MWF credits
                                                     become less than or equal to the MWF_CRD, the L2C will
                                                     assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
                                                     writes (victims) higher priority. */
    uint64_t rsp_arb_mode                 : 1;  /**< RSP Arbitration Mode:
                                                     - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
                                                     - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
                                                          RHCF(RdHit), STRSP(ST RSP w/ invalidate),
                                                          STRSC(ST RSP no invalidate)] */
    uint64_t rfb_arb_mode                 : 1;  /**< RFB Arbitration Mode:
                                                     - 0: Fixed Priority -
                                                          IOB->PP requests are higher priority than
                                                          PP->IOB requests
                                                     - 1: Round Robin -
                                                          I/O requests from PP and IOB are serviced in
                                                          round robin */
    uint64_t lrf_arb_mode                 : 1;  /**< RF Arbitration Mode:
                                                     - 0: Fixed Priority -
                                                          IOB memory requests are higher priority than PP
                                                          memory requests.
                                                     - 1: Round Robin -
                                                          Memory requests from PP and IOB are serviced in
                                                          round robin. */
#else
    uint64_t lrf_arb_mode                 : 1;
    uint64_t rfb_arb_mode                 : 1;
    uint64_t rsp_arb_mode                 : 1;
    uint64_t mwf_crd                      : 4;
    uint64_t idxalias                     : 1;
    uint64_t fpen                         : 1;
    uint64_t fpempty                      : 1;
    uint64_t fpexp                        : 4;
    uint64_t reserved_14_63               : 50;
#endif
    } cn30xx;
    struct cvmx_l2c_cfg_cn30xx            cn31xx;
    struct cvmx_l2c_cfg_cn30xx            cn38xx;
    struct cvmx_l2c_cfg_cn30xx            cn38xxp2;
    struct cvmx_l2c_cfg_cn50xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63               : 44;
    uint64_t bstrun                       : 1;  /**< L2 Data Store Bist Running
                                                     Indicates when the L2C HW Bist sequence(short or long) is
                                                     running. [L2C ECC Bist FSM is not in the RESET/DONE state] */
    uint64_t lbist                        : 1;  /**< L2C Data Store Long Bist Sequence
                                                     When the previous state was '0' and SW writes a '1',
                                                     the long bist sequence (enhanced 13N March) is performed.
                                                     SW can then read the L2C_CFG[BSTRUN] which will indicate
                                                     that the long bist sequence is running. When BSTRUN-=0,
                                                     the state of the L2D_BST[0-3] registers contain information
                                                     which reflects the status of the recent long bist sequence.
                                                     NOTE: SW must never write LBIST=0 while Long Bist is running
                                                     (ie: when BSTRUN=1 never write LBIST=0). */
    uint64_t reserved_14_17               : 4;
    uint64_t fpexp                        : 4;  /**< [CYA] Forward Progress Counter Exponent
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When FPEN is enabled and the LFB is empty, the
                                                     forward progress counter (FPCNT) is initialized to:
                                                        FPCNT[24:0] = 2^(9+FPEXP)
                                                     When the LFB is non-empty the FPCNT is decremented
                                                     (every eclk interval). If the FPCNT reaches zero,
                                                     the LFB no longer accepts new requests until either
                                                        a) all of the current LFB entries have completed
                                                           (to ensure forward progress).
                                                        b) FPEMPTY=0 and another forward progress count
                                                           interval timeout expires.
                                                     EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
                                                     (For eclk=500MHz(2ns), this would be ~4us). */
    uint64_t fpempty                      : 1;  /**< [CYA] Forward Progress Counter Empty
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When set, if the forward progress counter expires,
                                                     all new LFB-NQs are stopped UNTIL all current LFB
                                                     entries have completed.
                                                     When clear, if the forward progress counter expires,
                                                     all new LFB-NQs are stopped UNTIL either
                                                        a) all current LFB entries have completed.
                                                        b) another forward progress interval expires
                                                     NOTE: We may want to FREEZE/HANG the system when
                                                     we encounter an LFB entry cannot complete, and there
                                                     may be times when we want to allow further LFB-NQs
                                                     to be permitted to help in further analyzing the
                                                     source */
    uint64_t fpen                         : 1;  /**< [CYA] Forward Progress Counter Enable
                                                     NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
                                                     When set, enables the Forward Progress Counter to
                                                     prevent new LFB entries from enqueueing until ALL
                                                     current LFB entries have completed. */
    uint64_t idxalias                     : 1;  /**< L2C Index Alias Enable
                                                     When set, the L2 Tag/Data Store will alias the 7-bit
                                                     index with the low order 7-bits of the tag.
                                                        index[13:7] =  (tag[20:14] ^ index[13:7])
                                                     NOTE: This bit must only be modified at boot time,
                                                     when it can be guaranteed that no blocks have been
                                                     loaded into the L2 Cache.
                                                     The index aliasing is a performance enhancement feature
                                                     which reduces the L2 cache thrashing experienced for
                                                     regular stride references.
                                                     NOTE: The index alias is stored in the LFB and VAB, and
                                                     its effects are reversed for memory references (Victims,
                                                     STT-Misses and Read-Misses) */
    uint64_t mwf_crd                      : 4;  /**< MWF Credit Threshold: When the remaining MWF credits
                                                     become less than or equal to the MWF_CRD, the L2C will
                                                     assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
                                                     writes (victims) higher priority.
*/ 1854 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 1855 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 1856 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 1857 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 1858 STRSC(ST RSP no invalidate)] */ 1859 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 1860 - 0: Fixed Priority - 1861 IOB->PP requests are higher priority than 1862 PP->IOB requests 1863 - 1: Round Robin - 1864 I/O requests from PP and IOB are serviced in 1865 round robin */ 1866 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 1867 - 0: Fixed Priority - 1868 IOB memory requests are higher priority than PP 1869 memory requests. 1870 - 1: Round Robin - 1871 Memory requests from PP and IOB are serviced in 1872 round robin. */ 1873#else 1874 uint64_t lrf_arb_mode : 1; 1875 uint64_t rfb_arb_mode : 1; 1876 uint64_t rsp_arb_mode : 1; 1877 uint64_t mwf_crd : 4; 1878 uint64_t idxalias : 1; 1879 uint64_t fpen : 1; 1880 uint64_t fpempty : 1; 1881 uint64_t fpexp : 4; 1882 uint64_t reserved_14_17 : 4; 1883 uint64_t lbist : 1; 1884 uint64_t bstrun : 1; 1885 uint64_t reserved_20_63 : 44; 1886#endif 1887 } cn50xx; 1888 struct cvmx_l2c_cfg_cn50xx cn52xx; 1889 struct cvmx_l2c_cfg_cn50xx cn52xxp1; 1890 struct cvmx_l2c_cfg_s cn56xx; 1891 struct cvmx_l2c_cfg_s cn56xxp1; 1892 struct cvmx_l2c_cfg_cn58xx 1893 { 1894#if __BYTE_ORDER == __BIG_ENDIAN 1895 uint64_t reserved_20_63 : 44; 1896 uint64_t bstrun : 1; /**< L2 Data Store Bist Running 1897 Indicates when the L2C HW Bist sequence(short or long) is 1898 running. [L2C ECC Bist FSM is not in the RESET/DONE state] */ 1899 uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence 1900 When the previous state was '0' and SW writes a '1', 1901 the long bist sequence (enhanced 13N March) is performed. 1902 SW can then read the L2C_CFG[BSTRUN] which will indicate 1903 that the long bist sequence is running. 
When BSTRUN-=0, 1904 the state of the L2D_BST[0-3] registers contain information 1905 which reflects the status of the recent long bist sequence. 1906 NOTE: SW must never write LBIST=0 while Long Bist is running 1907 (ie: when BSTRUN=1 never write LBIST=0). 1908 NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS 1909 Fuse is blown. */ 1910 uint64_t reserved_15_17 : 3; 1911 uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable 1912 When set, the L2C dual-fill performance feature is 1913 disabled. 1914 NOTE: This bit is only intended to evaluate the 1915 effectiveness of the dual-fill feature. For OPTIMAL 1916 performance, this bit should ALWAYS be zero. */ 1917 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 1918 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1919 When FPEN is enabled and the LFB is empty, the 1920 forward progress counter (FPCNT) is initialized to: 1921 FPCNT[24:0] = 2^(9+FPEXP) 1922 When the LFB is non-empty the FPCNT is decremented 1923 (every eclk interval). If the FPCNT reaches zero, 1924 the LFB no longer accepts new requests until either 1925 a) all of the current LFB entries have completed 1926 (to ensure forward progress). 1927 b) FPEMPTY=0 and another forward progress count 1928 interval timeout expires. 1929 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 1930 (For eclk=500MHz(2ns), this would be ~4us). */ 1931 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 1932 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1933 When set, if the forward progress counter expires, 1934 all new LFB-NQs are stopped UNTIL all current LFB 1935 entries have completed. 1936 When clear, if the forward progress counter expires, 1937 all new LFB-NQs are stopped UNTIL either 1938 a) all current LFB entries have completed. 
1939 b) another forward progress interval expires 1940 NOTE: We may want to FREEZE/HANG the system when 1941 we encounter an LFB entry cannot complete, and there 1942 may be times when we want to allow further LFB-NQs 1943 to be permitted to help in further analyzing the 1944 source */ 1945 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 1946 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1947 When set, enables the Forward Progress Counter to 1948 prevent new LFB entries from enqueueing until ALL 1949 current LFB entries have completed. */ 1950 uint64_t idxalias : 1; /**< L2C Index Alias Enable 1951 When set, the L2 Tag/Data Store will alias the 11-bit 1952 index with the low order 11-bits of the tag. 1953 index[17:7] = (tag[28:18] ^ index[17:7]) 1954 NOTE: This bit must only be modified at boot time, 1955 when it can be guaranteed that no blocks have been 1956 loaded into the L2 Cache. 1957 The index aliasing is a performance enhancement feature 1958 which reduces the L2 cache thrashing experienced for 1959 regular stride references. 1960 NOTE: The index alias is stored in the LFB and VAB, and 1961 its effects are reversed for memory references (Victims, 1962 STT-Misses and Read-Misses) */ 1963 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 1964 become less than or equal to the MWF_CRD, the L2C will 1965 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 1966 writes (victims) higher priority. 
*/ 1967 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 1968 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 1969 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 1970 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 1971 STRSC(ST RSP no invalidate)] */ 1972 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 1973 - 0: Fixed Priority - 1974 IOB->PP requests are higher priority than 1975 PP->IOB requests 1976 - 1: Round Robin - 1977 I/O requests from PP and IOB are serviced in 1978 round robin */ 1979 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 1980 - 0: Fixed Priority - 1981 IOB memory requests are higher priority than PP 1982 memory requests. 1983 - 1: Round Robin - 1984 Memory requests from PP and IOB are serviced in 1985 round robin. */ 1986#else 1987 uint64_t lrf_arb_mode : 1; 1988 uint64_t rfb_arb_mode : 1; 1989 uint64_t rsp_arb_mode : 1; 1990 uint64_t mwf_crd : 4; 1991 uint64_t idxalias : 1; 1992 uint64_t fpen : 1; 1993 uint64_t fpempty : 1; 1994 uint64_t fpexp : 4; 1995 uint64_t dfill_dis : 1; 1996 uint64_t reserved_15_17 : 3; 1997 uint64_t lbist : 1; 1998 uint64_t bstrun : 1; 1999 uint64_t reserved_20_63 : 44; 2000#endif 2001 } cn58xx; 2002 struct cvmx_l2c_cfg_cn58xxp1 2003 { 2004#if __BYTE_ORDER == __BIG_ENDIAN 2005 uint64_t reserved_15_63 : 49; 2006 uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable 2007 When set, the L2C dual-fill performance feature is 2008 disabled. 2009 NOTE: This bit is only intended to evaluate the 2010 effectiveness of the dual-fill feature. For OPTIMAL 2011 performance, this bit should ALWAYS be zero. */ 2012 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 2013 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2014 When FPEN is enabled and the LFB is empty, the 2015 forward progress counter (FPCNT) is initialized to: 2016 FPCNT[24:0] = 2^(9+FPEXP) 2017 When the LFB is non-empty the FPCNT is decremented 2018 (every eclk interval). 
If the FPCNT reaches zero, 2019 the LFB no longer accepts new requests until either 2020 a) all of the current LFB entries have completed 2021 (to ensure forward progress). 2022 b) FPEMPTY=0 and another forward progress count 2023 interval timeout expires. 2024 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 2025 (For eclk=500MHz(2ns), this would be ~4us). */ 2026 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 2027 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2028 When set, if the forward progress counter expires, 2029 all new LFB-NQs are stopped UNTIL all current LFB 2030 entries have completed. 2031 When clear, if the forward progress counter expires, 2032 all new LFB-NQs are stopped UNTIL either 2033 a) all current LFB entries have completed. 2034 b) another forward progress interval expires 2035 NOTE: We may want to FREEZE/HANG the system when 2036 we encounter an LFB entry cannot complete, and there 2037 may be times when we want to allow further LFB-NQs 2038 to be permitted to help in further analyzing the 2039 source */ 2040 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 2041 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2042 When set, enables the Forward Progress Counter to 2043 prevent new LFB entries from enqueueing until ALL 2044 current LFB entries have completed. */ 2045 uint64_t idxalias : 1; /**< L2C Index Alias Enable 2046 When set, the L2 Tag/Data Store will alias the 11-bit 2047 index with the low order 11-bits of the tag. 2048 index[17:7] = (tag[28:18] ^ index[17:7]) 2049 NOTE: This bit must only be modified at boot time, 2050 when it can be guaranteed that no blocks have been 2051 loaded into the L2 Cache. 2052 The index aliasing is a performance enhancement feature 2053 which reduces the L2 cache thrashing experienced for 2054 regular stride references. 
2055 NOTE: The index alias is stored in the LFB and VAB, and 2056 its effects are reversed for memory references (Victims, 2057 STT-Misses and Read-Misses) */ 2058 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 2059 become less than or equal to the MWF_CRD, the L2C will 2060 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 2061 writes (victims) higher priority. */ 2062 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 2063 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 2064 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 2065 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 2066 STRSC(ST RSP no invalidate)] */ 2067 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 2068 - 0: Fixed Priority - 2069 IOB->PP requests are higher priority than 2070 PP->IOB requests 2071 - 1: Round Robin - 2072 I/O requests from PP and IOB are serviced in 2073 round robin */ 2074 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 2075 - 0: Fixed Priority - 2076 IOB memory requests are higher priority than PP 2077 memory requests. 2078 - 1: Round Robin - 2079 Memory requests from PP and IOB are serviced in 2080 round robin. */ 2081#else 2082 uint64_t lrf_arb_mode : 1; 2083 uint64_t rfb_arb_mode : 1; 2084 uint64_t rsp_arb_mode : 1; 2085 uint64_t mwf_crd : 4; 2086 uint64_t idxalias : 1; 2087 uint64_t fpen : 1; 2088 uint64_t fpempty : 1; 2089 uint64_t fpexp : 4; 2090 uint64_t dfill_dis : 1; 2091 uint64_t reserved_15_63 : 49; 2092#endif 2093 } cn58xxp1; 2094}; 2095typedef union cvmx_l2c_cfg cvmx_l2c_cfg_t; 2096 2097/** 2098 * cvmx_l2c_cop0_map# 2099 * 2100 * L2C_COP0_MAP = PP COP0 register memory mapped region 2101 * 2102 * Description: PP COP0 register mapped region. 2103 * 2104 * NOTE: for 63xx, if the PPID is outside the range of 0-5,63 the write will be ignored and reads 2105 * will return 0x2bad2bad2bad2bad 2106 * 2107 * Notes: 2108 * (1) There are 256 COP0 registers per PP. 
Registers 0-255 map to PP0's COP0 registers, 256-511 are
 * mapped to PP1's, etc.  A special set X PP63 (registers 16128-16383) is for broadcast writes.
 * Any write done to these registers will take effect in ALL PPs.  Note this means the L2C_COP0_MAP
 * register to access can be computed by:
 *
 *    REGNUM = [ PPID[5:0], rd[4:0], sel[2:0] ]
 *
 * where rd and sel are as defined in the HRM description of Core Coprocessor 0 registers
 * and note 4 below.
 *
 * (2) if a COP0 register cannot be accessed by this mechanism, the write will be silently ignored and
 *     the read data will be 0xBADDEED.
 *
 * (3) for 63xx, if the PPID is outside the range of 0-5,63 or if the PP in question is in reset, a
 *     write will be ignored and reads will timeout the RSL bus.
 *
 * (4) Referring to note (1) above, the following rd/sel values are supported:
 *
 * NOTE: Put only the "Customer type" in HRM.  Do not put the "Real type" in HRM.
 *
 *                              Customer                                            Real
 *    rd    sel      type       Description                                         type
 *  ======+=======+==========+==============================================+=========
 *     4     2       RO         COP0 UserLocal                                      RW
 *     7     0       RO         COP0 HWREna                                         RW
 *     9     0       RO         COP0 Count                                          RW
 *     9     6       RO         COP0 CvmCount                                       RW
 *     9     7       RO         COP0 CvmCtl                                         RW
 *    11     0       RO         COP0 Compare                                        RW
 *    11     6       RW         COP0 PowThrottle                                    RW
 *    12     0       RO         COP0 Status                                         RW
 *    12     1       RO         COP0 IntCtl                                         RO
 *    12     2       RO         COP0 SRSCtl                                         RO
 *    13     0       RO         COP0 Cause                                          RW
 *    14     0       RO         COP0 EPC                                            RW
 *    15     0       RO         COP0 PrID                                           RO
 *    15     1       RO         COP0 EBase                                          RW
 *    16     0       RO         PC Issue Debug Info (see details below)             RO
 *    16     1       RO         PC Fetch Debug Info (see details below)             RO
 *    16     2       RO         PC Fill Debug Info (see details below)              RO
 *    16     3       RO         PC Misc Debug Info (see details below)              RO
 *    18     0       RO         COP0 WatchLo0                                       RW
 *    19     0       RO         COP0 WatchHi0                                       RW
 *    22     0       RO         COP0 MultiCoreDebug                                 RW
 *    23     0       RO         COP0 Debug                                          RW
 *    23     6       RO         COP0 Debug2                                         RO
 *    24     0       RO         COP0 DEPC                                           RW
 *    25     0       RO
COP0 PerfCnt Control0 RW 2156 * 25 1 RO COP0 PerfCnt Counter0 RW 2157 * 25 2 RO COP0 PerfCnt Control1 RW 2158 * 25 3 RO COP0 PerfCnt Counter1 RW 2159 * 27 0 RO COP0 CacheErr (icache) RW 2160 * 28 0 RO COP0 TagLo (icache) RW 2161 * 28 1 RO COP0 DataLo (icache) RW 2162 * 29 1 RO COP0 DataHi (icache) RW 2163 * 30 0 RO COP0 ErrorEPC RW 2164 * 31 0 RO COP0 DESAVE RW 2165 * 31 2 RO COP0 Scratch RW 2166 * 31 3 RO COP0 Scratch1 RW 2167 * 31 4 RO COP0 Scratch2 RW 2168 * 2169 * - PC Issue Debug Info 2170 * 2171 * - 63:2 pc0_5a<63:2> // often VA<63:2> of the next instruction to issue 2172 * // but can also be the VA of an instruction executing/replaying on pipe 0 2173 * // or can also be a VA being filled into the instruction cache 2174 * // or can also be unpredictable 2175 * // <61:49> RAZ 2176 * 1 illegal // set when illegal VA 2177 * 0 delayslot // set when VA is delayslot (prior branch may be either taken or not taken) 2178 * 2179 * - PC Fetch Debug Info 2180 * 2181 * - 63:0 fetch_address_3a // VA being fetched from the instruction cache 2182 * // <61:49>, <1:0> RAZ 2183 * 2184 * - PC Fill Debug Info 2185 * 2186 * - 63:0 fill_address_4a<63:2> // VA<63:2> being filled into instruction cache 2187 * // valid when waiting_for_ifill_4a is set (see PC Misc Debug Info below) 2188 * // <61:49> RAZ 2189 * 1 illegal // set when illegal VA 2190 * 0 RAZ 2191 * 2192 * - PC Misc Debug Info 2193 * 2194 * - 63:3 RAZ 2195 * 2 mem_stall_3a // stall term from L1 memory system 2196 * 1 waiting_for_pfill_4a // when waiting_for_ifill_4a is set, indicates whether instruction cache fill is due to a prefetch 2197 * 0 waiting_for_ifill_4a // set when there is an outstanding instruction cache fill 2198 */ 2199union cvmx_l2c_cop0_mapx 2200{ 2201 uint64_t u64; 2202 struct cvmx_l2c_cop0_mapx_s 2203 { 2204#if __BYTE_ORDER == __BIG_ENDIAN 2205 uint64_t data : 64; /**< Data to write to/read from designated PP's COP0 2206 register. 
*/ 2207#else 2208 uint64_t data : 64; 2209#endif 2210 } s; 2211 struct cvmx_l2c_cop0_mapx_s cn63xx; 2212 struct cvmx_l2c_cop0_mapx_s cn63xxp1; 2213}; 2214typedef union cvmx_l2c_cop0_mapx cvmx_l2c_cop0_mapx_t; 2215 2216/** 2217 * cvmx_l2c_ctl 2218 * 2219 * L2C_CTL = L2C Control 2220 * 2221 * 2222 * Notes: 2223 * (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB. 2224 * 2225 * (2) L2DFDBE and L2DFSBE allows software to generate L2DSBE, L2DDBE, VBFSBE, and VBFDBE errors for 2226 * the purposes of testing error handling code. When one (or both) of these bits are set a PL2 2227 * which misses in the L2 will fill with the appropriate error in the first 2 OWs of the fill. 2228 * Software can determine which OW pair gets the error by choosing the desired fill order 2229 * (address<6:5>). A PL2 which hits in the L2 will not inject any errors. Therefore sending a 2230 * WBIL2 prior to the PL2 is recommended to make a miss likely (if multiple processors are involved 2231 * software must be careful to be sure no other processor or IO device can bring the block into the 2232 * L2). 2233 * 2234 * To generate a VBFSBE or VBFDBE, software must first get the cache block into the cache with an 2235 * error using a PL2 which misses the L2. Then a store partial to a portion of the cache block 2236 * without the error must change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will 2237 * trigger the VBFSBE/VBFDBE error. 
2238 */ 2239union cvmx_l2c_ctl 2240{ 2241 uint64_t u64; 2242 struct cvmx_l2c_ctl_s 2243 { 2244#if __BYTE_ORDER == __BIG_ENDIAN 2245 uint64_t reserved_28_63 : 36; 2246 uint64_t disstgl2i : 1; /**< Disable STGL2I's from changing the tags */ 2247 uint64_t l2dfsbe : 1; /**< Force single bit ECC error on PL2 allocates (2) */ 2248 uint64_t l2dfdbe : 1; /**< Force double bit ECC error on PL2 allocates (2) */ 2249 uint64_t discclk : 1; /**< Disable conditional clocking in L2C PNR blocks */ 2250 uint64_t maxvab : 4; /**< Maximum VABs in use at once 2251 (0 means 16, 1-15 as expected) */ 2252 uint64_t maxlfb : 4; /**< Maximum LFBs in use at once 2253 (0 means 16, 1-15 as expected) */ 2254 uint64_t rsp_arb_mode : 1; /**< Arbitration mode for RSC/RSD bus 2255 == 0, round-robin 2256 == 1, static priority 2257 1. IOR data 2258 2. STIN/FILLs 2259 3. STDN/SCDN/SCFL */ 2260 uint64_t xmc_arb_mode : 1; /**< Arbitration mode for XMC QOS queues 2261 == 0, fully determined through QOS 2262 == 1, QOS0 highest priority, QOS1-3 use normal mode */ 2263 uint64_t ef_ena : 1; /**< LMC early fill enable */ 2264 uint64_t ef_cnt : 7; /**< LMC early fill count 2265 Specifies the number of cycles after the first LMC 2266 fill cycle to wait before requesting a fill on the 2267 RSC/RSD bus. 
2268 // 7 dclks (we've received 1st out of 8 2269 // by the time we start counting) 2270 ef_cnt = (7 * dclk0_period) / rclk_period; 2271 // + 1 rclk if the dclk and rclk edges don't 2272 // stay in the same position 2273 if ((dclk0_gen.period % rclk_gen.period) != 0) 2274 ef_cnt = ef_cnt + 1; 2275 // + 2 rclk synchronization uncertainty 2276 ef_cnt = ef_cnt + 2; 2277 // - 3 rclks to recognize first write 2278 ef_cnt = ef_cnt - 3; 2279 // + 3 rclks to perform first write 2280 ef_cnt = ef_cnt + 3; 2281 // - 9 rclks minimum latency from counter expire 2282 // to final fbf read 2283 ef_cnt = ef_cnt - 9; */ 2284 uint64_t vab_thresh : 4; /**< VAB Threshold 2285 When the number of valid VABs exceeds this number the 2286 L2C increases the priority of all writes in the LMC. */ 2287 uint64_t disecc : 1; /**< Tag and Data ECC Disable */ 2288 uint64_t disidxalias : 1; /**< Index Alias Disable */ 2289#else 2290 uint64_t disidxalias : 1; 2291 uint64_t disecc : 1; 2292 uint64_t vab_thresh : 4; 2293 uint64_t ef_cnt : 7; 2294 uint64_t ef_ena : 1; 2295 uint64_t xmc_arb_mode : 1; 2296 uint64_t rsp_arb_mode : 1; 2297 uint64_t maxlfb : 4; 2298 uint64_t maxvab : 4; 2299 uint64_t discclk : 1; 2300 uint64_t l2dfdbe : 1; 2301 uint64_t l2dfsbe : 1; 2302 uint64_t disstgl2i : 1; 2303 uint64_t reserved_28_63 : 36; 2304#endif 2305 } s; 2306 struct cvmx_l2c_ctl_s cn63xx; 2307 struct cvmx_l2c_ctl_cn63xxp1 2308 { 2309#if __BYTE_ORDER == __BIG_ENDIAN 2310 uint64_t reserved_25_63 : 39; 2311 uint64_t discclk : 1; /**< Disable conditional clocking in L2C PNR blocks */ 2312 uint64_t maxvab : 4; /**< Maximum VABs in use at once 2313 (0 means 16, 1-15 as expected) */ 2314 uint64_t maxlfb : 4; /**< Maximum LFBs in use at once 2315 (0 means 16, 1-15 as expected) */ 2316 uint64_t rsp_arb_mode : 1; /**< Arbitration mode for RSC/RSD bus 2317 == 0, round-robin 2318 == 1, static priority 2319 1. IOR data 2320 2. STIN/FILLs 2321 3. 
STDN/SCDN/SCFL */ 2322 uint64_t xmc_arb_mode : 1; /**< Arbitration mode for XMC QOS queues 2323 == 0, fully determined through QOS 2324 == 1, QOS0 highest priority, QOS1-3 use normal mode */ 2325 uint64_t ef_ena : 1; /**< LMC early fill enable */ 2326 uint64_t ef_cnt : 7; /**< LMC early fill count 2327 Specifies the number of cycles after the first LMC 2328 fill cycle to wait before requesting a fill on the 2329 RSC/RSD bus. 2330 // 7 dclks (we've received 1st out of 8 2331 // by the time we start counting) 2332 ef_cnt = (7 * dclk0_period) / rclk_period; 2333 // + 1 rclk if the dclk and rclk edges don't 2334 // stay in the same position 2335 if ((dclk0_gen.period % rclk_gen.period) != 0) 2336 ef_cnt = ef_cnt + 1; 2337 // + 2 rclk synchronization uncertainty 2338 ef_cnt = ef_cnt + 2; 2339 // - 3 rclks to recognize first write 2340 ef_cnt = ef_cnt - 3; 2341 // + 3 rclks to perform first write 2342 ef_cnt = ef_cnt + 3; 2343 // - 9 rclks minimum latency from counter expire 2344 // to final fbf read 2345 ef_cnt = ef_cnt - 9; */ 2346 uint64_t vab_thresh : 4; /**< VAB Threshold 2347 When the number of valid VABs exceeds this number the 2348 L2C increases the priority of all writes in the LMC. 
*/ 2349 uint64_t disecc : 1; /**< Tag and Data ECC Disable */ 2350 uint64_t disidxalias : 1; /**< Index Alias Disable */ 2351#else 2352 uint64_t disidxalias : 1; 2353 uint64_t disecc : 1; 2354 uint64_t vab_thresh : 4; 2355 uint64_t ef_cnt : 7; 2356 uint64_t ef_ena : 1; 2357 uint64_t xmc_arb_mode : 1; 2358 uint64_t rsp_arb_mode : 1; 2359 uint64_t maxlfb : 4; 2360 uint64_t maxvab : 4; 2361 uint64_t discclk : 1; 2362 uint64_t reserved_25_63 : 39; 2363#endif 2364 } cn63xxp1; 2365}; 2366typedef union cvmx_l2c_ctl cvmx_l2c_ctl_t; 2367 2368/** 2369 * cvmx_l2c_dbg 2370 * 2371 * L2C_DBG = L2C DEBUG Register 2372 * 2373 * Description: L2C Tag/Data Store Debug Register 2374 * 2375 * Notes: 2376 * (1) When using the L2T, L2D or FINV Debug probe feature, the LDD command WILL NOT update the DuTags. 2377 * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one set) 2378 * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back 2379 * dirty data to memory to maintain coherency. 2380 * (4) L2 Cache Lock Down feature MUST BE disabled (L2C_LCKBASE[LCK_ENA]=0) if ANY of the L2C debug 2381 * features (L2T, L2D, FINV) are enabled. 2382 */ 2383union cvmx_l2c_dbg 2384{ 2385 uint64_t u64; 2386 struct cvmx_l2c_dbg_s 2387 { 2388#if __BYTE_ORDER == __BIG_ENDIAN 2389 uint64_t reserved_15_63 : 49; 2390 uint64_t lfb_enum : 4; /**< Specifies the LFB Entry# which is to be captured. */ 2391 uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of 2392 the LFB specified by LFB_ENUM[3:0] are captured 2393 into the L2C_LFB(0/1/2) registers. 2394 NOTE: Some fields of the LFB entry are unpredictable 2395 and dependent on usage. This is only intended to be 2396 used for HW debug. */ 2397 uint64_t ppnum : 4; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2398 is enabled, this field determines which one-of-16 2399 PPs is selected as the diagnostic PP. 
*/ 2400 uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2401 is enabled, this field determines 1-of-n targeted 2402 sets to act upon. 2403 NOTE: L2C_DBG[SET] must never equal a crippled or 2404 unusable set (see UMSK* registers and Cripple mode 2405 fuses). */ 2406 uint64_t finv : 1; /**< Flush-Invalidate. 2407 When flush-invalidate is enable (FINV=1), all STF 2408 (L1 store-miss) commands generated from the diagnostic PP 2409 (L2C_DBG[PPNUM]) will invalidate the specified set 2410 (L2C_DBG[SET]) at the index specified in the STF 2411 address[17:7]. If a dirty block is detected (D=1), it is 2412 written back to memory. The contents of the invalid 2413 L2 Cache line is also 'scrubbed' with the STF write data. 2414 NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in 2415 STF address[17:7] refers to the 'aliased' address. 2416 NOTE: An STF command with write data=ZEROES can be 2417 generated by SW using the Prefetch instruction with 2418 Hint=30d "prepare for Store", followed by a SYNCW. 2419 What is seen at the L2C as an STF w/wrdcnt=0 with all 2420 of its mask bits clear (indicates zero-fill data). 2421 A flush-invalidate will 'force-hit' the L2 cache at 2422 [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). 2423 If the cache block is dirty, it is also written back 2424 to memory. The DuTag state is probed/updated as normal 2425 for an STF request. 2426 TYPICAL APPLICATIONS: 2427 1) L2 Tag/Data ECC SW Recovery 2428 2) Cache Unlocking 2429 NOTE: If the cacheline had been previously LOCKED(L=1), 2430 a flush-invalidate operation will explicitly UNLOCK 2431 (L=0) the set/index specified. 2432 NOTE: The diagnostic PP cores can generate STF 2433 commands to the L2 Cache whenever all 128 bytes in a 2434 block are written. SW must take this into consideration 2435 to avoid 'errant' Flush-Invalidates. 
*/ 2436 uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is 2437 returned directly from the L2 Data Store 2438 (regardless of hit/miss) when an LDD(L1 load-miss) command 2439 is issued from a PP determined by the L2C_DBG[PPNUM] 2440 field. The selected set# is determined by the 2441 L2C_DBG[SET] field, and the index is determined 2442 from the address[17:7] associated with the LDD 2443 command. 2444 This 'force-hit' will NOT alter the current L2 Tag 2445 state OR the DuTag state. */ 2446 uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:18]] 2447 is returned on the data bus starting at +32(and +96) bytes 2448 offset from the beginning of cacheline when an LDD 2449 (L1 load-miss) command is issued from a PP determined by 2450 the L2C_DBG[PPNUM] field. 2451 The selected L2 set# is determined by the L2C_DBG[SET] 2452 field, and the L2 index is determined from the 2453 phys_addr[17:7] associated with the LDD command. 2454 This 'L2 force-hit' will NOT alter the current L2 Tag 2455 state OR the DuTag state. 2456 NOTE: The diagnostic PP should issue a d-stream load 2457 to an aligned cacheline+0x20(+0x60) in order to have the 2458 return VDLUTAG information (in OW2/OW6) written directly 2459 into the proper PP register. The diagnostic PP should also 2460 flush it's local L1 cache after use(to ensure data 2461 coherency). 2462 NOTE: The position of the VDLUTAG data in the destination 2463 register is dependent on the endian mode(big/little). 2464 NOTE: N3K-Pass2 modification. (This bit's functionality 2465 has changed since Pass1-in the following way). 2466 NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): 2467 If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected 2468 half cacheline (see: L2D_ERR[BMHCLSEL] is also 2469 conditionally latched into the L2D_FSYN0/1 CSRs if an 2470 LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). 
*/ 2471#else 2472 uint64_t l2t : 1; 2473 uint64_t l2d : 1; 2474 uint64_t finv : 1; 2475 uint64_t set : 3; 2476 uint64_t ppnum : 4; 2477 uint64_t lfb_dmp : 1; 2478 uint64_t lfb_enum : 4; 2479 uint64_t reserved_15_63 : 49; 2480#endif 2481 } s; 2482 struct cvmx_l2c_dbg_cn30xx 2483 { 2484#if __BYTE_ORDER == __BIG_ENDIAN 2485 uint64_t reserved_13_63 : 51; 2486 uint64_t lfb_enum : 2; /**< Specifies the LFB Entry# which is to be captured. */ 2487 uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of 2488 the LFB specified by LFB_ENUM are captured 2489 into the L2C_LFB(0/1/2) registers. 2490 NOTE: Some fields of the LFB entry are unpredictable 2491 and dependent on usage. This is only intended to be 2492 used for HW debug. */ 2493 uint64_t reserved_7_9 : 3; 2494 uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2495 is enabled, this field determines which 2496 PP is selected as the diagnostic PP. 2497 NOTE: For CN30XX single core PPNUM=0 (MBZ) */ 2498 uint64_t reserved_5_5 : 1; 2499 uint64_t set : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2500 is enabled, this field determines 1-of-n targeted 2501 sets to act upon. 2502 NOTE: L2C_DBG[SET] must never equal a crippled or 2503 unusable set (see UMSK* registers and Cripple mode 2504 fuses). */ 2505 uint64_t finv : 1; /**< Flush-Invalidate. 2506 When flush-invalidate is enable (FINV=1), all STF 2507 (L1 store-miss) commands generated from the PP will invalidate 2508 the specified set(L2C_DBG[SET]) at the index specified 2509 in the STF address[14:7]. If a dirty block is detected(D=1), 2510 it is written back to memory. The contents of the invalid 2511 L2 Cache line is also 'scrubbed' with the STF write data. 2512 NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in 2513 STF address[14:7] refers to the 'aliased' address. 
2514 NOTE: An STF command with write data=ZEROES can be 2515 generated by SW using the Prefetch instruction with 2516 Hint=30d "prepare for Store", followed by a SYNCW. 2517 What is seen at the L2C as an STF w/wrdcnt=0 with all 2518 of its mask bits clear (indicates zero-fill data). 2519 A flush-invalidate will 'force-hit' the L2 cache at 2520 [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). 2521 If the cache block is dirty, it is also written back 2522 to memory. The DuTag state is probed/updated as normal 2523 for an STF request. 2524 TYPICAL APPLICATIONS: 2525 1) L2 Tag/Data ECC SW Recovery 2526 2) Cache Unlocking 2527 NOTE: If the cacheline had been previously LOCKED(L=1), 2528 a flush-invalidate operation will explicitly UNLOCK 2529 (L=0) the set/index specified. 2530 NOTE: The PP can generate STF(L1 store-miss) 2531 commands to the L2 Cache whenever all 128 bytes in a 2532 block are written. SW must take this into consideration 2533 to avoid 'errant' Flush-Invalidates. */ 2534 uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is 2535 returned directly from the L2 Data Store 2536 (regardless of hit/miss) when an LDD(L1 load-miss) 2537 command is issued from the PP. 2538 The selected set# is determined by the 2539 L2C_DBG[SET] field, and the index is determined 2540 from the address[14:7] associated with the LDD 2541 command. 2542 This 'force-hit' will NOT alter the current L2 Tag 2543 state OR the DuTag state. */ 2544 uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:15]] 2545 is returned on the data bus starting at +32(and +96) bytes 2546 offset from the beginning of cacheline when an LDD 2547 (L1 load-miss) command is issued from the PP. 2548 The selected L2 set# is determined by the L2C_DBG[SET] 2549 field, and the L2 index is determined from the 2550 phys_addr[14:7] associated with the LDD command. 2551 This 'L2 force-hit' will NOT alter the current L2 Tag 2552 state OR the DuTag state. 
2553 NOTE: The diagnostic PP should issue a d-stream load 2554 to an aligned cacheline+0x20(+0x60) in order to have the 2555 return VDLUTAG information (in OW2/OW6) written directly 2556 into the proper PP register. The diagnostic PP should also 2557 flush it's local L1 cache after use(to ensure data 2558 coherency). 2559 NOTE: The position of the VDLUTAG data in the destination 2560 register is dependent on the endian mode(big/little). 2561 NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): 2562 If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected 2563 half cacheline (see: L2D_ERR[BMHCLSEL] is also 2564 conditionally latched into the L2D_FSYN0/1 CSRs if an 2565 LDD(L1 load-miss) is detected. */ 2566#else 2567 uint64_t l2t : 1; 2568 uint64_t l2d : 1; 2569 uint64_t finv : 1; 2570 uint64_t set : 2; 2571 uint64_t reserved_5_5 : 1; 2572 uint64_t ppnum : 1; 2573 uint64_t reserved_7_9 : 3; 2574 uint64_t lfb_dmp : 1; 2575 uint64_t lfb_enum : 2; 2576 uint64_t reserved_13_63 : 51; 2577#endif 2578 } cn30xx; 2579 struct cvmx_l2c_dbg_cn31xx 2580 { 2581#if __BYTE_ORDER == __BIG_ENDIAN 2582 uint64_t reserved_14_63 : 50; 2583 uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */ 2584 uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of 2585 the LFB specified by LFB_ENUM are captured 2586 into the L2C_LFB(0/1/2) registers. 2587 NOTE: Some fields of the LFB entry are unpredictable 2588 and dependent on usage. This is only intended to be 2589 used for HW debug. */ 2590 uint64_t reserved_7_9 : 3; 2591 uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2592 is enabled, this field determines which 2593 PP is selected as the diagnostic PP. */ 2594 uint64_t reserved_5_5 : 1; 2595 uint64_t set : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2596 is enabled, this field determines 1-of-n targeted 2597 sets to act upon. 
2598 NOTE: L2C_DBG[SET] must never equal a crippled or 2599 unusable set (see UMSK* registers and Cripple mode 2600 fuses). */ 2601 uint64_t finv : 1; /**< Flush-Invalidate. 2602 When flush-invalidate is enable (FINV=1), all STF 2603 (L1 store-miss) commands generated from the diagnostic PP 2604 (L2C_DBG[PPNUM]) will invalidate the specified set 2605 (L2C_DBG[SET]) at the index specified in the STF 2606 address[15:7]. If a dirty block is detected (D=1), it is 2607 written back to memory. The contents of the invalid 2608 L2 Cache line is also 'scrubbed' with the STF write data. 2609 NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in 2610 STF address[15:7] refers to the 'aliased' address. 2611 NOTE: An STF command with write data=ZEROES can be 2612 generated by SW using the Prefetch instruction with 2613 Hint=30d "prepare for Store", followed by a SYNCW. 2614 What is seen at the L2C as an STF w/wrdcnt=0 with all 2615 of its mask bits clear (indicates zero-fill data). 2616 A flush-invalidate will 'force-hit' the L2 cache at 2617 [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). 2618 If the cache block is dirty, it is also written back 2619 to memory. The DuTag state is probed/updated as normal 2620 for an STF request. 2621 TYPICAL APPLICATIONS: 2622 1) L2 Tag/Data ECC SW Recovery 2623 2) Cache Unlocking 2624 NOTE: If the cacheline had been previously LOCKED(L=1), 2625 a flush-invalidate operation will explicitly UNLOCK 2626 (L=0) the set/index specified. 2627 NOTE: The diagnostic PP cores can generate STF(L1 store-miss) 2628 commands to the L2 Cache whenever all 128 bytes in a 2629 block are written. SW must take this into consideration 2630 to avoid 'errant' Flush-Invalidates. */ 2631 uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is 2632 returned directly from the L2 Data Store 2633 (regardless of hit/miss) when an LDD(L1 load-miss) 2634 command is issued from a PP determined by the 2635 L2C_DBG[PPNUM] field. 
The selected set# is determined 2636 by the L2C_DBG[SET] field, and the index is determined 2637 from the address[15:7] associated with the LDD command. 2638 This 'L2 force-hit' will NOT alter the current L2 Tag 2639 state OR the DuTag state. */ 2640 uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:16]] 2641 is returned on the data bus starting at +32(and +96) bytes 2642 offset from the beginning of cacheline when an LDD 2643 (L1 load-miss) command is issued from a PP determined by 2644 the L2C_DBG[PPNUM] field. 2645 The selected L2 set# is determined by the L2C_DBG[SET] 2646 field, and the L2 index is determined from the 2647 phys_addr[15:7] associated with the LDD command. 2648 This 'L2 force-hit' will NOT alter the current L2 Tag 2649 state OR the DuTag state. 2650 NOTE: The diagnostic PP should issue a d-stream load 2651 to an aligned cacheline+0x20(+0x60) in order to have the 2652 return VDLUTAG information (in OW2/OW6) written directly 2653 into the proper PP register. The diagnostic PP should also 2654 flush it's local L1 cache after use(to ensure data 2655 coherency). 2656 NOTE: The position of the VDLUTAG data in the destination 2657 register is dependent on the endian mode(big/little). 2658 NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): 2659 If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected 2660 half cacheline (see: L2D_ERR[BMHCLSEL] is also 2661 conditionally latched into the L2D_FSYN0/1 CSRs if an 2662 LDD(L1 load-miss) is detected from the diagnostic PP 2663 (L2C_DBG[PPNUM]). 
*/ 2664#else 2665 uint64_t l2t : 1; 2666 uint64_t l2d : 1; 2667 uint64_t finv : 1; 2668 uint64_t set : 2; 2669 uint64_t reserved_5_5 : 1; 2670 uint64_t ppnum : 1; 2671 uint64_t reserved_7_9 : 3; 2672 uint64_t lfb_dmp : 1; 2673 uint64_t lfb_enum : 3; 2674 uint64_t reserved_14_63 : 50; 2675#endif 2676 } cn31xx; 2677 struct cvmx_l2c_dbg_s cn38xx; 2678 struct cvmx_l2c_dbg_s cn38xxp2; 2679 struct cvmx_l2c_dbg_cn50xx 2680 { 2681#if __BYTE_ORDER == __BIG_ENDIAN 2682 uint64_t reserved_14_63 : 50; 2683 uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */ 2684 uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of 2685 the LFB specified by LFB_ENUM[2:0] are captured 2686 into the L2C_LFB(0/1/2) registers. 2687 NOTE: Some fields of the LFB entry are unpredictable 2688 and dependent on usage. This is only intended to be 2689 used for HW debug. */ 2690 uint64_t reserved_7_9 : 3; 2691 uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2692 is enabled, this field determines which 1-of-2 2693 PPs is selected as the diagnostic PP. */ 2694 uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2695 is enabled, this field determines 1-of-n targeted 2696 sets to act upon. 2697 NOTE: L2C_DBG[SET] must never equal a crippled or 2698 unusable set (see UMSK* registers and Cripple mode 2699 fuses). */ 2700 uint64_t finv : 1; /**< Flush-Invalidate. 2701 When flush-invalidate is enable (FINV=1), all STF 2702 (L1 store-miss) commands generated from the diagnostic PP 2703 (L2C_DBG[PPNUM]) will invalidate the specified set 2704 (L2C_DBG[SET]) at the index specified in the STF 2705 address[13:7]. If a dirty block is detected (D=1), it is 2706 written back to memory. The contents of the invalid 2707 L2 Cache line is also 'scrubbed' with the STF write data. 2708 NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in 2709 STF address[13:7] refers to the 'aliased' address. 
2710 NOTE: An STF command with write data=ZEROES can be 2711 generated by SW using the Prefetch instruction with 2712 Hint=30d "prepare for Store", followed by a SYNCW. 2713 What is seen at the L2C as an STF w/wrdcnt=0 with all 2714 of its mask bits clear (indicates zero-fill data). 2715 A flush-invalidate will 'force-hit' the L2 cache at 2716 [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). 2717 If the cache block is dirty, it is also written back 2718 to memory. The DuTag state is probed/updated as normal 2719 for an STF request. 2720 TYPICAL APPLICATIONS: 2721 1) L2 Tag/Data ECC SW Recovery 2722 2) Cache Unlocking 2723 NOTE: If the cacheline had been previously LOCKED(L=1), 2724 a flush-invalidate operation will explicitly UNLOCK 2725 (L=0) the set/index specified. 2726 NOTE: The diagnostic PP cores can generate STF 2727 commands to the L2 Cache whenever all 128 bytes in a 2728 block are written. SW must take this into consideration 2729 to avoid 'errant' Flush-Invalidates. */ 2730 uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is 2731 returned directly from the L2 Data Store 2732 (regardless of hit/miss) when an LDD(L1 load-miss) command 2733 is issued from a PP determined by the L2C_DBG[PPNUM] 2734 field. The selected set# is determined by the 2735 L2C_DBG[SET] field, and the index is determined 2736 from the address[13:7] associated with the LDD 2737 command. 2738 This 'force-hit' will NOT alter the current L2 Tag 2739 state OR the DuTag state. */ 2740 uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:14]] 2741 is returned on the data bus starting at +32(and +96) bytes 2742 offset from the beginning of cacheline when an LDD 2743 (L1 load-miss) command is issued from a PP determined by 2744 the L2C_DBG[PPNUM] field. 2745 The selected L2 set# is determined by the L2C_DBG[SET] 2746 field, and the L2 index is determined from the 2747 phys_addr[13:7] associated with the LDD command. 
2748 This 'L2 force-hit' will NOT alter the current L2 Tag 2749 state OR the DuTag state. 2750 NOTE: The diagnostic PP should issue a d-stream load 2751 to an aligned cacheline+0x20(+0x60) in order to have the 2752 return VDLUTAG information (in OW2/OW6) written directly 2753 into the proper PP register. The diagnostic PP should also 2754 flush it's local L1 cache after use(to ensure data 2755 coherency). 2756 NOTE: The position of the VDLUTAG data in the destination 2757 register is dependent on the endian mode(big/little). 2758 NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): 2759 If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected 2760 half cacheline (see: L2D_ERR[BMHCLSEL] is also 2761 conditionally latched into the L2D_FSYN0/1 CSRs if an 2762 LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */ 2763#else 2764 uint64_t l2t : 1; 2765 uint64_t l2d : 1; 2766 uint64_t finv : 1; 2767 uint64_t set : 3; 2768 uint64_t ppnum : 1; 2769 uint64_t reserved_7_9 : 3; 2770 uint64_t lfb_dmp : 1; 2771 uint64_t lfb_enum : 3; 2772 uint64_t reserved_14_63 : 50; 2773#endif 2774 } cn50xx; 2775 struct cvmx_l2c_dbg_cn52xx 2776 { 2777#if __BYTE_ORDER == __BIG_ENDIAN 2778 uint64_t reserved_14_63 : 50; 2779 uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */ 2780 uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of 2781 the LFB specified by LFB_ENUM[2:0] are captured 2782 into the L2C_LFB(0/1/2) registers. 2783 NOTE: Some fields of the LFB entry are unpredictable 2784 and dependent on usage. This is only intended to be 2785 used for HW debug. */ 2786 uint64_t reserved_8_9 : 2; 2787 uint64_t ppnum : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2788 is enabled, this field determines which 1-of-4 2789 PPs is selected as the diagnostic PP. */ 2790 uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] 2791 is enabled, this field determines 1-of-n targeted 2792 sets to act upon. 
2793 NOTE: L2C_DBG[SET] must never equal a crippled or 2794 unusable set (see UMSK* registers and Cripple mode 2795 fuses). */ 2796 uint64_t finv : 1; /**< Flush-Invalidate. 2797 When flush-invalidate is enable (FINV=1), all STF 2798 (L1 store-miss) commands generated from the diagnostic PP 2799 (L2C_DBG[PPNUM]) will invalidate the specified set 2800 (L2C_DBG[SET]) at the index specified in the STF 2801 address[15:7]. If a dirty block is detected (D=1), it is 2802 written back to memory. The contents of the invalid 2803 L2 Cache line is also 'scrubbed' with the STF write data. 2804 NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in 2805 STF address[15:7] refers to the 'aliased' address. 2806 NOTE: An STF command with write data=ZEROES can be 2807 generated by SW using the Prefetch instruction with 2808 Hint=30d "prepare for Store", followed by a SYNCW. 2809 What is seen at the L2C as an STF w/wrdcnt=0 with all 2810 of its mask bits clear (indicates zero-fill data). 2811 A flush-invalidate will 'force-hit' the L2 cache at 2812 [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). 2813 If the cache block is dirty, it is also written back 2814 to memory. The DuTag state is probed/updated as normal 2815 for an STF request. 2816 TYPICAL APPLICATIONS: 2817 1) L2 Tag/Data ECC SW Recovery 2818 2) Cache Unlocking 2819 NOTE: If the cacheline had been previously LOCKED(L=1), 2820 a flush-invalidate operation will explicitly UNLOCK 2821 (L=0) the set/index specified. 2822 NOTE: The diagnostic PP cores can generate STF 2823 commands to the L2 Cache whenever all 128 bytes in a 2824 block are written. SW must take this into consideration 2825 to avoid 'errant' Flush-Invalidates. */ 2826 uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is 2827 returned directly from the L2 Data Store 2828 (regardless of hit/miss) when an LDD(L1 load-miss) command 2829 is issued from a PP determined by the L2C_DBG[PPNUM] 2830 field. 
The selected set# is determined by the 2831 L2C_DBG[SET] field, and the index is determined 2832 from the address[15:7] associated with the LDD 2833 command. 2834 This 'force-hit' will NOT alter the current L2 Tag 2835 state OR the DuTag state. */ 2836 uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:16]] 2837 is returned on the data bus starting at +32(and +96) bytes 2838 offset from the beginning of cacheline when an LDD 2839 (L1 load-miss) command is issued from a PP determined by 2840 the L2C_DBG[PPNUM] field. 2841 The selected L2 set# is determined by the L2C_DBG[SET] 2842 field, and the L2 index is determined from the 2843 phys_addr[15:7] associated with the LDD command. 2844 This 'L2 force-hit' will NOT alter the current L2 Tag 2845 state OR the DuTag state. 2846 NOTE: The diagnostic PP should issue a d-stream load 2847 to an aligned cacheline+0x20(+0x60) in order to have the 2848 return VDLUTAG information (in OW2/OW6) written directly 2849 into the proper PP register. The diagnostic PP should also 2850 flush it's local L1 cache after use(to ensure data 2851 coherency). 2852 NOTE: The position of the VDLUTAG data in the destination 2853 register is dependent on the endian mode(big/little). 2854 NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): 2855 If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected 2856 half cacheline (see: L2D_ERR[BMHCLSEL] is also 2857 conditionally latched into the L2D_FSYN0/1 CSRs if an 2858 LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). 
 */
#else
    uint64_t l2t                          : 1;
    uint64_t l2d                          : 1;
    uint64_t finv                         : 1;
    uint64_t set                          : 3;
    uint64_t ppnum                        : 2;
    uint64_t reserved_8_9                 : 2;
    uint64_t lfb_dmp                      : 1;
    uint64_t lfb_enum                     : 3;
    uint64_t reserved_14_63               : 50;
#endif
    } cn52xx;
    struct cvmx_l2c_dbg_cn52xx cn52xxp1;
    struct cvmx_l2c_dbg_s cn56xx;
    struct cvmx_l2c_dbg_s cn56xxp1;
    struct cvmx_l2c_dbg_s cn58xx;
    struct cvmx_l2c_dbg_s cn58xxp1;
};
typedef union cvmx_l2c_dbg cvmx_l2c_dbg_t;

/**
 * cvmx_l2c_dut
 *
 * L2C_DUT = L2C DUTAG Register
 *
 * Description: L2C Duplicate Tag State Register
 *
 * Notes:
 * (1) When using the L2T, L2D or FINV Debug probe feature, an LDD command issued by the diagnostic PP
 *     WILL NOT update the DuTags.
 * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one enabled at a time).
 * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back
 *     dirty data to memory to maintain coherency.  (A side effect of FINV is that an LDD L2 fill is
 *     launched which fills data into the L2 DS).
 */
union cvmx_l2c_dut
{
    uint64_t u64;
    struct cvmx_l2c_dut_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63               : 32;
    uint64_t dtena                        : 1;  /**< DuTag Diagnostic read enable.
                                                     When L2C_DUT[DTENA]=1, all LDD(L1 load-miss)
                                                     commands issued from the diagnostic PP
                                                     (L2C_DBG[PPNUM]) will capture the DuTag state (V|L1TAG)
                                                     of the PP#(specified in the LDD address[29:26] into
                                                     the L2C_DUT CSR register. This allows the diagPP to
                                                     read ALL DuTags (from any PP).
                                                     The DuTag Set# to capture is extracted from the LDD
                                                     address[25:20]. The diagnostic PP would issue the
                                                     LDD then read the L2C_DUT register (one at a time).
                                                     This LDD 'L2 force-hit' will NOT alter the current L2
                                                     Tag State OR the DuTag state.
                                                     NOTE: For CN58XX the DuTag SIZE has doubled (to 16KB)
                                                     where each DuTag is organized as 2x 64-way entries.
                                                     The LDD address[7] determines which 1(of-2) internal
                                                     64-ways to select.
                                                     The fill data is returned directly from the L2 Data
                                                     Store(regardless of hit/miss) when an LDD command
                                                     is issued from a PP determined by the L2C_DBG[PPNUM]
                                                     field. The selected L2 Set# is determined by the
                                                     L2C_DBG[SET] field, and the index is determined
                                                     from the address[17:7] associated with the LDD
                                                     command.
                                                     This 'L2 force-hit' will NOT alter the current L2 Tag
                                                     state OR the DuTag state.
                                                     NOTE: In order for the DiagPP to generate an LDD command
                                                     to the L2C, it must first force an L1 Dcache flush. */
    uint64_t reserved_30_30               : 1;
    uint64_t dt_vld                       : 1;  /**< Duplicate L1 Tag Valid bit latched in for previous
                                                     LDD(L1 load-miss) command sourced by diagnostic PP. */
    uint64_t dt_tag                       : 29; /**< Duplicate L1 Tag[35:7] latched in for previous
                                                     LDD(L1 load-miss) command sourced by diagnostic PP. */
#else
    uint64_t dt_tag                       : 29;
    uint64_t dt_vld                       : 1;
    uint64_t reserved_30_30               : 1;
    uint64_t dtena                        : 1;
    uint64_t reserved_32_63               : 32;
#endif
    } s;
    struct cvmx_l2c_dut_s cn30xx;
    struct cvmx_l2c_dut_s cn31xx;
    struct cvmx_l2c_dut_s cn38xx;
    struct cvmx_l2c_dut_s cn38xxp2;
    struct cvmx_l2c_dut_s cn50xx;
    struct cvmx_l2c_dut_s cn52xx;
    struct cvmx_l2c_dut_s cn52xxp1;
    struct cvmx_l2c_dut_s cn56xx;
    struct cvmx_l2c_dut_s cn56xxp1;
    struct cvmx_l2c_dut_s cn58xx;
    struct cvmx_l2c_dut_s cn58xxp1;
};
typedef union cvmx_l2c_dut cvmx_l2c_dut_t;

/**
 * cvmx_l2c_dut_map#
 *
 * L2C_DUT_MAP = L2C DUT memory map region
 *
 * Description: Address of the start of the region mapped to the duplicate tag. Can be used to read
 * and write the raw duplicate tag CAM.
 * Writes should be used only with great care as they can easily
 * destroy the coherency of the memory system.  In any case this region is expected to only be used
 * for debug.
 *
 * This base address should be combined with PP virtual ID, L1 way and L1 set to produce the final
 * address as follows:
 *     addr<63:14>   L2C_DUT_MAP<63:14>
 *     addr<13:11>   PP VID
 *     addr<10:6>    L1 way
 *     addr<5:3>     L1 set
 *     addr<2:0>     UNUSED
 *
 * Notes:
 * (1) The tag is 37:10 from the 38-bit OCTEON physical address after hole removal. (The hole is between DR0
 *     and DR1. Remove the hole by subtracting 256MB from 38-bit OCTEON L2/DRAM physical addresses >= 512 MB.)
 */
union cvmx_l2c_dut_mapx
{
    uint64_t u64;
    struct cvmx_l2c_dut_mapx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_38_63               : 26;
    uint64_t tag                          : 28; /**< The tag value (see Note 1) */
    uint64_t reserved_1_9                 : 9;
    uint64_t valid                        : 1;  /**< The valid bit */
#else
    uint64_t valid                        : 1;
    uint64_t reserved_1_9                 : 9;
    uint64_t tag                          : 28;
    uint64_t reserved_38_63               : 26;
#endif
    } s;
    struct cvmx_l2c_dut_mapx_s cn63xx;
    struct cvmx_l2c_dut_mapx_s cn63xxp1;
};
typedef union cvmx_l2c_dut_mapx cvmx_l2c_dut_mapx_t;

/**
 * cvmx_l2c_err_tdt#
 *
 * L2C_ERR_TDT = L2C TAD DaTa Error Info
 *
 *
 * Notes:
 * (1) If the status bit corresponding to the value of the TYPE field is not set the WAYIDX/SYN fields
 *     are not associated with the errors currently logged by the status bits and should be ignored.
 *     This can occur, for example, because of a race between a write to clear a DBE and a new, lower
 *     priority, SBE error occurring.  If the SBE arrives prior to the DBE clear the WAYIDX/SYN fields
 *     will still be locked, but the new SBE error status bit will still be set.
 *
 * (2) The four types of errors have differing priorities.
 *     Priority (from lowest to highest) is SBE,
 *     VSBE, DBE, VDBE.  An error will lock the WAYIDX, and SYN fields for other errors of equal or
 *     lower priority until cleared by software.  This means that the error information is always
 *     (assuming the TYPE field matches) for the highest priority error logged in the status bits.
 *
 * (3) If VSBE or VDBE are set (and the TYPE field matches), the WAYIDX fields are valid and the
 *     syndrome can be found in L2C_ERR_VBF.
 *
 * (4) The syndrome is recorded for DBE errors, though the utility of the value is not clear.
 */
union cvmx_l2c_err_tdtx
{
    uint64_t u64;
    struct cvmx_l2c_err_tdtx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t dbe                          : 1;  /**< L2D Double-Bit error has occurred */
    uint64_t sbe                          : 1;  /**< L2D Single-Bit error has occurred */
    uint64_t vdbe                         : 1;  /**< VBF Double-Bit error has occurred */
    uint64_t vsbe                         : 1;  /**< VBF Single-Bit error has occurred */
    uint64_t syn                          : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
    uint64_t reserved_21_49               : 29;
    uint64_t wayidx                       : 17; /**< Way, index, OW of the L2 block containing the error */
    uint64_t reserved_2_3                 : 2;
    uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                     0 - VSBE
                                                     1 - VDBE
                                                     2 - SBE
                                                     3 - DBE */
#else
    uint64_t type                         : 2;
    uint64_t reserved_2_3                 : 2;
    uint64_t wayidx                       : 17;
    uint64_t reserved_21_49               : 29;
    uint64_t syn                          : 10;
    uint64_t vsbe                         : 1;
    uint64_t vdbe                         : 1;
    uint64_t sbe                          : 1;
    uint64_t dbe                          : 1;
#endif
    } s;
    struct cvmx_l2c_err_tdtx_s cn63xx;
    struct cvmx_l2c_err_tdtx_s cn63xxp1;
};
typedef union cvmx_l2c_err_tdtx cvmx_l2c_err_tdtx_t;

/**
 * cvmx_l2c_err_ttg#
 *
 * L2C_ERR_TTG = L2C TAD TaG Error Info
 *
 *
 * Notes:
 * (1) The priority of errors (highest to lowest) is DBE, SBE, NOWAY.
 *     An error will lock the SYN, and
 *     WAYIDX fields for equal or lower priority errors until cleared by software.
 *
 * (2) The syndrome is recorded for DBE errors, though the utility of the value is not clear.
 *
 * (3) A NOWAY error does not change the value of the SYN field, and leaves WAYIDX[20:17]
 *     unpredictable.  WAYIDX[16:7] is the L2 block index associated with the command which had no way
 *     to allocate.
 *
 * (4) If the status bit corresponding to the value of the TYPE field is not set the WAYIDX/SYN fields
 *     are not associated with the errors currently logged by the status bits and should be ignored.
 *     This can occur, for example, because of a race between a write to clear a DBE and a new, lower
 *     priority, SBE error occurring.  If the SBE arrives prior to the DBE clear the WAYIDX/SYN fields
 *     will still be locked, but the new SBE error status bit will still be set.
 */
union cvmx_l2c_err_ttgx
{
    uint64_t u64;
    struct cvmx_l2c_err_ttgx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t dbe                          : 1;  /**< Double-Bit ECC error */
    uint64_t sbe                          : 1;  /**< Single-Bit ECC error */
    uint64_t noway                        : 1;  /**< No way was available for allocation.
                                                     L2C sets NOWAY during its processing of a
                                                     transaction whenever it needed/wanted to allocate
                                                     a WAY in the L2 cache, but was unable to. NOWAY==1
                                                     is (generally) not an indication that L2C failed to
                                                     complete transactions. Rather, it is a hint of
                                                     possible performance degradation. (For example, L2C
                                                     must read-modify-write DRAM for every transaction
                                                     that updates some, but not all, of the bytes in a
                                                     cache block, misses in the L2 cache, and cannot
                                                     allocate a WAY.) There is one "failure" case where
                                                     L2C will set NOWAY: when it cannot leave a block
                                                     locked in the L2 cache as part of a LCKL2
                                                     transaction. */
    uint64_t reserved_56_60               : 5;
    uint64_t syn                          : 6;  /**< Syndrome for the single-bit error */
    uint64_t reserved_21_49               : 29;
    uint64_t wayidx                       : 14; /**< Way and index of the L2 block containing the error */
    uint64_t reserved_2_6                 : 5;
    uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                     0 - not valid
                                                     1 - NOWAY
                                                     2 - SBE
                                                     3 - DBE */
#else
    uint64_t type                         : 2;
    uint64_t reserved_2_6                 : 5;
    uint64_t wayidx                       : 14;
    uint64_t reserved_21_49               : 29;
    uint64_t syn                          : 6;
    uint64_t reserved_56_60               : 5;
    uint64_t noway                        : 1;
    uint64_t sbe                          : 1;
    uint64_t dbe                          : 1;
#endif
    } s;
    struct cvmx_l2c_err_ttgx_s cn63xx;
    struct cvmx_l2c_err_ttgx_s cn63xxp1;
};
typedef union cvmx_l2c_err_ttgx cvmx_l2c_err_ttgx_t;

/**
 * cvmx_l2c_err_vbf#
 *
 * L2C_ERR_VBF = L2C VBF Error Info
 *
 *
 * Notes:
 * (1) The way/index information is stored in L2C_ERR_TDT, assuming no later interrupt occurred to
 *     overwrite the information.  See the notes associated with L2C_ERR_TDT for full details.
 *
 * (2) The first VSBE will lock the register for other VSBE's.  A VDBE, however, will overwrite a
 *     previously logged VSBE.  Once a VDBE has been logged all later errors will not be logged.  This
 *     means that if VDBE is set the information in the register is for the VDBE, if VDBE is clear and
 *     VSBE is set the register contains information about the VSBE.
 *
 * (3) The syndrome is recorded for VDBE errors, though the utility of the value is not clear.
 *
 * (4) If the status bit corresponding to the value of the TYPE field is not set the SYN field is not
 *     associated with the errors currently logged by the status bits and should be ignored.  This can
 *     occur, for example, because of a race between a write to clear a VDBE and a new, lower priority,
 *     VSBE error occurring.
 *     If the VSBE arrives prior to the VDBE clear the SYN field will still be
 *     locked, but the new VSBE error status bit will still be set.
 */
union cvmx_l2c_err_vbfx
{
    uint64_t u64;
    struct cvmx_l2c_err_vbfx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_62_63               : 2;
    uint64_t vdbe                         : 1;  /**< VBF Double-Bit error has occurred */
    uint64_t vsbe                         : 1;  /**< VBF Single-Bit error has occurred */
    uint64_t vsyn                         : 10; /**< VBF syndrome (valid only if VSBE/VDBE is set) */
    uint64_t reserved_2_49                : 48;
    uint64_t type                         : 2;  /**< The type of error the SYN were latched for.
                                                     0 - VSBE
                                                     1 - VDBE */
#else
    uint64_t type                         : 2;
    uint64_t reserved_2_49                : 48;
    uint64_t vsyn                         : 10;
    uint64_t vsbe                         : 1;
    uint64_t vdbe                         : 1;
    uint64_t reserved_62_63               : 2;
#endif
    } s;
    struct cvmx_l2c_err_vbfx_s cn63xx;
    struct cvmx_l2c_err_vbfx_s cn63xxp1;
};
typedef union cvmx_l2c_err_vbfx cvmx_l2c_err_vbfx_t;

/**
 * cvmx_l2c_err_xmc
 *
 * L2C_ERR_XMC = L2C XMC request error
 *
 * Description: records error information for HOLE*, BIG* and VRT* interrupts.
 *
 * Notes:
 * (1) The first BIGWR/HOLEWR/VRT* interrupt will lock the register until L2C_INT_REG[6:1] are
 *     cleared.
 *
 * (2) ADDR<15:0> will always be zero for VRT* interrupts.
 *
 * (3) ADDR is the 38-bit OCTEON physical address after hole removal. (The hole is between DR0
 *     and DR1. Remove the hole by subtracting 256MB from all 38-bit OCTEON L2/DRAM physical addresses
 *     >= 512 MB.)
 *
 * (4) For 63xx pass 2.0 and all 68xx ADDR<15:0> will ALWAYS be zero.
 */
union cvmx_l2c_err_xmc
{
    uint64_t u64;
    struct cvmx_l2c_err_xmc_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t cmd                          : 6;  /**< XMC command or request causing error */
    uint64_t reserved_52_57               : 6;
    uint64_t sid                          : 4;  /**< XMC sid of request causing error */
    uint64_t reserved_38_47               : 10;
    uint64_t addr                         : 38; /**< XMC address causing the error (see Notes 2 and 3) */
#else
    uint64_t addr                         : 38;
    uint64_t reserved_38_47               : 10;
    uint64_t sid                          : 4;
    uint64_t reserved_52_57               : 6;
    uint64_t cmd                          : 6;
#endif
    } s;
    struct cvmx_l2c_err_xmc_s cn63xx;
    struct cvmx_l2c_err_xmc_s cn63xxp1;
};
typedef union cvmx_l2c_err_xmc cvmx_l2c_err_xmc_t;

/**
 * cvmx_l2c_grpwrr0
 *
 * L2C_GRPWRR0 = L2C PP Weighted Round \#0 Register
 *
 * Description: Defines Weighted rounds(32) for Group PLC0,PLC1
 *
 * Notes:
 * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP
 *   participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear).
 */
union cvmx_l2c_grpwrr0
{
    uint64_t u64;
    struct cvmx_l2c_grpwrr0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t plc1rmsk                     : 32; /**< PLC1 Group#1 Weighted Round Mask
                                                     Each bit represents 1 of 32 rounds
                                                     for Group \#1's participation. When a 'round' bit is
                                                     set, Group#1 is 'masked' and DOES NOT participate.
                                                     When a 'round' bit is clear, Group#1 WILL
                                                     participate in the arbitration for this round. */
    uint64_t plc0rmsk                     : 32; /**< PLC Group#0 Weighted Round Mask
                                                     Each bit represents 1 of 32 rounds
                                                     for Group \#0's participation. When a 'round' bit is
                                                     set, Group#0 is 'masked' and DOES NOT participate.
                                                     When a 'round' bit is clear, Group#0 WILL
                                                     participate in the arbitration for this round. */
#else
    uint64_t plc0rmsk                     : 32;
    uint64_t plc1rmsk                     : 32;
#endif
    } s;
    struct cvmx_l2c_grpwrr0_s cn52xx;
    struct cvmx_l2c_grpwrr0_s cn52xxp1;
    struct cvmx_l2c_grpwrr0_s cn56xx;
    struct cvmx_l2c_grpwrr0_s cn56xxp1;
};
typedef union cvmx_l2c_grpwrr0 cvmx_l2c_grpwrr0_t;

/**
 * cvmx_l2c_grpwrr1
 *
 * L2C_GRPWRR1 = L2C PP Weighted Round \#1 Register
 *
 * Description: Defines Weighted Rounds(32) for Group PLC2,ILC
 *
 * Notes:
 * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP
 *   participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear).
 */
union cvmx_l2c_grpwrr1
{
    uint64_t u64;
    struct cvmx_l2c_grpwrr1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t ilcrmsk                      : 32; /**< ILC (IOB) Weighted Round Mask
                                                     Each bit represents 1 of 32 rounds
                                                     for IOB participation. When a 'round' bit is
                                                     set, IOB is 'masked' and DOES NOT participate.
                                                     When a 'round' bit is clear, IOB WILL
                                                     participate in the arbitration for this round. */
    uint64_t plc2rmsk                     : 32; /**< PLC Group#2 Weighted Round Mask
                                                     Each bit represents 1 of 32 rounds
                                                     for Group \#2's participation. When a 'round' bit is
                                                     set, Group#2 is 'masked' and DOES NOT participate.
                                                     When a 'round' bit is clear, Group#2 WILL
                                                     participate in the arbitration for this round.
 */
#else
    uint64_t plc2rmsk                     : 32;
    uint64_t ilcrmsk                      : 32;
#endif
    } s;
    struct cvmx_l2c_grpwrr1_s cn52xx;
    struct cvmx_l2c_grpwrr1_s cn52xxp1;
    struct cvmx_l2c_grpwrr1_s cn56xx;
    struct cvmx_l2c_grpwrr1_s cn56xxp1;
};
typedef union cvmx_l2c_grpwrr1 cvmx_l2c_grpwrr1_t;

/**
 * cvmx_l2c_int_en
 *
 * L2C_INT_EN = L2C Global Interrupt Enable Register
 *
 * Description:
 */
union cvmx_l2c_int_en
{
    uint64_t u64;
    struct cvmx_l2c_int_en_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_9_63                : 55;
    uint64_t lck2ena                      : 1;  /**< L2 Tag Lock Error2 Interrupt Enable bit
                                                     NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA2] */
    uint64_t lckena                       : 1;  /**< L2 Tag Lock Error Interrupt Enable bit
                                                     NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA] */
    uint64_t l2ddeden                     : 1;  /**< L2 Data ECC Double Error Detect(DED) Interrupt Enable bit
                                                     When set, allows interrupts to be reported on double bit
                                                     (uncorrectable) errors from the L2 Data Arrays.
                                                     NOTE: This is the 'same' bit as L2D_ERR[DED_INTENA] */
    uint64_t l2dsecen                     : 1;  /**< L2 Data ECC Single Error Correct(SEC) Interrupt Enable bit
                                                     When set, allows interrupts to be reported on single bit
                                                     (correctable) errors from the L2 Data Arrays.
                                                     NOTE: This is the 'same' bit as L2D_ERR[SEC_INTENA] */
    uint64_t l2tdeden                     : 1;  /**< L2 Tag ECC Double Error Detect(DED) Interrupt
                                                     NOTE: This is the 'same' bit as L2T_ERR[DED_INTENA] */
    uint64_t l2tsecen                     : 1;  /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
                                                     Enable bit. When set, allows interrupts to be
                                                     reported on single bit (correctable) errors from
                                                     the L2 Tag Arrays.
3337 NOTE: This is the 'same' bit as L2T_ERR[SEC_INTENA] */ 3338 uint64_t oob3en : 1; /**< DMA Out of Bounds Interrupt Enable Range#3 */ 3339 uint64_t oob2en : 1; /**< DMA Out of Bounds Interrupt Enable Range#2 */ 3340 uint64_t oob1en : 1; /**< DMA Out of Bounds Interrupt Enable Range#1 */ 3341#else 3342 uint64_t oob1en : 1; 3343 uint64_t oob2en : 1; 3344 uint64_t oob3en : 1; 3345 uint64_t l2tsecen : 1; 3346 uint64_t l2tdeden : 1; 3347 uint64_t l2dsecen : 1; 3348 uint64_t l2ddeden : 1; 3349 uint64_t lckena : 1; 3350 uint64_t lck2ena : 1; 3351 uint64_t reserved_9_63 : 55; 3352#endif 3353 } s; 3354 struct cvmx_l2c_int_en_s cn52xx; 3355 struct cvmx_l2c_int_en_s cn52xxp1; 3356 struct cvmx_l2c_int_en_s cn56xx; 3357 struct cvmx_l2c_int_en_s cn56xxp1; 3358}; 3359typedef union cvmx_l2c_int_en cvmx_l2c_int_en_t; 3360 3361/** 3362 * cvmx_l2c_int_ena 3363 * 3364 * L2C_INT_ENA = L2C Interrupt Enable 3365 * 3366 */ 3367union cvmx_l2c_int_ena 3368{ 3369 uint64_t u64; 3370 struct cvmx_l2c_int_ena_s 3371 { 3372#if __BYTE_ORDER == __BIG_ENDIAN 3373 uint64_t reserved_8_63 : 56; 3374 uint64_t bigrd : 1; /**< Read reference past MAXDRAM enable */ 3375 uint64_t bigwr : 1; /**< Write reference past MAXDRAM enable */ 3376 uint64_t vrtpe : 1; /**< Virtualization memory parity error */ 3377 uint64_t vrtadrng : 1; /**< Address outside of virtualization range enable */ 3378 uint64_t vrtidrng : 1; /**< Virtualization ID out of range enable */ 3379 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write enable */ 3380 uint64_t holewr : 1; /**< Write reference to 256MB hole enable */ 3381 uint64_t holerd : 1; /**< Read reference to 256MB hole enable */ 3382#else 3383 uint64_t holerd : 1; 3384 uint64_t holewr : 1; 3385 uint64_t vrtwr : 1; 3386 uint64_t vrtidrng : 1; 3387 uint64_t vrtadrng : 1; 3388 uint64_t vrtpe : 1; 3389 uint64_t bigwr : 1; 3390 uint64_t bigrd : 1; 3391 uint64_t reserved_8_63 : 56; 3392#endif 3393 } s; 3394 struct cvmx_l2c_int_ena_s cn63xx; 3395 struct 
cvmx_l2c_int_ena_cn63xxp1 3396 { 3397#if __BYTE_ORDER == __BIG_ENDIAN 3398 uint64_t reserved_6_63 : 58; 3399 uint64_t vrtpe : 1; /**< Virtualization memory parity error */ 3400 uint64_t vrtadrng : 1; /**< Address outside of virtualization range enable */ 3401 uint64_t vrtidrng : 1; /**< Virtualization ID out of range enable */ 3402 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write enable */ 3403 uint64_t holewr : 1; /**< Write reference to 256MB hole enable */ 3404 uint64_t holerd : 1; /**< Read reference to 256MB hole enable */ 3405#else 3406 uint64_t holerd : 1; 3407 uint64_t holewr : 1; 3408 uint64_t vrtwr : 1; 3409 uint64_t vrtidrng : 1; 3410 uint64_t vrtadrng : 1; 3411 uint64_t vrtpe : 1; 3412 uint64_t reserved_6_63 : 58; 3413#endif 3414 } cn63xxp1; 3415}; 3416typedef union cvmx_l2c_int_ena cvmx_l2c_int_ena_t; 3417 3418/** 3419 * cvmx_l2c_int_reg 3420 * 3421 * L2C_INT_REG = L2C Interrupt Register 3422 * 3423 */ 3424union cvmx_l2c_int_reg 3425{ 3426 uint64_t u64; 3427 struct cvmx_l2c_int_reg_s 3428 { 3429#if __BYTE_ORDER == __BIG_ENDIAN 3430 uint64_t reserved_17_63 : 47; 3431 uint64_t tad0 : 1; /**< When set, the enabled interrupt is in either 3432 the L2C_ERR_TDT0 or L2C_ERR_TTG0 CSR */ 3433 uint64_t reserved_8_15 : 8; 3434 uint64_t bigrd : 1; /**< Read reference past L2C_BIG_CTL[MAXDRAM] occurred */ 3435 uint64_t bigwr : 1; /**< Write reference past L2C_BIG_CTL[MAXDRAM] occurred */ 3436 uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error 3437 Whenever an L2C_VRT_MEM read finds a parity error, 3438 that L2C_VRT_MEM cannot cause stores to be blocked. 3439 Software should correct the error. */ 3440 uint64_t vrtadrng : 1; /**< Address outside of virtualization range 3441 Set when a L2C_VRT_CTL[MEMSZ] violation blocked a 3442 store. 3443 L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */ 3444 uint64_t vrtidrng : 1; /**< Virtualization ID out of range 3445 Set when a L2C_VRT_CTL[NUMID] violation blocked a 3446 store. 
*/ 3447 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write 3448 Set when L2C_VRT_MEM blocked a store. */ 3449 uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */ 3450 uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */ 3451#else 3452 uint64_t holerd : 1; 3453 uint64_t holewr : 1; 3454 uint64_t vrtwr : 1; 3455 uint64_t vrtidrng : 1; 3456 uint64_t vrtadrng : 1; 3457 uint64_t vrtpe : 1; 3458 uint64_t bigwr : 1; 3459 uint64_t bigrd : 1; 3460 uint64_t reserved_8_15 : 8; 3461 uint64_t tad0 : 1; 3462 uint64_t reserved_17_63 : 47; 3463#endif 3464 } s; 3465 struct cvmx_l2c_int_reg_s cn63xx; 3466 struct cvmx_l2c_int_reg_cn63xxp1 3467 { 3468#if __BYTE_ORDER == __BIG_ENDIAN 3469 uint64_t reserved_17_63 : 47; 3470 uint64_t tad0 : 1; /**< When set, the enabled interrupt is in either 3471 the L2C_ERR_TDT0 or L2C_ERR_TTG0 CSR */ 3472 uint64_t reserved_6_15 : 10; 3473 uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error 3474 Whenever an L2C_VRT_MEM read finds a parity error, 3475 that L2C_VRT_MEM cannot cause stores to be blocked. 3476 Software should correct the error. */ 3477 uint64_t vrtadrng : 1; /**< Address outside of virtualization range 3478 Set when a L2C_VRT_CTL[MEMSZ] violation blocked a 3479 store. 3480 L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */ 3481 uint64_t vrtidrng : 1; /**< Virtualization ID out of range 3482 Set when a L2C_VRT_CTL[NUMID] violation blocked a 3483 store. */ 3484 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write 3485 Set when L2C_VRT_MEM blocked a store. 
*/ 3486 uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */ 3487 uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */ 3488#else 3489 uint64_t holerd : 1; 3490 uint64_t holewr : 1; 3491 uint64_t vrtwr : 1; 3492 uint64_t vrtidrng : 1; 3493 uint64_t vrtadrng : 1; 3494 uint64_t vrtpe : 1; 3495 uint64_t reserved_6_15 : 10; 3496 uint64_t tad0 : 1; 3497 uint64_t reserved_17_63 : 47; 3498#endif 3499 } cn63xxp1; 3500}; 3501typedef union cvmx_l2c_int_reg cvmx_l2c_int_reg_t; 3502 3503/** 3504 * cvmx_l2c_int_stat 3505 * 3506 * L2C_INT_STAT = L2C Global Interrupt Status Register 3507 * 3508 * Description: 3509 */ 3510union cvmx_l2c_int_stat 3511{ 3512 uint64_t u64; 3513 struct cvmx_l2c_int_stat_s 3514 { 3515#if __BYTE_ORDER == __BIG_ENDIAN 3516 uint64_t reserved_9_63 : 55; 3517 uint64_t lck2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n 3518 could not find an available/unlocked set (for 3519 replacement). 3520 Most likely, this is a result of SW mixing SET 3521 PARTITIONING with ADDRESS LOCKING. If SW allows 3522 another PP to LOCKDOWN all SETs available to PP#n, 3523 then a Rd/Wr Miss from PP#n will be unable 3524 to determine a 'valid' replacement set (since LOCKED 3525 addresses should NEVER be replaced). 3526 If such an event occurs, the HW will select the smallest 3527 available SET(specified by UMSK'x)' as the replacement 3528 set, and the address is unlocked. 3529 NOTE: This is the 'same' bit as L2T_ERR[LCKERR2] */ 3530 uint64_t lck : 1; /**< SW attempted to LOCK DOWN the last available set of 3531 the INDEX (which is ignored by HW - but reported to SW). 3532 The LDD(L1 load-miss) for the LOCK operation is completed 3533 successfully, however the address is NOT locked. 3534 NOTE: 'Available' sets takes the L2C_SPAR*[UMSK*] 3535 into account. 
For example, if diagnostic PPx has 3536 UMSKx defined to only use SETs [1:0], and SET1 had 3537 been previously LOCKED, then an attempt to LOCK the 3538 last available SET0 would result in a LCKERR. (This 3539 is to ensure that at least 1 SET at each INDEX is 3540 not LOCKED for general use by other PPs). 3541 NOTE: This is the 'same' bit as L2T_ERR[LCKERR] */ 3542 uint64_t l2dded : 1; /**< L2D Double Error detected (DED) 3543 NOTE: This is the 'same' bit as L2D_ERR[DED_ERR] */ 3544 uint64_t l2dsec : 1; /**< L2D Single Error corrected (SEC) 3545 NOTE: This is the 'same' bit as L2D_ERR[SEC_ERR] */ 3546 uint64_t l2tded : 1; /**< L2T Double Bit Error detected (DED) 3547 During every L2 Tag Probe, all 8 sets Tag's (at a 3548 given index) are checked for double bit errors(DBEs). 3549 This bit is set if ANY of the 8 sets contains a DBE. 3550 DBEs also generated an interrupt(if enabled). 3551 NOTE: This is the 'same' bit as L2T_ERR[DED_ERR] */ 3552 uint64_t l2tsec : 1; /**< L2T Single Bit Error corrected (SEC) status 3553 During every L2 Tag Probe, all 8 sets Tag's (at a 3554 given index) are checked for single bit errors(SBEs). 3555 This bit is set if ANY of the 8 sets contains an SBE. 3556 SBEs are auto corrected in HW and generate an 3557 interrupt(if enabled). 
3558 NOTE: This is the 'same' bit as L2T_ERR[SEC_ERR] */ 3559 uint64_t oob3 : 1; /**< DMA Out of Bounds Interrupt Status Range#3 */ 3560 uint64_t oob2 : 1; /**< DMA Out of Bounds Interrupt Status Range#2 */ 3561 uint64_t oob1 : 1; /**< DMA Out of Bounds Interrupt Status Range#1 */ 3562#else 3563 uint64_t oob1 : 1; 3564 uint64_t oob2 : 1; 3565 uint64_t oob3 : 1; 3566 uint64_t l2tsec : 1; 3567 uint64_t l2tded : 1; 3568 uint64_t l2dsec : 1; 3569 uint64_t l2dded : 1; 3570 uint64_t lck : 1; 3571 uint64_t lck2 : 1; 3572 uint64_t reserved_9_63 : 55; 3573#endif 3574 } s; 3575 struct cvmx_l2c_int_stat_s cn52xx; 3576 struct cvmx_l2c_int_stat_s cn52xxp1; 3577 struct cvmx_l2c_int_stat_s cn56xx; 3578 struct cvmx_l2c_int_stat_s cn56xxp1; 3579}; 3580typedef union cvmx_l2c_int_stat cvmx_l2c_int_stat_t; 3581 3582/** 3583 * cvmx_l2c_ioc#_pfc 3584 * 3585 * L2C_IOC_PFC = L2C IOC Performance Counter(s) 3586 * 3587 */ 3588union cvmx_l2c_iocx_pfc 3589{ 3590 uint64_t u64; 3591 struct cvmx_l2c_iocx_pfc_s 3592 { 3593#if __BYTE_ORDER == __BIG_ENDIAN 3594 uint64_t count : 64; /**< Current counter value */ 3595#else 3596 uint64_t count : 64; 3597#endif 3598 } s; 3599 struct cvmx_l2c_iocx_pfc_s cn63xx; 3600 struct cvmx_l2c_iocx_pfc_s cn63xxp1; 3601}; 3602typedef union cvmx_l2c_iocx_pfc cvmx_l2c_iocx_pfc_t; 3603 3604/** 3605 * cvmx_l2c_ior#_pfc 3606 * 3607 * L2C_IOR_PFC = L2C IOR Performance Counter(s) 3608 * 3609 */ 3610union cvmx_l2c_iorx_pfc 3611{ 3612 uint64_t u64; 3613 struct cvmx_l2c_iorx_pfc_s 3614 { 3615#if __BYTE_ORDER == __BIG_ENDIAN 3616 uint64_t count : 64; /**< Current counter value */ 3617#else 3618 uint64_t count : 64; 3619#endif 3620 } s; 3621 struct cvmx_l2c_iorx_pfc_s cn63xx; 3622 struct cvmx_l2c_iorx_pfc_s cn63xxp1; 3623}; 3624typedef union cvmx_l2c_iorx_pfc cvmx_l2c_iorx_pfc_t; 3625 3626/** 3627 * cvmx_l2c_lckbase 3628 * 3629 * L2C_LCKBASE = L2C LockDown Base Register 3630 * 3631 * Description: L2C LockDown Base Register 3632 * 3633 * Notes: 3634 * (1) SW RESTRICTION \#1: SW 
must manage the L2 Data Store lockdown space such that at least 1 3635 * set per cache line remains in the 'unlocked' (normal) state to allow general caching operations. 3636 * If SW violates this restriction, a status bit is set (LCK_ERR) and an interrupt is posted. 3637 * [this limits the total lockdown space to 7/8ths of the total L2 data store = 896KB] 3638 * (2) IOB initiated LDI commands are ignored (only PP initiated LDI/LDD commands are considered 3639 * for lockdown). 3640 * (3) To 'unlock' a locked cache line, SW can use the FLUSH-INVAL CSR mechanism (see L2C_DBG[FINV]). 3641 * (4) LCK_ENA MUST only be activated when debug modes are disabled (L2C_DBG[L2T], L2C_DBG[L2D], L2C_DBG[FINV]). 3642 */ 3643union cvmx_l2c_lckbase 3644{ 3645 uint64_t u64; 3646 struct cvmx_l2c_lckbase_s 3647 { 3648#if __BYTE_ORDER == __BIG_ENDIAN 3649 uint64_t reserved_31_63 : 33; 3650 uint64_t lck_base : 27; /**< Base Memory block address[33:7]. Specifies the 3651 starting address of the lockdown region. */ 3652 uint64_t reserved_1_3 : 3; 3653 uint64_t lck_ena : 1; /**< L2 Cache Lock Enable 3654 When the LCK_ENA=1, all LDI(I-stream Load) or 3655 LDD(L1 load-miss) commands issued from the 3656 diagnostic PP (specified by the L2C_DBG[PPNUM]), 3657 which fall within a predefined lockdown address 3658 range (specified by: [lck_base:lck_base+lck_offset]) 3659 are LOCKED in the L2 cache. The LOCKED state is 3660 denoted using an explicit L2 Tag bit (L=1). 3661 If the LOCK request L2-Hits (on ANY SET), then data is 3662 returned from the L2 and the hit set is updated to the 3663 LOCKED state. NOTE: If the Hit Set# is outside the 3664 available sets for a given PP (see UMSK'x'), the 3665 the LOCK bit is still SET. If the programmer's intent 3666 is to explicitly LOCK addresses into 'available' sets, 3667 care must be taken to flush-invalidate the cache first 3668 (to avoid such situations). Not following this procedure 3669 can lead to LCKERR2 interrupts. 
3670 If the LOCK request L2-Misses, a replacment set is 3671 chosen(from the available sets (UMSK'x'). 3672 If the replacement set contains a dirty-victim it is 3673 written back to memory. Memory read data is then written 3674 into the replacement set, and the replacment SET is 3675 updated to the LOCKED state(L=1). 3676 NOTE: SETs that contain LOCKED addresses are 3677 excluded from the replacement set selection algorithm. 3678 NOTE: The LDD command will allocate the DuTag as normal. 3679 NOTE: If L2C_CFG[IDXALIAS]=1, the address is 'aliased' first 3680 before being checked against the lockdown address 3681 range. To ensure an 'aliased' address is properly locked, 3682 it is recommmended that SW preload the 'aliased' locked adddress 3683 into the L2C_LCKBASE[LCK_BASE] register (while keeping 3684 L2C_LCKOFF[LCK_OFFSET]=0). 3685 NOTE: The OCTEON(N3) implementation only supports 16GB(MAX) of 3686 physical memory. Therefore, only byte address[33:0] are used 3687 (ie: address[35:34] are ignored). */ 3688#else 3689 uint64_t lck_ena : 1; 3690 uint64_t reserved_1_3 : 3; 3691 uint64_t lck_base : 27; 3692 uint64_t reserved_31_63 : 33; 3693#endif 3694 } s; 3695 struct cvmx_l2c_lckbase_s cn30xx; 3696 struct cvmx_l2c_lckbase_s cn31xx; 3697 struct cvmx_l2c_lckbase_s cn38xx; 3698 struct cvmx_l2c_lckbase_s cn38xxp2; 3699 struct cvmx_l2c_lckbase_s cn50xx; 3700 struct cvmx_l2c_lckbase_s cn52xx; 3701 struct cvmx_l2c_lckbase_s cn52xxp1; 3702 struct cvmx_l2c_lckbase_s cn56xx; 3703 struct cvmx_l2c_lckbase_s cn56xxp1; 3704 struct cvmx_l2c_lckbase_s cn58xx; 3705 struct cvmx_l2c_lckbase_s cn58xxp1; 3706}; 3707typedef union cvmx_l2c_lckbase cvmx_l2c_lckbase_t; 3708 3709/** 3710 * cvmx_l2c_lckoff 3711 * 3712 * L2C_LCKOFF = L2C LockDown OFFSET Register 3713 * 3714 * Description: L2C LockDown OFFSET Register 3715 * 3716 * Notes: 3717 * (1) The generation of the end lockdown block address will 'wrap'. 
3718 * (2) The minimum granularity for lockdown is 1 cache line (= 128B block) 3719 */ 3720union cvmx_l2c_lckoff 3721{ 3722 uint64_t u64; 3723 struct cvmx_l2c_lckoff_s 3724 { 3725#if __BYTE_ORDER == __BIG_ENDIAN 3726 uint64_t reserved_10_63 : 54; 3727 uint64_t lck_offset : 10; /**< LockDown block Offset. Used in determining 3728 the ending block address of the lockdown 3729 region: 3730 End Lockdown block Address[33:7] = 3731 LCK_BASE[33:7]+LCK_OFFSET[9:0] */ 3732#else 3733 uint64_t lck_offset : 10; 3734 uint64_t reserved_10_63 : 54; 3735#endif 3736 } s; 3737 struct cvmx_l2c_lckoff_s cn30xx; 3738 struct cvmx_l2c_lckoff_s cn31xx; 3739 struct cvmx_l2c_lckoff_s cn38xx; 3740 struct cvmx_l2c_lckoff_s cn38xxp2; 3741 struct cvmx_l2c_lckoff_s cn50xx; 3742 struct cvmx_l2c_lckoff_s cn52xx; 3743 struct cvmx_l2c_lckoff_s cn52xxp1; 3744 struct cvmx_l2c_lckoff_s cn56xx; 3745 struct cvmx_l2c_lckoff_s cn56xxp1; 3746 struct cvmx_l2c_lckoff_s cn58xx; 3747 struct cvmx_l2c_lckoff_s cn58xxp1; 3748}; 3749typedef union cvmx_l2c_lckoff cvmx_l2c_lckoff_t; 3750 3751/** 3752 * cvmx_l2c_lfb0 3753 * 3754 * L2C_LFB0 = L2C LFB DEBUG 0 Register 3755 * 3756 * Description: L2C LFB Contents (Status Bits) 3757 */ 3758union cvmx_l2c_lfb0 3759{ 3760 uint64_t u64; 3761 struct cvmx_l2c_lfb0_s 3762 { 3763#if __BYTE_ORDER == __BIG_ENDIAN 3764 uint64_t reserved_32_63 : 32; 3765 uint64_t stcpnd : 1; /**< LFB STC Pending Status */ 3766 uint64_t stpnd : 1; /**< LFB ST* Pending Status */ 3767 uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ 3768 uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ 3769 uint64_t vam : 1; /**< Valid Full Address Match Status */ 3770 uint64_t inxt : 4; /**< Next LFB Pointer(invalid if ITL=1) */ 3771 uint64_t itl : 1; /**< LFB Tail of List Indicator */ 3772 uint64_t ihd : 1; /**< LFB Head of List Indicator */ 3773 uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */ 3774 uint64_t vabnum : 4; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ 3775 uint64_t sid 
: 9; /**< LFB Source ID */ 3776 uint64_t cmd : 4; /**< LFB Command */ 3777 uint64_t vld : 1; /**< LFB Valid */ 3778#else 3779 uint64_t vld : 1; 3780 uint64_t cmd : 4; 3781 uint64_t sid : 9; 3782 uint64_t vabnum : 4; 3783 uint64_t set : 3; 3784 uint64_t ihd : 1; 3785 uint64_t itl : 1; 3786 uint64_t inxt : 4; 3787 uint64_t vam : 1; 3788 uint64_t stcfl : 1; 3789 uint64_t stinv : 1; 3790 uint64_t stpnd : 1; 3791 uint64_t stcpnd : 1; 3792 uint64_t reserved_32_63 : 32; 3793#endif 3794 } s; 3795 struct cvmx_l2c_lfb0_cn30xx 3796 { 3797#if __BYTE_ORDER == __BIG_ENDIAN 3798 uint64_t reserved_32_63 : 32; 3799 uint64_t stcpnd : 1; /**< LFB STC Pending Status */ 3800 uint64_t stpnd : 1; /**< LFB ST* Pending Status */ 3801 uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ 3802 uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ 3803 uint64_t vam : 1; /**< Valid Full Address Match Status */ 3804 uint64_t reserved_25_26 : 2; 3805 uint64_t inxt : 2; /**< Next LFB Pointer(invalid if ITL=1) */ 3806 uint64_t itl : 1; /**< LFB Tail of List Indicator */ 3807 uint64_t ihd : 1; /**< LFB Head of List Indicator */ 3808 uint64_t reserved_20_20 : 1; 3809 uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */ 3810 uint64_t reserved_16_17 : 2; 3811 uint64_t vabnum : 2; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ 3812 uint64_t sid : 9; /**< LFB Source ID */ 3813 uint64_t cmd : 4; /**< LFB Command */ 3814 uint64_t vld : 1; /**< LFB Valid */ 3815#else 3816 uint64_t vld : 1; 3817 uint64_t cmd : 4; 3818 uint64_t sid : 9; 3819 uint64_t vabnum : 2; 3820 uint64_t reserved_16_17 : 2; 3821 uint64_t set : 2; 3822 uint64_t reserved_20_20 : 1; 3823 uint64_t ihd : 1; 3824 uint64_t itl : 1; 3825 uint64_t inxt : 2; 3826 uint64_t reserved_25_26 : 2; 3827 uint64_t vam : 1; 3828 uint64_t stcfl : 1; 3829 uint64_t stinv : 1; 3830 uint64_t stpnd : 1; 3831 uint64_t stcpnd : 1; 3832 uint64_t reserved_32_63 : 32; 3833#endif 3834 } cn30xx; 3835 struct cvmx_l2c_lfb0_cn31xx 3836 { 3837#if 
__BYTE_ORDER == __BIG_ENDIAN 3838 uint64_t reserved_32_63 : 32; 3839 uint64_t stcpnd : 1; /**< LFB STC Pending Status */ 3840 uint64_t stpnd : 1; /**< LFB ST* Pending Status */ 3841 uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ 3842 uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ 3843 uint64_t vam : 1; /**< Valid Full Address Match Status */ 3844 uint64_t reserved_26_26 : 1; 3845 uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */ 3846 uint64_t itl : 1; /**< LFB Tail of List Indicator */ 3847 uint64_t ihd : 1; /**< LFB Head of List Indicator */ 3848 uint64_t reserved_20_20 : 1; 3849 uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */ 3850 uint64_t reserved_17_17 : 1; 3851 uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ 3852 uint64_t sid : 9; /**< LFB Source ID */ 3853 uint64_t cmd : 4; /**< LFB Command */ 3854 uint64_t vld : 1; /**< LFB Valid */ 3855#else 3856 uint64_t vld : 1; 3857 uint64_t cmd : 4; 3858 uint64_t sid : 9; 3859 uint64_t vabnum : 3; 3860 uint64_t reserved_17_17 : 1; 3861 uint64_t set : 2; 3862 uint64_t reserved_20_20 : 1; 3863 uint64_t ihd : 1; 3864 uint64_t itl : 1; 3865 uint64_t inxt : 3; 3866 uint64_t reserved_26_26 : 1; 3867 uint64_t vam : 1; 3868 uint64_t stcfl : 1; 3869 uint64_t stinv : 1; 3870 uint64_t stpnd : 1; 3871 uint64_t stcpnd : 1; 3872 uint64_t reserved_32_63 : 32; 3873#endif 3874 } cn31xx; 3875 struct cvmx_l2c_lfb0_s cn38xx; 3876 struct cvmx_l2c_lfb0_s cn38xxp2; 3877 struct cvmx_l2c_lfb0_cn50xx 3878 { 3879#if __BYTE_ORDER == __BIG_ENDIAN 3880 uint64_t reserved_32_63 : 32; 3881 uint64_t stcpnd : 1; /**< LFB STC Pending Status */ 3882 uint64_t stpnd : 1; /**< LFB ST* Pending Status */ 3883 uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ 3884 uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ 3885 uint64_t vam : 1; /**< Valid Full Address Match Status */ 3886 uint64_t reserved_26_26 : 1; 3887 uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */ 3888 uint64_t 
itl : 1; /**< LFB Tail of List Indicator */ 3889 uint64_t ihd : 1; /**< LFB Head of List Indicator */ 3890 uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */ 3891 uint64_t reserved_17_17 : 1; 3892 uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ 3893 uint64_t sid : 9; /**< LFB Source ID */ 3894 uint64_t cmd : 4; /**< LFB Command */ 3895 uint64_t vld : 1; /**< LFB Valid */ 3896#else 3897 uint64_t vld : 1; 3898 uint64_t cmd : 4; 3899 uint64_t sid : 9; 3900 uint64_t vabnum : 3; 3901 uint64_t reserved_17_17 : 1; 3902 uint64_t set : 3; 3903 uint64_t ihd : 1; 3904 uint64_t itl : 1; 3905 uint64_t inxt : 3; 3906 uint64_t reserved_26_26 : 1; 3907 uint64_t vam : 1; 3908 uint64_t stcfl : 1; 3909 uint64_t stinv : 1; 3910 uint64_t stpnd : 1; 3911 uint64_t stcpnd : 1; 3912 uint64_t reserved_32_63 : 32; 3913#endif 3914 } cn50xx; 3915 struct cvmx_l2c_lfb0_cn50xx cn52xx; 3916 struct cvmx_l2c_lfb0_cn50xx cn52xxp1; 3917 struct cvmx_l2c_lfb0_s cn56xx; 3918 struct cvmx_l2c_lfb0_s cn56xxp1; 3919 struct cvmx_l2c_lfb0_s cn58xx; 3920 struct cvmx_l2c_lfb0_s cn58xxp1; 3921}; 3922typedef union cvmx_l2c_lfb0 cvmx_l2c_lfb0_t; 3923 3924/** 3925 * cvmx_l2c_lfb1 3926 * 3927 * L2C_LFB1 = L2C LFB DEBUG 1 Register 3928 * 3929 * Description: L2C LFB Contents (Wait Bits) 3930 */ 3931union cvmx_l2c_lfb1 3932{ 3933 uint64_t u64; 3934 struct cvmx_l2c_lfb1_s 3935 { 3936#if __BYTE_ORDER == __BIG_ENDIAN 3937 uint64_t reserved_19_63 : 45; 3938 uint64_t dsgoing : 1; /**< LFB DS Going (in flight) */ 3939 uint64_t bid : 2; /**< LFB DS Bid# */ 3940 uint64_t wtrsp : 1; /**< LFB Waiting for RSC Response [FILL,STRSP] completion */ 3941 uint64_t wtdw : 1; /**< LFB Waiting for DS-WR completion */ 3942 uint64_t wtdq : 1; /**< LFB Waiting for LFB-DQ */ 3943 uint64_t wtwhp : 1; /**< LFB Waiting for Write-Hit Partial L2 DS-WR completion */ 3944 uint64_t wtwhf : 1; /**< LFB Waiting for Write-Hit Full L2 DS-WR completion */ 3945 uint64_t wtwrm : 1; /**< LFB Waiting for 
Write-Miss L2 DS-WR completion */ 3946 uint64_t wtstm : 1; /**< LFB Waiting for Write-Miss L2 DS-WR completion */ 3947 uint64_t wtrda : 1; /**< LFB Waiting for Read-Miss L2 DS-WR completion */ 3948 uint64_t wtstdt : 1; /**< LFB Waiting for all ST write Data to arrive on XMD bus */ 3949 uint64_t wtstrsp : 1; /**< LFB Waiting for ST RSC/RSD to be issued on RSP 3950 (with invalidates) */ 3951 uint64_t wtstrsc : 1; /**< LFB Waiting for ST RSC-Only to be issued on RSP 3952 (no-invalidates) */ 3953 uint64_t wtvtm : 1; /**< LFB Waiting for Victim Read L2 DS-RD completion */ 3954 uint64_t wtmfl : 1; /**< LFB Waiting for Memory Fill completion to MRB */ 3955 uint64_t prbrty : 1; /**< Probe-Retry Detected - waiting for probe completion */ 3956 uint64_t wtprb : 1; /**< LFB Waiting for Probe */ 3957 uint64_t vld : 1; /**< LFB Valid */ 3958#else 3959 uint64_t vld : 1; 3960 uint64_t wtprb : 1; 3961 uint64_t prbrty : 1; 3962 uint64_t wtmfl : 1; 3963 uint64_t wtvtm : 1; 3964 uint64_t wtstrsc : 1; 3965 uint64_t wtstrsp : 1; 3966 uint64_t wtstdt : 1; 3967 uint64_t wtrda : 1; 3968 uint64_t wtstm : 1; 3969 uint64_t wtwrm : 1; 3970 uint64_t wtwhf : 1; 3971 uint64_t wtwhp : 1; 3972 uint64_t wtdq : 1; 3973 uint64_t wtdw : 1; 3974 uint64_t wtrsp : 1; 3975 uint64_t bid : 2; 3976 uint64_t dsgoing : 1; 3977 uint64_t reserved_19_63 : 45; 3978#endif 3979 } s; 3980 struct cvmx_l2c_lfb1_s cn30xx; 3981 struct cvmx_l2c_lfb1_s cn31xx; 3982 struct cvmx_l2c_lfb1_s cn38xx; 3983 struct cvmx_l2c_lfb1_s cn38xxp2; 3984 struct cvmx_l2c_lfb1_s cn50xx; 3985 struct cvmx_l2c_lfb1_s cn52xx; 3986 struct cvmx_l2c_lfb1_s cn52xxp1; 3987 struct cvmx_l2c_lfb1_s cn56xx; 3988 struct cvmx_l2c_lfb1_s cn56xxp1; 3989 struct cvmx_l2c_lfb1_s cn58xx; 3990 struct cvmx_l2c_lfb1_s cn58xxp1; 3991}; 3992typedef union cvmx_l2c_lfb1 cvmx_l2c_lfb1_t; 3993 3994/** 3995 * cvmx_l2c_lfb2 3996 * 3997 * L2C_LFB2 = L2C LFB DEBUG 2 Register 3998 * 3999 * Description: L2C LFB Contents Tag/Index 4000 */ 4001union cvmx_l2c_lfb2 4002{ 4003 
uint64_t u64; 4004 struct cvmx_l2c_lfb2_s 4005 { 4006#if __BYTE_ORDER == __BIG_ENDIAN 4007 uint64_t reserved_0_63 : 64; 4008#else 4009 uint64_t reserved_0_63 : 64; 4010#endif 4011 } s; 4012 struct cvmx_l2c_lfb2_cn30xx 4013 { 4014#if __BYTE_ORDER == __BIG_ENDIAN 4015 uint64_t reserved_27_63 : 37; 4016 uint64_t lfb_tag : 19; /**< LFB TAG[33:15] */ 4017 uint64_t lfb_idx : 8; /**< LFB IDX[14:7] */ 4018#else 4019 uint64_t lfb_idx : 8; 4020 uint64_t lfb_tag : 19; 4021 uint64_t reserved_27_63 : 37; 4022#endif 4023 } cn30xx; 4024 struct cvmx_l2c_lfb2_cn31xx 4025 { 4026#if __BYTE_ORDER == __BIG_ENDIAN 4027 uint64_t reserved_27_63 : 37; 4028 uint64_t lfb_tag : 17; /**< LFB TAG[33:16] */ 4029 uint64_t lfb_idx : 10; /**< LFB IDX[15:7] */ 4030#else 4031 uint64_t lfb_idx : 10; 4032 uint64_t lfb_tag : 17; 4033 uint64_t reserved_27_63 : 37; 4034#endif 4035 } cn31xx; 4036 struct cvmx_l2c_lfb2_cn31xx cn38xx; 4037 struct cvmx_l2c_lfb2_cn31xx cn38xxp2; 4038 struct cvmx_l2c_lfb2_cn50xx 4039 { 4040#if __BYTE_ORDER == __BIG_ENDIAN 4041 uint64_t reserved_27_63 : 37; 4042 uint64_t lfb_tag : 20; /**< LFB TAG[33:14] */ 4043 uint64_t lfb_idx : 7; /**< LFB IDX[13:7] */ 4044#else 4045 uint64_t lfb_idx : 7; 4046 uint64_t lfb_tag : 20; 4047 uint64_t reserved_27_63 : 37; 4048#endif 4049 } cn50xx; 4050 struct cvmx_l2c_lfb2_cn52xx 4051 { 4052#if __BYTE_ORDER == __BIG_ENDIAN 4053 uint64_t reserved_27_63 : 37; 4054 uint64_t lfb_tag : 18; /**< LFB TAG[33:16] */ 4055 uint64_t lfb_idx : 9; /**< LFB IDX[15:7] */ 4056#else 4057 uint64_t lfb_idx : 9; 4058 uint64_t lfb_tag : 18; 4059 uint64_t reserved_27_63 : 37; 4060#endif 4061 } cn52xx; 4062 struct cvmx_l2c_lfb2_cn52xx cn52xxp1; 4063 struct cvmx_l2c_lfb2_cn56xx 4064 { 4065#if __BYTE_ORDER == __BIG_ENDIAN 4066 uint64_t reserved_27_63 : 37; 4067 uint64_t lfb_tag : 16; /**< LFB TAG[33:18] */ 4068 uint64_t lfb_idx : 11; /**< LFB IDX[17:7] */ 4069#else 4070 uint64_t lfb_idx : 11; 4071 uint64_t lfb_tag : 16; 4072 uint64_t reserved_27_63 : 37; 4073#endif 4074 } 
cn56xx; 4075 struct cvmx_l2c_lfb2_cn56xx cn56xxp1; 4076 struct cvmx_l2c_lfb2_cn56xx cn58xx; 4077 struct cvmx_l2c_lfb2_cn56xx cn58xxp1; 4078}; 4079typedef union cvmx_l2c_lfb2 cvmx_l2c_lfb2_t; 4080 4081/** 4082 * cvmx_l2c_lfb3 4083 * 4084 * L2C_LFB3 = L2C LFB DEBUG 3 Register 4085 * 4086 * Description: LFB High Water Mark Register 4087 */ 4088union cvmx_l2c_lfb3 4089{ 4090 uint64_t u64; 4091 struct cvmx_l2c_lfb3_s 4092 { 4093#if __BYTE_ORDER == __BIG_ENDIAN 4094 uint64_t reserved_5_63 : 59; 4095 uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable 4096 When clear, all STP/C(store partials) will take 2 cycles 4097 to complete (power-on default). 4098 When set, all STP/C(store partials) will take 4 cycles 4099 to complete. 4100 NOTE: It is recommended to keep this bit ALWAYS ZERO. */ 4101 uint64_t lfb_hwm : 4; /**< LFB High Water Mark 4102 Determines \#of LFB Entries in use before backpressure 4103 is asserted. 4104 HWM=0: 1 LFB Entry available 4105 - ... 4106 HWM=15: 16 LFB Entries available */ 4107#else 4108 uint64_t lfb_hwm : 4; 4109 uint64_t stpartdis : 1; 4110 uint64_t reserved_5_63 : 59; 4111#endif 4112 } s; 4113 struct cvmx_l2c_lfb3_cn30xx 4114 { 4115#if __BYTE_ORDER == __BIG_ENDIAN 4116 uint64_t reserved_5_63 : 59; 4117 uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable 4118 When clear, all STP/C(store partials) will take 2 cycles 4119 to complete (power-on default). 4120 When set, all STP/C(store partials) will take 4 cycles 4121 to complete. 4122 NOTE: It is recommended to keep this bit ALWAYS ZERO. */ 4123 uint64_t reserved_2_3 : 2; 4124 uint64_t lfb_hwm : 2; /**< LFB High Water Mark 4125 Determines \#of LFB Entries in use before backpressure 4126 is asserted. 4127 HWM=0: 1 LFB Entry available 4128 - ... 
4129 HWM=3: 4 LFB Entries available */ 4130#else 4131 uint64_t lfb_hwm : 2; 4132 uint64_t reserved_2_3 : 2; 4133 uint64_t stpartdis : 1; 4134 uint64_t reserved_5_63 : 59; 4135#endif 4136 } cn30xx; 4137 struct cvmx_l2c_lfb3_cn31xx 4138 { 4139#if __BYTE_ORDER == __BIG_ENDIAN 4140 uint64_t reserved_5_63 : 59; 4141 uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable 4142 When clear, all STP/C(store partials) will take 2 cycles 4143 to complete (power-on default). 4144 When set, all STP/C(store partials) will take 4 cycles 4145 to complete. 4146 NOTE: It is recommended to keep this bit ALWAYS ZERO. */ 4147 uint64_t reserved_3_3 : 1; 4148 uint64_t lfb_hwm : 3; /**< LFB High Water Mark 4149 Determines \#of LFB Entries in use before backpressure 4150 is asserted. 4151 HWM=0: 1 LFB Entry available 4152 - ... 4153 HWM=7: 8 LFB Entries available */ 4154#else 4155 uint64_t lfb_hwm : 3; 4156 uint64_t reserved_3_3 : 1; 4157 uint64_t stpartdis : 1; 4158 uint64_t reserved_5_63 : 59; 4159#endif 4160 } cn31xx; 4161 struct cvmx_l2c_lfb3_s cn38xx; 4162 struct cvmx_l2c_lfb3_s cn38xxp2; 4163 struct cvmx_l2c_lfb3_cn31xx cn50xx; 4164 struct cvmx_l2c_lfb3_cn31xx cn52xx; 4165 struct cvmx_l2c_lfb3_cn31xx cn52xxp1; 4166 struct cvmx_l2c_lfb3_s cn56xx; 4167 struct cvmx_l2c_lfb3_s cn56xxp1; 4168 struct cvmx_l2c_lfb3_s cn58xx; 4169 struct cvmx_l2c_lfb3_s cn58xxp1; 4170}; 4171typedef union cvmx_l2c_lfb3 cvmx_l2c_lfb3_t; 4172 4173/** 4174 * cvmx_l2c_oob 4175 * 4176 * L2C_OOB = L2C Out of Bounds Global Enables 4177 * 4178 * Description: Defines DMA "Out of Bounds" global enables. 4179 */ 4180union cvmx_l2c_oob 4181{ 4182 uint64_t u64; 4183 struct cvmx_l2c_oob_s 4184 { 4185#if __BYTE_ORDER == __BIG_ENDIAN 4186 uint64_t reserved_2_63 : 62; 4187 uint64_t dwbena : 1; /**< DMA Out of Bounds Range Checker for DMA DWB 4188 commands (Don't WriteBack). 
4189 When enabled, any DMA DWB commands which hit 1-of-3 4190 out of bounds regions will be logged into 4191 L2C_INT_STAT[OOB*] CSRs and the DMA store WILL 4192 NOT occur. If the corresponding L2C_INT_EN[OOB*] 4193 is enabled, an interrupt will also be reported. */ 4194 uint64_t stena : 1; /**< DMA Out of Bounds Range Checker for DMA store 4195 commands (STF/P/T). 4196 When enabled, any DMA store commands (STF/P/T) which 4197 hit 1-of-3 out of bounds regions will be logged into 4198 L2C_INT_STAT[OOB*] CSRs and the DMA store WILL 4199 NOT occur. If the corresponding L2C_INT_EN[OOB*] 4200 is enabled, an interrupt will also be reported. */ 4201#else 4202 uint64_t stena : 1; 4203 uint64_t dwbena : 1; 4204 uint64_t reserved_2_63 : 62; 4205#endif 4206 } s; 4207 struct cvmx_l2c_oob_s cn52xx; 4208 struct cvmx_l2c_oob_s cn52xxp1; 4209 struct cvmx_l2c_oob_s cn56xx; 4210 struct cvmx_l2c_oob_s cn56xxp1; 4211}; 4212typedef union cvmx_l2c_oob cvmx_l2c_oob_t; 4213 4214/** 4215 * cvmx_l2c_oob1 4216 * 4217 * L2C_OOB1 = L2C Out of Bounds Range Checker 4218 * 4219 * Description: Defines DMA "Out of Bounds" region \#1. If a DMA initiated write transaction generates an address 4220 * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. 4221 */ 4222union cvmx_l2c_oob1 4223{ 4224 uint64_t u64; 4225 struct cvmx_l2c_oob1_s 4226 { 4227#if __BYTE_ORDER == __BIG_ENDIAN 4228 uint64_t fadr : 27; /**< DMA initated Memory Range Checker Failing Address 4229 When L2C_INT_STAT[OOB1]=1, this field indicates the 4230 DMA cacheline address. 4231 (addr[33:7] = full cacheline address captured) 4232 NOTE: FADR is locked down until L2C_INT_STAT[OOB1] 4233 is cleared. */ 4234 uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command 4235 When L2C_INT_STAT[OOB1]=1, this field indicates the 4236 type of DMA command. 4237 - 0: ST* (STF/P/T) 4238 - 1: DWB (Don't WriteBack) 4239 NOTE: FSRC is locked down until L2C_INT_STAT[OOB1] 4240 is cleared. 
*/ 4241        uint64_t reserved_34_35               : 2; 4242        uint64_t sadr                         : 14; /**< DMA initiated Memory Range Checker Starting Address 4243                                                         (1MB granularity) */ 4244        uint64_t reserved_14_19               : 6; 4245        uint64_t size                         : 14; /**< DMA Out of Bounds Range Checker Size 4246                                                         (1MB granularity) 4247                                                         Example: 0: 0MB / 1: 1MB 4248                                                         The range check is for: 4249                                                            (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) 4250                                                         SW NOTE: SADR+SIZE could be setup to potentially wrap 4251                                                         the 34bit ending bounds address. */ 4252#else 4253        uint64_t size                         : 14; 4254        uint64_t reserved_14_19               : 6; 4255        uint64_t sadr                         : 14; 4256        uint64_t reserved_34_35               : 2; 4257        uint64_t fsrc                         : 1; 4258        uint64_t fadr                         : 27; 4259#endif 4260    } s; 4261    struct cvmx_l2c_oob1_s                cn52xx; 4262    struct cvmx_l2c_oob1_s                cn52xxp1; 4263    struct cvmx_l2c_oob1_s                cn56xx; 4264    struct cvmx_l2c_oob1_s                cn56xxp1; 4265}; 4266typedef union cvmx_l2c_oob1 cvmx_l2c_oob1_t; 4267 4268/** 4269 * cvmx_l2c_oob2 4270 * 4271 * L2C_OOB2 = L2C Out of Bounds Range Checker 4272 * 4273 * Description: Defines DMA "Out of Bounds" region \#2. If a DMA initiated write transaction generates an address 4274 * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. 4275 */ 4276union cvmx_l2c_oob2 4277{ 4278    uint64_t u64; 4279    struct cvmx_l2c_oob2_s 4280    { 4281#if __BYTE_ORDER == __BIG_ENDIAN 4282        uint64_t fadr                         : 27; /**< DMA initiated Memory Range Checker Failing Address 4283                                                         When L2C_INT_STAT[OOB2]=1, this field indicates the 4284                                                         DMA cacheline address. 4285                                                         (addr[33:7] = full cacheline address captured) 4286                                                         NOTE: FADR is locked down until L2C_INT_STAT[OOB2] 4287                                                         is cleared. */ 4288        uint64_t fsrc                         : 1;  /**< DMA Out of Bounds Failing Source Command 4289                                                         When L2C_INT_STAT[OOB2]=1, this field indicates the 4290                                                         type of DMA command. 4291                                                         - 0: ST* (STF/P/T) 4292                                                         - 1: DWB (Don't WriteBack) 4293                                                         NOTE: FSRC is locked down until L2C_INT_STAT[OOB2] 4294                                                         is cleared. 
*/ 4295        uint64_t reserved_34_35               : 2; 4296        uint64_t sadr                         : 14; /**< DMA initiated Memory Range Checker Starting Address 4297                                                         (1MB granularity) */ 4298        uint64_t reserved_14_19               : 6; 4299        uint64_t size                         : 14; /**< DMA Out of Bounds Range Checker Size 4300                                                         (1MB granularity) 4301                                                         Example: 0: 0MB / 1: 1MB 4302                                                         The range check is for: 4303                                                            (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) 4304                                                         SW NOTE: SADR+SIZE could be setup to potentially wrap 4305                                                         the 34bit ending bounds address. */ 4306#else 4307        uint64_t size                         : 14; 4308        uint64_t reserved_14_19               : 6; 4309        uint64_t sadr                         : 14; 4310        uint64_t reserved_34_35               : 2; 4311        uint64_t fsrc                         : 1; 4312        uint64_t fadr                         : 27; 4313#endif 4314    } s; 4315    struct cvmx_l2c_oob2_s                cn52xx; 4316    struct cvmx_l2c_oob2_s                cn52xxp1; 4317    struct cvmx_l2c_oob2_s                cn56xx; 4318    struct cvmx_l2c_oob2_s                cn56xxp1; 4319}; 4320typedef union cvmx_l2c_oob2 cvmx_l2c_oob2_t; 4321 4322/** 4323 * cvmx_l2c_oob3 4324 * 4325 * L2C_OOB3 = L2C Out of Bounds Range Checker 4326 * 4327 * Description: Defines DMA "Out of Bounds" region \#3. If a DMA initiated write transaction generates an address 4328 * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. 4329 */ 4330union cvmx_l2c_oob3 4331{ 4332    uint64_t u64; 4333    struct cvmx_l2c_oob3_s 4334    { 4335#if __BYTE_ORDER == __BIG_ENDIAN 4336        uint64_t fadr                         : 27; /**< DMA initiated Memory Range Checker Failing Address 4337                                                         When L2C_INT_STAT[OOB3]=1, this field indicates the 4338                                                         DMA cacheline address. 4339                                                         (addr[33:7] = full cacheline address captured) 4340                                                         NOTE: FADR is locked down until L2C_INT_STAT[OOB3] 4341                                                         is cleared. */ 4342        uint64_t fsrc                         : 1;  /**< DMA Out of Bounds Failing Source Command 4343                                                         When L2C_INT_STAT[OOB3]=1, this field indicates the 4344                                                         type of DMA command. 4345                                                         - 0: ST* (STF/P/T) 4346                                                         - 1: DWB (Don't WriteBack) 4347                                                         NOTE: FSRC is locked down until L2C_INT_STAT[OOB3] 4348                                                         is cleared. 
*/ 4349        uint64_t reserved_34_35               : 2; 4350        uint64_t sadr                         : 14; /**< DMA initiated Memory Range Checker Starting Address 4351                                                         (1MB granularity) */ 4352        uint64_t reserved_14_19               : 6; 4353        uint64_t size                         : 14; /**< DMA Out of Bounds Range Checker Size 4354                                                         (1MB granularity) 4355                                                         Example: 0: 0MB / 1: 1MB 4356                                                         The range check is for: 4357                                                            (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) 4358                                                         SW NOTE: SADR+SIZE could be setup to potentially wrap 4359                                                         the 34bit ending bounds address. */ 4360#else 4361        uint64_t size                         : 14; 4362        uint64_t reserved_14_19               : 6; 4363        uint64_t sadr                         : 14; 4364        uint64_t reserved_34_35               : 2; 4365        uint64_t fsrc                         : 1; 4366        uint64_t fadr                         : 27; 4367#endif 4368    } s; 4369    struct cvmx_l2c_oob3_s                cn52xx; 4370    struct cvmx_l2c_oob3_s                cn52xxp1; 4371    struct cvmx_l2c_oob3_s                cn56xx; 4372    struct cvmx_l2c_oob3_s                cn56xxp1; 4373}; 4374typedef union cvmx_l2c_oob3 cvmx_l2c_oob3_t; 4375 4376/** 4377 * cvmx_l2c_pfc# 4378 * 4379 * L2C_PFC0 = L2 Performance Counter \#0 4380 * 4381 * Description: 4382 */ 4383union cvmx_l2c_pfcx 4384{ 4385    uint64_t u64; 4386    struct cvmx_l2c_pfcx_s 4387    { 4388#if __BYTE_ORDER == __BIG_ENDIAN 4389        uint64_t reserved_36_63               : 28; 4390        uint64_t pfcnt0                       : 36; /**< Performance Counter \#0 */ 4391#else 4392        uint64_t pfcnt0                       : 36; 4393        uint64_t reserved_36_63               : 28; 4394#endif 4395    } s; 4396    struct cvmx_l2c_pfcx_s                cn30xx; 4397    struct cvmx_l2c_pfcx_s                cn31xx; 4398    struct cvmx_l2c_pfcx_s                cn38xx; 4399    struct cvmx_l2c_pfcx_s                cn38xxp2; 4400    struct cvmx_l2c_pfcx_s                cn50xx; 4401    struct cvmx_l2c_pfcx_s                cn52xx; 4402    struct cvmx_l2c_pfcx_s                cn52xxp1; 4403    struct cvmx_l2c_pfcx_s                cn56xx; 4404    struct cvmx_l2c_pfcx_s                cn56xxp1; 4405    struct cvmx_l2c_pfcx_s                cn58xx; 4406    struct cvmx_l2c_pfcx_s                cn58xxp1; 4407}; 4408typedef union cvmx_l2c_pfcx cvmx_l2c_pfcx_t; 4409 4410/** 4411 * cvmx_l2c_pfctl 4412 * 4413 * L2C_PFCTL = L2 Performance Counter Control Register 4414 * 4415 * Description: Controls the actions of the 4 Performance Counters 4416 * 4417 * Notes: 4418 * - There are 
four 36b performance counter registers which can simultaneously count events. 4419 *   Each Counter's event is programmably selected via the corresponding CNTxSEL field: 4420 *        CNTxSEL[5:0]    Event 4421 *     -----------------+----------------------- 4422 *              0        | Cycles 4423 *              1        | L2 LDI Command Miss (NOTE: Both PP and IOB are capable of generating LDI) 4424 *              2        | L2 LDI Command Hit  (NOTE: Both PP and IOB are capable of generating LDI) 4425 *              3        | L2 non-LDI Command Miss 4426 *              4        | L2 non-LDI Command Hit 4427 *              5        | L2 Miss (total) 4428 *              6        | L2 Hit (total) 4429 *              7        | L2 Victim Buffer Hit (Retry Probe) 4430 *              8        | LFB-NQ Index Conflict 4431 *              9        | L2 Tag Probe (issued - could be VB-Retried) 4432 *             10        | L2 Tag Update (completed - note: some CMD types do not update) 4433 *             11        | L2 Tag Probe Completed (beyond VB-RTY window) 4434 *             12        | L2 Tag Dirty Victim 4435 *             13        | L2 Data Store NOP 4436 *             14        | L2 Data Store READ 4437 *             15        | L2 Data Store WRITE 4438 *             16        | Memory Fill Data valid (1 strobe/32B) 4439 *             17        | Memory Write Request 4440 *             18        | Memory Read Request 4441 *             19        | Memory Write Data valid (1 strobe/32B) 4442 *             20        | XMC NOP (XMC Bus Idle) 4443 *             21        | XMC LDT (Load-Through Request) 4444 *             22        | XMC LDI (L2 Load I-Stream Request) 4445 *             23        | XMC LDD (L2 Load D-stream Request) 4446 *             24        | XMC STF (L2 Store Full cacheline Request) 4447 *             25        | XMC STT (L2 Store Through Request) 4448 *             26        | XMC STP (L2 Store Partial Request) 4449 *             27        | XMC STC (L2 Store Conditional Request) 4450 *             28        | XMC DWB (L2 Don't WriteBack Request) 4451 *             29        | XMC PL2 (L2 Prefetch Request) 4452 *             30        | XMC PSL1 (L1 Prefetch Request) 4453 *             31        | XMC IOBLD 4454 *             32        | XMC IOBST 4455 *             33        | XMC IOBDMA 4456 *             34        | XMC IOBRSP 4457 *             35        | XMD Bus valid (all) 4458 *             36        | XMD Bus valid (DST=L2C) Memory Data 4459 *             37        | XMD Bus valid (DST=IOB) REFL Data 4460 *             38        | XMD Bus valid (DST=PP) IOBRSP Data 4461 *             39        | RSC NOP 4462 *             40        | RSC STDN 4463 *             41        | RSC FILL 4464 *             42        | RSC REFL 4465 *             43        | RSC STIN 4466 *             44        | RSC 
SCIN 4467 * 45 | RSC SCFL 4468 * 46 | RSC SCDN 4469 * 47 | RSD Data Valid 4470 * 48 | RSD Data Valid (FILL) 4471 * 49 | RSD Data Valid (STRSP) 4472 * 50 | RSD Data Valid (REFL) 4473 * 51 | LRF-REQ (LFB-NQ) 4474 * 52 | DT RD-ALLOC (LDD/PSL1 Commands) 4475 * 53 | DT WR-INVAL (ST* Commands) 4476 */ 4477union cvmx_l2c_pfctl 4478{ 4479 uint64_t u64; 4480 struct cvmx_l2c_pfctl_s 4481 { 4482#if __BYTE_ORDER == __BIG_ENDIAN 4483 uint64_t reserved_36_63 : 28; 4484 uint64_t cnt3rdclr : 1; /**< Performance Counter 3 Read Clear 4485 When set, all CSR reads of the L2C_PFC3 4486 register will auto-clear the counter. This allows 4487 SW to maintain 'cumulative' counters in SW. 4488 NOTE: If the CSR read occurs in the same cycle as 4489 the 'event' to be counted, the counter will 4490 properly reflect the event. */ 4491 uint64_t cnt2rdclr : 1; /**< Performance Counter 2 Read Clear 4492 When set, all CSR reads of the L2C_PFC2 4493 register will auto-clear the counter. This allows 4494 SW to maintain 'cumulative' counters in SW. 4495 NOTE: If the CSR read occurs in the same cycle as 4496 the 'event' to be counted, the counter will 4497 properly reflect the event. */ 4498 uint64_t cnt1rdclr : 1; /**< Performance Counter 1 Read Clear 4499 When set, all CSR reads of the L2C_PFC1 4500 register will auto-clear the counter. This allows 4501 SW to maintain 'cumulative' counters in SW. 4502 NOTE: If the CSR read occurs in the same cycle as 4503 the 'event' to be counted, the counter will 4504 properly reflect the event. */ 4505 uint64_t cnt0rdclr : 1; /**< Performance Counter 0 Read Clear 4506 When set, all CSR reads of the L2C_PFC0 4507 register will 'auto-clear' the counter. This allows 4508 SW to maintain accurate 'cumulative' counters. 4509 NOTE: If the CSR read occurs in the same cycle as 4510 the 'event' to be counted, the counter will 4511 properly reflect the event. 
*/ 4512 uint64_t cnt3ena : 1; /**< Performance Counter 3 Enable 4513 When this bit is set, the performance counter 4514 is enabled. */ 4515 uint64_t cnt3clr : 1; /**< Performance Counter 3 Clear 4516 When the CSR write occurs, if this bit is set, 4517 the performance counter is cleared. Otherwise, 4518 it will resume counting from its current value. */ 4519 uint64_t cnt3sel : 6; /**< Performance Counter 3 Event Selector 4520 (see list of selectable events to count in NOTES) */ 4521 uint64_t cnt2ena : 1; /**< Performance Counter 2 Enable 4522 When this bit is set, the performance counter 4523 is enabled. */ 4524 uint64_t cnt2clr : 1; /**< Performance Counter 2 Clear 4525 When the CSR write occurs, if this bit is set, 4526 the performance counter is cleared. Otherwise, 4527 it will resume counting from its current value. */ 4528 uint64_t cnt2sel : 6; /**< Performance Counter 2 Event Selector 4529 (see list of selectable events to count in NOTES) */ 4530 uint64_t cnt1ena : 1; /**< Performance Counter 1 Enable 4531 When this bit is set, the performance counter 4532 is enabled. */ 4533 uint64_t cnt1clr : 1; /**< Performance Counter 1 Clear 4534 When the CSR write occurs, if this bit is set, 4535 the performance counter is cleared. Otherwise, 4536 it will resume counting from its current value. */ 4537 uint64_t cnt1sel : 6; /**< Performance Counter 1 Event Selector 4538 (see list of selectable events to count in NOTES) */ 4539 uint64_t cnt0ena : 1; /**< Performance Counter 0 Enable 4540 When this bit is set, the performance counter 4541 is enabled. */ 4542 uint64_t cnt0clr : 1; /**< Performance Counter 0 Clear 4543 When the CSR write occurs, if this bit is set, 4544 the performance counter is cleared. Otherwise, 4545 it will resume counting from its current value. 
*/ 4546 uint64_t cnt0sel : 6; /**< Performance Counter 0 Event Selector 4547 (see list of selectable events to count in NOTES) */ 4548#else 4549 uint64_t cnt0sel : 6; 4550 uint64_t cnt0clr : 1; 4551 uint64_t cnt0ena : 1; 4552 uint64_t cnt1sel : 6; 4553 uint64_t cnt1clr : 1; 4554 uint64_t cnt1ena : 1; 4555 uint64_t cnt2sel : 6; 4556 uint64_t cnt2clr : 1; 4557 uint64_t cnt2ena : 1; 4558 uint64_t cnt3sel : 6; 4559 uint64_t cnt3clr : 1; 4560 uint64_t cnt3ena : 1; 4561 uint64_t cnt0rdclr : 1; 4562 uint64_t cnt1rdclr : 1; 4563 uint64_t cnt2rdclr : 1; 4564 uint64_t cnt3rdclr : 1; 4565 uint64_t reserved_36_63 : 28; 4566#endif 4567 } s; 4568 struct cvmx_l2c_pfctl_s cn30xx; 4569 struct cvmx_l2c_pfctl_s cn31xx; 4570 struct cvmx_l2c_pfctl_s cn38xx; 4571 struct cvmx_l2c_pfctl_s cn38xxp2; 4572 struct cvmx_l2c_pfctl_s cn50xx; 4573 struct cvmx_l2c_pfctl_s cn52xx; 4574 struct cvmx_l2c_pfctl_s cn52xxp1; 4575 struct cvmx_l2c_pfctl_s cn56xx; 4576 struct cvmx_l2c_pfctl_s cn56xxp1; 4577 struct cvmx_l2c_pfctl_s cn58xx; 4578 struct cvmx_l2c_pfctl_s cn58xxp1; 4579}; 4580typedef union cvmx_l2c_pfctl cvmx_l2c_pfctl_t; 4581 4582/** 4583 * cvmx_l2c_ppgrp 4584 * 4585 * L2C_PPGRP = L2C PP Group Number 4586 * 4587 * Description: Defines the PP(Packet Processor) PLC Group \# (0,1,2) 4588 */ 4589union cvmx_l2c_ppgrp 4590{ 4591 uint64_t u64; 4592 struct cvmx_l2c_ppgrp_s 4593 { 4594#if __BYTE_ORDER == __BIG_ENDIAN 4595 uint64_t reserved_24_63 : 40; 4596 uint64_t pp11grp : 2; /**< PP11 PLC Group# (0,1,2) */ 4597 uint64_t pp10grp : 2; /**< PP10 PLC Group# (0,1,2) */ 4598 uint64_t pp9grp : 2; /**< PP9 PLC Group# (0,1,2) */ 4599 uint64_t pp8grp : 2; /**< PP8 PLC Group# (0,1,2) */ 4600 uint64_t pp7grp : 2; /**< PP7 PLC Group# (0,1,2) */ 4601 uint64_t pp6grp : 2; /**< PP6 PLC Group# (0,1,2) */ 4602 uint64_t pp5grp : 2; /**< PP5 PLC Group# (0,1,2) */ 4603 uint64_t pp4grp : 2; /**< PP4 PLC Group# (0,1,2) */ 4604 uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */ 4605 uint64_t pp2grp : 2; /**< PP2 PLC Group# 
(0,1,2) */ 4606 uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */ 4607 uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */ 4608#else 4609 uint64_t pp0grp : 2; 4610 uint64_t pp1grp : 2; 4611 uint64_t pp2grp : 2; 4612 uint64_t pp3grp : 2; 4613 uint64_t pp4grp : 2; 4614 uint64_t pp5grp : 2; 4615 uint64_t pp6grp : 2; 4616 uint64_t pp7grp : 2; 4617 uint64_t pp8grp : 2; 4618 uint64_t pp9grp : 2; 4619 uint64_t pp10grp : 2; 4620 uint64_t pp11grp : 2; 4621 uint64_t reserved_24_63 : 40; 4622#endif 4623 } s; 4624 struct cvmx_l2c_ppgrp_cn52xx 4625 { 4626#if __BYTE_ORDER == __BIG_ENDIAN 4627 uint64_t reserved_8_63 : 56; 4628 uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */ 4629 uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */ 4630 uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */ 4631 uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */ 4632#else 4633 uint64_t pp0grp : 2; 4634 uint64_t pp1grp : 2; 4635 uint64_t pp2grp : 2; 4636 uint64_t pp3grp : 2; 4637 uint64_t reserved_8_63 : 56; 4638#endif 4639 } cn52xx; 4640 struct cvmx_l2c_ppgrp_cn52xx cn52xxp1; 4641 struct cvmx_l2c_ppgrp_s cn56xx; 4642 struct cvmx_l2c_ppgrp_s cn56xxp1; 4643}; 4644typedef union cvmx_l2c_ppgrp cvmx_l2c_ppgrp_t; 4645 4646/** 4647 * cvmx_l2c_qos_iob# 4648 * 4649 * L2C_QOS_IOB = L2C IOB QOS level 4650 * 4651 * Description: 4652 */ 4653union cvmx_l2c_qos_iobx 4654{ 4655 uint64_t u64; 4656 struct cvmx_l2c_qos_iobx_s 4657 { 4658#if __BYTE_ORDER == __BIG_ENDIAN 4659 uint64_t reserved_6_63 : 58; 4660 uint64_t dwblvl : 2; /**< QOS level for DWB commands. */ 4661 uint64_t reserved_2_3 : 2; 4662 uint64_t lvl : 2; /**< QOS level for non-DWB commands. 
*/ 4663#else 4664 uint64_t lvl : 2; 4665 uint64_t reserved_2_3 : 2; 4666 uint64_t dwblvl : 2; 4667 uint64_t reserved_6_63 : 58; 4668#endif 4669 } s; 4670 struct cvmx_l2c_qos_iobx_s cn63xx; 4671 struct cvmx_l2c_qos_iobx_s cn63xxp1; 4672}; 4673typedef union cvmx_l2c_qos_iobx cvmx_l2c_qos_iobx_t; 4674 4675/** 4676 * cvmx_l2c_qos_pp# 4677 * 4678 * L2C_QOS_PP = L2C PP QOS level 4679 * 4680 * Description: 4681 */ 4682union cvmx_l2c_qos_ppx 4683{ 4684 uint64_t u64; 4685 struct cvmx_l2c_qos_ppx_s 4686 { 4687#if __BYTE_ORDER == __BIG_ENDIAN 4688 uint64_t reserved_2_63 : 62; 4689 uint64_t lvl : 2; /**< QOS level to use for this PP. */ 4690#else 4691 uint64_t lvl : 2; 4692 uint64_t reserved_2_63 : 62; 4693#endif 4694 } s; 4695 struct cvmx_l2c_qos_ppx_s cn63xx; 4696 struct cvmx_l2c_qos_ppx_s cn63xxp1; 4697}; 4698typedef union cvmx_l2c_qos_ppx cvmx_l2c_qos_ppx_t; 4699 4700/** 4701 * cvmx_l2c_qos_wgt 4702 * 4703 * L2C_QOS_WGT = L2C QOS weights 4704 * 4705 */ 4706union cvmx_l2c_qos_wgt 4707{ 4708 uint64_t u64; 4709 struct cvmx_l2c_qos_wgt_s 4710 { 4711#if __BYTE_ORDER == __BIG_ENDIAN 4712 uint64_t reserved_32_63 : 32; 4713 uint64_t wgt3 : 8; /**< Weight for QOS level 3 */ 4714 uint64_t wgt2 : 8; /**< Weight for QOS level 2 */ 4715 uint64_t wgt1 : 8; /**< Weight for QOS level 1 */ 4716 uint64_t wgt0 : 8; /**< Weight for QOS level 0 */ 4717#else 4718 uint64_t wgt0 : 8; 4719 uint64_t wgt1 : 8; 4720 uint64_t wgt2 : 8; 4721 uint64_t wgt3 : 8; 4722 uint64_t reserved_32_63 : 32; 4723#endif 4724 } s; 4725 struct cvmx_l2c_qos_wgt_s cn63xx; 4726 struct cvmx_l2c_qos_wgt_s cn63xxp1; 4727}; 4728typedef union cvmx_l2c_qos_wgt cvmx_l2c_qos_wgt_t; 4729 4730/** 4731 * cvmx_l2c_rsc#_pfc 4732 * 4733 * L2C_RSC_PFC = L2C RSC Performance Counter(s) 4734 * 4735 */ 4736union cvmx_l2c_rscx_pfc 4737{ 4738 uint64_t u64; 4739 struct cvmx_l2c_rscx_pfc_s 4740 { 4741#if __BYTE_ORDER == __BIG_ENDIAN 4742 uint64_t count : 64; /**< Current counter value */ 4743#else 4744 uint64_t count : 64; 4745#endif 4746 } s; 
4747 struct cvmx_l2c_rscx_pfc_s cn63xx; 4748 struct cvmx_l2c_rscx_pfc_s cn63xxp1; 4749}; 4750typedef union cvmx_l2c_rscx_pfc cvmx_l2c_rscx_pfc_t; 4751 4752/** 4753 * cvmx_l2c_rsd#_pfc 4754 * 4755 * L2C_RSD_PFC = L2C RSD Performance Counter(s) 4756 * 4757 */ 4758union cvmx_l2c_rsdx_pfc 4759{ 4760 uint64_t u64; 4761 struct cvmx_l2c_rsdx_pfc_s 4762 { 4763#if __BYTE_ORDER == __BIG_ENDIAN 4764 uint64_t count : 64; /**< Current counter value */ 4765#else 4766 uint64_t count : 64; 4767#endif 4768 } s; 4769 struct cvmx_l2c_rsdx_pfc_s cn63xx; 4770 struct cvmx_l2c_rsdx_pfc_s cn63xxp1; 4771}; 4772typedef union cvmx_l2c_rsdx_pfc cvmx_l2c_rsdx_pfc_t; 4773 4774/** 4775 * cvmx_l2c_spar0 4776 * 4777 * L2C_SPAR0 = L2 Set Partitioning Register (PP0-3) 4778 * 4779 * Description: L2 Set Partitioning Register 4780 * 4781 * Notes: 4782 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 4783 * set for replacement. 4784 * - There MUST ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 4785 * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers 4786 * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers 4787 */ 4788union cvmx_l2c_spar0 4789{ 4790 uint64_t u64; 4791 struct cvmx_l2c_spar0_s 4792 { 4793#if __BYTE_ORDER == __BIG_ENDIAN 4794 uint64_t reserved_32_63 : 32; 4795 uint64_t umsk3 : 8; /**< PP[3] L2 'DO NOT USE' set partition mask */ 4796 uint64_t umsk2 : 8; /**< PP[2] L2 'DO NOT USE' set partition mask */ 4797 uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */ 4798 uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */ 4799#else 4800 uint64_t umsk0 : 8; 4801 uint64_t umsk1 : 8; 4802 uint64_t umsk2 : 8; 4803 uint64_t umsk3 : 8; 4804 uint64_t reserved_32_63 : 32; 4805#endif 4806 } s; 4807 struct cvmx_l2c_spar0_cn30xx 4808 { 4809#if __BYTE_ORDER == __BIG_ENDIAN 4810 uint64_t reserved_4_63 : 
60; 4811 uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */ 4812#else 4813 uint64_t umsk0 : 4; 4814 uint64_t reserved_4_63 : 60; 4815#endif 4816 } cn30xx; 4817 struct cvmx_l2c_spar0_cn31xx 4818 { 4819#if __BYTE_ORDER == __BIG_ENDIAN 4820 uint64_t reserved_12_63 : 52; 4821 uint64_t umsk1 : 4; /**< PP[1] L2 'DO NOT USE' set partition mask */ 4822 uint64_t reserved_4_7 : 4; 4823 uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */ 4824#else 4825 uint64_t umsk0 : 4; 4826 uint64_t reserved_4_7 : 4; 4827 uint64_t umsk1 : 4; 4828 uint64_t reserved_12_63 : 52; 4829#endif 4830 } cn31xx; 4831 struct cvmx_l2c_spar0_s cn38xx; 4832 struct cvmx_l2c_spar0_s cn38xxp2; 4833 struct cvmx_l2c_spar0_cn50xx 4834 { 4835#if __BYTE_ORDER == __BIG_ENDIAN 4836 uint64_t reserved_16_63 : 48; 4837 uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */ 4838 uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */ 4839#else 4840 uint64_t umsk0 : 8; 4841 uint64_t umsk1 : 8; 4842 uint64_t reserved_16_63 : 48; 4843#endif 4844 } cn50xx; 4845 struct cvmx_l2c_spar0_s cn52xx; 4846 struct cvmx_l2c_spar0_s cn52xxp1; 4847 struct cvmx_l2c_spar0_s cn56xx; 4848 struct cvmx_l2c_spar0_s cn56xxp1; 4849 struct cvmx_l2c_spar0_s cn58xx; 4850 struct cvmx_l2c_spar0_s cn58xxp1; 4851}; 4852typedef union cvmx_l2c_spar0 cvmx_l2c_spar0_t; 4853 4854/** 4855 * cvmx_l2c_spar1 4856 * 4857 * L2C_SPAR1 = L2 Set Partitioning Register (PP4-7) 4858 * 4859 * Description: L2 Set Partitioning Register 4860 * 4861 * Notes: 4862 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 4863 * set for replacement. 
4864 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 4865 * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers 4866 * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers 4867 */ 4868union cvmx_l2c_spar1 4869{ 4870 uint64_t u64; 4871 struct cvmx_l2c_spar1_s 4872 { 4873#if __BYTE_ORDER == __BIG_ENDIAN 4874 uint64_t reserved_32_63 : 32; 4875 uint64_t umsk7 : 8; /**< PP[7] L2 'DO NOT USE' set partition mask */ 4876 uint64_t umsk6 : 8; /**< PP[6] L2 'DO NOT USE' set partition mask */ 4877 uint64_t umsk5 : 8; /**< PP[5] L2 'DO NOT USE' set partition mask */ 4878 uint64_t umsk4 : 8; /**< PP[4] L2 'DO NOT USE' set partition mask */ 4879#else 4880 uint64_t umsk4 : 8; 4881 uint64_t umsk5 : 8; 4882 uint64_t umsk6 : 8; 4883 uint64_t umsk7 : 8; 4884 uint64_t reserved_32_63 : 32; 4885#endif 4886 } s; 4887 struct cvmx_l2c_spar1_s cn38xx; 4888 struct cvmx_l2c_spar1_s cn38xxp2; 4889 struct cvmx_l2c_spar1_s cn56xx; 4890 struct cvmx_l2c_spar1_s cn56xxp1; 4891 struct cvmx_l2c_spar1_s cn58xx; 4892 struct cvmx_l2c_spar1_s cn58xxp1; 4893}; 4894typedef union cvmx_l2c_spar1 cvmx_l2c_spar1_t; 4895 4896/** 4897 * cvmx_l2c_spar2 4898 * 4899 * L2C_SPAR2 = L2 Set Partitioning Register (PP8-11) 4900 * 4901 * Description: L2 Set Partitioning Register 4902 * 4903 * Notes: 4904 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 4905 * set for replacement. 
4906 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 4907 * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers 4908 * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers 4909 */ 4910union cvmx_l2c_spar2 4911{ 4912 uint64_t u64; 4913 struct cvmx_l2c_spar2_s 4914 { 4915#if __BYTE_ORDER == __BIG_ENDIAN 4916 uint64_t reserved_32_63 : 32; 4917 uint64_t umsk11 : 8; /**< PP[11] L2 'DO NOT USE' set partition mask */ 4918 uint64_t umsk10 : 8; /**< PP[10] L2 'DO NOT USE' set partition mask */ 4919 uint64_t umsk9 : 8; /**< PP[9] L2 'DO NOT USE' set partition mask */ 4920 uint64_t umsk8 : 8; /**< PP[8] L2 'DO NOT USE' set partition mask */ 4921#else 4922 uint64_t umsk8 : 8; 4923 uint64_t umsk9 : 8; 4924 uint64_t umsk10 : 8; 4925 uint64_t umsk11 : 8; 4926 uint64_t reserved_32_63 : 32; 4927#endif 4928 } s; 4929 struct cvmx_l2c_spar2_s cn38xx; 4930 struct cvmx_l2c_spar2_s cn38xxp2; 4931 struct cvmx_l2c_spar2_s cn56xx; 4932 struct cvmx_l2c_spar2_s cn56xxp1; 4933 struct cvmx_l2c_spar2_s cn58xx; 4934 struct cvmx_l2c_spar2_s cn58xxp1; 4935}; 4936typedef union cvmx_l2c_spar2 cvmx_l2c_spar2_t; 4937 4938/** 4939 * cvmx_l2c_spar3 4940 * 4941 * L2C_SPAR3 = L2 Set Partitioning Register (PP12-15) 4942 * 4943 * Description: L2 Set Partitioning Register 4944 * 4945 * Notes: 4946 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 4947 * set for replacement. 
4948 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 4949 * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers 4950 * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers 4951 */ 4952union cvmx_l2c_spar3 4953{ 4954 uint64_t u64; 4955 struct cvmx_l2c_spar3_s 4956 { 4957#if __BYTE_ORDER == __BIG_ENDIAN 4958 uint64_t reserved_32_63 : 32; 4959 uint64_t umsk15 : 8; /**< PP[15] L2 'DO NOT USE' set partition mask */ 4960 uint64_t umsk14 : 8; /**< PP[14] L2 'DO NOT USE' set partition mask */ 4961 uint64_t umsk13 : 8; /**< PP[13] L2 'DO NOT USE' set partition mask */ 4962 uint64_t umsk12 : 8; /**< PP[12] L2 'DO NOT USE' set partition mask */ 4963#else 4964 uint64_t umsk12 : 8; 4965 uint64_t umsk13 : 8; 4966 uint64_t umsk14 : 8; 4967 uint64_t umsk15 : 8; 4968 uint64_t reserved_32_63 : 32; 4969#endif 4970 } s; 4971 struct cvmx_l2c_spar3_s cn38xx; 4972 struct cvmx_l2c_spar3_s cn38xxp2; 4973 struct cvmx_l2c_spar3_s cn58xx; 4974 struct cvmx_l2c_spar3_s cn58xxp1; 4975}; 4976typedef union cvmx_l2c_spar3 cvmx_l2c_spar3_t; 4977 4978/** 4979 * cvmx_l2c_spar4 4980 * 4981 * L2C_SPAR4 = L2 Set Partitioning Register (IOB) 4982 * 4983 * Description: L2 Set Partitioning Register 4984 * 4985 * Notes: 4986 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 4987 * set for replacement. 
4988 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 4989 * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers 4990 * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers 4991 */ 4992union cvmx_l2c_spar4 4993{ 4994 uint64_t u64; 4995 struct cvmx_l2c_spar4_s 4996 { 4997#if __BYTE_ORDER == __BIG_ENDIAN 4998 uint64_t reserved_8_63 : 56; 4999 uint64_t umskiob : 8; /**< IOB L2 'DO NOT USE' set partition mask */ 5000#else 5001 uint64_t umskiob : 8; 5002 uint64_t reserved_8_63 : 56; 5003#endif 5004 } s; 5005 struct cvmx_l2c_spar4_cn30xx 5006 { 5007#if __BYTE_ORDER == __BIG_ENDIAN 5008 uint64_t reserved_4_63 : 60; 5009 uint64_t umskiob : 4; /**< IOB L2 'DO NOT USE' set partition mask */ 5010#else 5011 uint64_t umskiob : 4; 5012 uint64_t reserved_4_63 : 60; 5013#endif 5014 } cn30xx; 5015 struct cvmx_l2c_spar4_cn30xx cn31xx; 5016 struct cvmx_l2c_spar4_s cn38xx; 5017 struct cvmx_l2c_spar4_s cn38xxp2; 5018 struct cvmx_l2c_spar4_s cn50xx; 5019 struct cvmx_l2c_spar4_s cn52xx; 5020 struct cvmx_l2c_spar4_s cn52xxp1; 5021 struct cvmx_l2c_spar4_s cn56xx; 5022 struct cvmx_l2c_spar4_s cn56xxp1; 5023 struct cvmx_l2c_spar4_s cn58xx; 5024 struct cvmx_l2c_spar4_s cn58xxp1; 5025}; 5026typedef union cvmx_l2c_spar4 cvmx_l2c_spar4_t; 5027 5028/** 5029 * cvmx_l2c_tad#_ecc0 5030 * 5031 * L2C_TAD_ECC0 = L2C ECC logging 5032 * 5033 * Description: holds the syndromes for a L2D read generated from L2C_XMC_CMD 5034 */ 5035union cvmx_l2c_tadx_ecc0 5036{ 5037 uint64_t u64; 5038 struct cvmx_l2c_tadx_ecc0_s 5039 { 5040#if __BYTE_ORDER == __BIG_ENDIAN 5041 uint64_t reserved_58_63 : 6; 5042 uint64_t ow3ecc : 10; /**< ECC for OW3 of cache block */ 5043 uint64_t reserved_42_47 : 6; 5044 uint64_t ow2ecc : 10; /**< ECC for OW2 of cache block */ 5045 uint64_t reserved_26_31 : 6; 5046 uint64_t ow1ecc : 10; /**< ECC for OW1 of cache block */ 5047 uint64_t reserved_10_15 : 6; 5048 
uint64_t ow0ecc : 10; /**< ECC for OW0 of cache block */ 5049#else 5050 uint64_t ow0ecc : 10; 5051 uint64_t reserved_10_15 : 6; 5052 uint64_t ow1ecc : 10; 5053 uint64_t reserved_26_31 : 6; 5054 uint64_t ow2ecc : 10; 5055 uint64_t reserved_42_47 : 6; 5056 uint64_t ow3ecc : 10; 5057 uint64_t reserved_58_63 : 6; 5058#endif 5059 } s; 5060 struct cvmx_l2c_tadx_ecc0_s cn63xx; 5061 struct cvmx_l2c_tadx_ecc0_s cn63xxp1; 5062}; 5063typedef union cvmx_l2c_tadx_ecc0 cvmx_l2c_tadx_ecc0_t; 5064 5065/** 5066 * cvmx_l2c_tad#_ecc1 5067 * 5068 * L2C_TAD_ECC1 = L2C ECC logging 5069 * 5070 * Description: holds the syndromes for a L2D read generated from L2C_XMC_CMD 5071 */ 5072union cvmx_l2c_tadx_ecc1 5073{ 5074 uint64_t u64; 5075 struct cvmx_l2c_tadx_ecc1_s 5076 { 5077#if __BYTE_ORDER == __BIG_ENDIAN 5078 uint64_t reserved_58_63 : 6; 5079 uint64_t ow7ecc : 10; /**< ECC for OW7 of cache block */ 5080 uint64_t reserved_42_47 : 6; 5081 uint64_t ow6ecc : 10; /**< ECC for OW6 of cache block */ 5082 uint64_t reserved_26_31 : 6; 5083 uint64_t ow5ecc : 10; /**< ECC for OW5 of cache block */ 5084 uint64_t reserved_10_15 : 6; 5085 uint64_t ow4ecc : 10; /**< ECC for OW4 of cache block */ 5086#else 5087 uint64_t ow4ecc : 10; 5088 uint64_t reserved_10_15 : 6; 5089 uint64_t ow5ecc : 10; 5090 uint64_t reserved_26_31 : 6; 5091 uint64_t ow6ecc : 10; 5092 uint64_t reserved_42_47 : 6; 5093 uint64_t ow7ecc : 10; 5094 uint64_t reserved_58_63 : 6; 5095#endif 5096 } s; 5097 struct cvmx_l2c_tadx_ecc1_s cn63xx; 5098 struct cvmx_l2c_tadx_ecc1_s cn63xxp1; 5099}; 5100typedef union cvmx_l2c_tadx_ecc1 cvmx_l2c_tadx_ecc1_t; 5101 5102/** 5103 * cvmx_l2c_tad#_ien 5104 * 5105 * L2C_TAD_IEN = L2C TAD Interrupt Enable 5106 * 5107 */ 5108union cvmx_l2c_tadx_ien 5109{ 5110 uint64_t u64; 5111 struct cvmx_l2c_tadx_ien_s 5112 { 5113#if __BYTE_ORDER == __BIG_ENDIAN 5114 uint64_t reserved_9_63 : 55; 5115 uint64_t wrdislmc : 1; /**< Illegal Write to Disabled LMC Error enable 5116 Enables L2C_TADX_INT[WRDISLMC] to 5117 assert 
L2C_INT_REG[TADX] (and cause an interrupt) */ 5118 uint64_t rddislmc : 1; /**< Illegal Read to Disabled LMC Error enable 5119 Enables L2C_TADX_INT[RDDISLMC] to 5120 assert L2C_INT_REG[TADX] (and cause an interrupt) */ 5121 uint64_t noway : 1; /**< No way available interrupt enable 5122 Enables L2C_ERR_TTGX[NOWAY] to assert 5123 L2C_INT_REG[TADX] (and cause an interrupt) */ 5124 uint64_t vbfdbe : 1; /**< VBF Double-Bit Error enable 5125 Enables L2C_ERR_TDTX[VSBE] to assert 5126 L2C_INT_REG[TADX] (and cause an interrupt) */ 5127 uint64_t vbfsbe : 1; /**< VBF Single-Bit Error enable 5128 Enables L2C_ERR_TDTX[VSBE] to assert 5129 L2C_INT_REG[TADX] (and cause an interrupt) */ 5130 uint64_t tagdbe : 1; /**< TAG Double-Bit Error enable 5131 Enables L2C_ERR_TTGX[DBE] to assert 5132 L2C_INT_REG[TADX] (and cause an interrupt) */ 5133 uint64_t tagsbe : 1; /**< TAG Single-Bit Error enable 5134 Enables L2C_ERR_TTGX[SBE] to assert 5135 L2C_INT_REG[TADX] (and cause an interrupt) */ 5136 uint64_t l2ddbe : 1; /**< L2D Double-Bit Error enable 5137 Enables L2C_ERR_TDTX[DBE] to assert 5138 L2C_INT_REG[TADX] (and cause an interrupt) */ 5139 uint64_t l2dsbe : 1; /**< L2D Single-Bit Error enable 5140 Enables L2C_ERR_TDTX[SBE] to assert 5141 L2C_INT_REG[TADX] (and cause an interrupt) */ 5142#else 5143 uint64_t l2dsbe : 1; 5144 uint64_t l2ddbe : 1; 5145 uint64_t tagsbe : 1; 5146 uint64_t tagdbe : 1; 5147 uint64_t vbfsbe : 1; 5148 uint64_t vbfdbe : 1; 5149 uint64_t noway : 1; 5150 uint64_t rddislmc : 1; 5151 uint64_t wrdislmc : 1; 5152 uint64_t reserved_9_63 : 55; 5153#endif 5154 } s; 5155 struct cvmx_l2c_tadx_ien_s cn63xx; 5156 struct cvmx_l2c_tadx_ien_cn63xxp1 5157 { 5158#if __BYTE_ORDER == __BIG_ENDIAN 5159 uint64_t reserved_7_63 : 57; 5160 uint64_t noway : 1; /**< No way available interrupt enable 5161 Enables L2C_ERR_TTGX[NOWAY] to assert 5162 L2C_INT_REG[TADX] (and cause an interrupt) */ 5163 uint64_t vbfdbe : 1; /**< VBF Double-Bit Error enable 5164 Enables L2C_ERR_TDTX[VSBE] to 
assert 5165 L2C_INT_REG[TADX] (and cause an interrupt) */ 5166 uint64_t vbfsbe : 1; /**< VBF Single-Bit Error enable 5167 Enables L2C_ERR_TDTX[VSBE] to assert 5168 L2C_INT_REG[TADX] (and cause an interrupt) */ 5169 uint64_t tagdbe : 1; /**< TAG Double-Bit Error enable 5170 Enables L2C_ERR_TTGX[DBE] to assert 5171 L2C_INT_REG[TADX] (and cause an interrupt) */ 5172 uint64_t tagsbe : 1; /**< TAG Single-Bit Error enable 5173 Enables L2C_ERR_TTGX[SBE] to assert 5174 L2C_INT_REG[TADX] (and cause an interrupt) */ 5175 uint64_t l2ddbe : 1; /**< L2D Double-Bit Error enable 5176 Enables L2C_ERR_TDTX[DBE] to assert 5177 L2C_INT_REG[TADX] (and cause an interrupt) */ 5178 uint64_t l2dsbe : 1; /**< L2D Single-Bit Error enable 5179 Enables L2C_ERR_TDTX[SBE] to assert 5180 L2C_INT_REG[TADX] (and cause an interrupt) */ 5181#else 5182 uint64_t l2dsbe : 1; 5183 uint64_t l2ddbe : 1; 5184 uint64_t tagsbe : 1; 5185 uint64_t tagdbe : 1; 5186 uint64_t vbfsbe : 1; 5187 uint64_t vbfdbe : 1; 5188 uint64_t noway : 1; 5189 uint64_t reserved_7_63 : 57; 5190#endif 5191 } cn63xxp1; 5192}; 5193typedef union cvmx_l2c_tadx_ien cvmx_l2c_tadx_ien_t; 5194 5195/** 5196 * cvmx_l2c_tad#_int 5197 * 5198 * L2C_TAD_INT = L2C TAD Interrupt Register (not present in pass 1 O63) 5199 * 5200 * 5201 * Notes: 5202 * L2C_TAD_IEN is the interrupt enable register corresponding to this register. 
 *
 */
union cvmx_l2c_tadx_int
{
    uint64_t u64;
    struct cvmx_l2c_tadx_int_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_9_63 : 55;
        uint64_t wrdislmc      : 1;  /**< Illegal Write to Disabled LMC Error
                                          A DRAM write arrived before the LMC(s) were enabled */
        uint64_t rddislmc      : 1;  /**< Illegal Read to Disabled LMC Error
                                          A DRAM read arrived before the LMC(s) were enabled */
        uint64_t noway         : 1;  /**< No way available interrupt
                                          Shadow copy of L2C_ERR_TTGX[NOWAY]
                                          Writes of 1 also clear L2C_ERR_TTGX[NOWAY] */
        uint64_t vbfdbe        : 1;  /**< VBF Double-Bit Error
                                          Shadow copy of L2C_ERR_TDTX[VDBE]
                                          Writes of 1 also clear L2C_ERR_TDTX[VDBE] */
        uint64_t vbfsbe        : 1;  /**< VBF Single-Bit Error
                                          Shadow copy of L2C_ERR_TDTX[VSBE]
                                          Writes of 1 also clear L2C_ERR_TDTX[VSBE] */
        uint64_t tagdbe        : 1;  /**< TAG Double-Bit Error
                                          Shadow copy of L2C_ERR_TTGX[DBE]
                                          Writes of 1 also clear L2C_ERR_TTGX[DBE] */
        uint64_t tagsbe        : 1;  /**< TAG Single-Bit Error
                                          Shadow copy of L2C_ERR_TTGX[SBE]
                                          Writes of 1 also clear L2C_ERR_TTGX[SBE] */
        uint64_t l2ddbe        : 1;  /**< L2D Double-Bit Error
                                          Shadow copy of L2C_ERR_TDTX[DBE]
                                          Writes of 1 also clear L2C_ERR_TDTX[DBE] */
        uint64_t l2dsbe        : 1;  /**< L2D Single-Bit Error
                                          Shadow copy of L2C_ERR_TDTX[SBE]
                                          Writes of 1 also clear L2C_ERR_TDTX[SBE] */
#else /* little-endian view: identical fields in reverse order */
        uint64_t l2dsbe        : 1;
        uint64_t l2ddbe        : 1;
        uint64_t tagsbe        : 1;
        uint64_t tagdbe        : 1;
        uint64_t vbfsbe        : 1;
        uint64_t vbfdbe        : 1;
        uint64_t noway         : 1;
        uint64_t rddislmc      : 1;
        uint64_t wrdislmc      : 1;
        uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_l2c_tadx_int_s cn63xx;
};
typedef union cvmx_l2c_tadx_int cvmx_l2c_tadx_int_t;

/**
 * cvmx_l2c_tad#_pfc0
 *
 * L2C_TAD_PFC0 = L2C TAD Performance Counter 0
 *
 */
union cvmx_l2c_tadx_pfc0
{
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t count : 64;  /**< Current counter value */
#else
        uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc0_s cn63xx;
    struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
};
typedef union cvmx_l2c_tadx_pfc0 cvmx_l2c_tadx_pfc0_t;

/**
 * cvmx_l2c_tad#_pfc1
 *
 * L2C_TAD_PFC1 = L2C TAD Performance Counter 1
 *
 */
union cvmx_l2c_tadx_pfc1
{
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t count : 64;  /**< Current counter value */
#else
        uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc1_s cn63xx;
    struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
};
typedef union cvmx_l2c_tadx_pfc1 cvmx_l2c_tadx_pfc1_t;

/**
 * cvmx_l2c_tad#_pfc2
 *
 * L2C_TAD_PFC2 = L2C TAD Performance Counter 2
 *
 */
union cvmx_l2c_tadx_pfc2
{
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t count : 64;  /**< Current counter value */
#else
        uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc2_s cn63xx;
    struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
};
typedef union cvmx_l2c_tadx_pfc2 cvmx_l2c_tadx_pfc2_t;

/**
 * cvmx_l2c_tad#_pfc3
 *
 * L2C_TAD_PFC3 = L2C TAD Performance Counter 3
 *
 */
union cvmx_l2c_tadx_pfc3
{
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc3_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t count : 64;  /**< Current counter value */
#else
        uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc3_s cn63xx;
    struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
};
typedef union cvmx_l2c_tadx_pfc3 cvmx_l2c_tadx_pfc3_t;

/**
 * cvmx_l2c_tad#_prf
 *
 * L2C_TAD_PRF = L2C TAD Performance Counter Control
 *
 *
 * Notes:
 * (1) All four counters are equivalent and can use any of the defined selects.
 *
 * (2) the CNTnSEL legal values are:
 *        0x00 -- Nothing (disabled)
 *        0x01 -- L2 Tag Hit
 *        0x02 -- L2 Tag Miss
 *        0x03 -- L2 Tag NoAlloc (forced no-allocate)
 *        0x04 -- L2 Victim
 *        0x05 -- SC Fail
 *        0x06 -- SC Pass
 *        0x07 -- LFB Occupancy (each cycle adds \# of LFBs valid)
 *        0x08 -- LFB Wait LFB (each cycle adds \# LFBs waiting for other LFBs)
 *        0x09 -- LFB Wait VAB (each cycle adds \# LFBs waiting for VAB)
 *        0x80 -- Quad 0 index bus inuse
 *        0x81 -- Quad 0 read data bus inuse
 *        0x82 -- Quad 0 \# banks inuse (0-4/cycle)
 *        0x83 -- Quad 0 wdat flops inuse (0-4/cycle)
 *        0x90 -- Quad 1 index bus inuse
 *        0x91 -- Quad 1 read data bus inuse
 *        0x92 -- Quad 1 \# banks inuse (0-4/cycle)
 *        0x93 -- Quad 1 wdat flops inuse (0-4/cycle)
 *        0xA0 -- Quad 2 index bus inuse
 *        0xA1 -- Quad 2 read data bus inuse
 *        0xA2 -- Quad 2 \# banks inuse (0-4/cycle)
 *        0xA3 -- Quad 2 wdat flops inuse (0-4/cycle)
 *        0xB0 -- Quad 3 index bus inuse
 *        0xB1 -- Quad 3 read data bus inuse
 *        0xB2 -- Quad 3 \# banks inuse (0-4/cycle)
 *        0xB3 -- Quad 3 wdat flops inuse (0-4/cycle)
 */
union cvmx_l2c_tadx_prf
{
    uint64_t u64;
    struct cvmx_l2c_tadx_prf_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt3sel        : 8;  /**< Selects event to count for L2C_TAD_PFC3 */
        uint64_t cnt2sel        : 8;  /**< Selects event to count for L2C_TAD_PFC2 */
        uint64_t cnt1sel        : 8;  /**< Selects event to count for L2C_TAD_PFC1 */
        uint64_t cnt0sel        : 8;  /**< Selects event to count for L2C_TAD_PFC0 */
#else /* little-endian view: identical fields in reverse order */
        uint64_t cnt0sel        : 8;
        uint64_t cnt1sel        : 8;
        uint64_t cnt2sel        : 8;
        uint64_t cnt3sel        : 8;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_l2c_tadx_prf_s cn63xx;
    struct cvmx_l2c_tadx_prf_s cn63xxp1;
};
typedef union cvmx_l2c_tadx_prf cvmx_l2c_tadx_prf_t;

/**
 * cvmx_l2c_tad#_tag
 *
 * L2C_TAD_TAG = L2C tag data
 *
 * Description: holds the tag information for LTGL2I and STGL2I commands
 *
 * Notes:
 * (1) For 63xx TAG[35] must be written zero for STGL2I's or operation is undefined.  During normal
 * operation, TAG[35] will also read 0.
 *
 * (2) If setting the LOCK bit, the USE bit should also be set or operation is undefined.
 *
 * (3) The tag is the corresponding bits from the L2C+LMC internal L2/DRAM byte address.
 */
union cvmx_l2c_tadx_tag
{
    uint64_t u64;
    struct cvmx_l2c_tadx_tag_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_46_63 : 18;
        uint64_t ecc            : 6;   /**< The tag ECC */
        uint64_t reserved_36_39 : 4;
        uint64_t tag            : 19;  /**< The tag (see notes 1 and 3) */
        uint64_t reserved_4_16  : 13;
        uint64_t use            : 1;   /**< The LRU use bit */
        uint64_t valid          : 1;   /**< The valid bit */
        uint64_t dirty          : 1;   /**< The dirty bit */
        uint64_t lock           : 1;   /**< The lock bit */
#else /* little-endian view: identical fields in reverse order */
        uint64_t lock           : 1;
        uint64_t dirty          : 1;
        uint64_t valid          : 1;
        uint64_t use            : 1;
        uint64_t reserved_4_16  : 13;
        uint64_t tag            : 19;
        uint64_t reserved_36_39 : 4;
        uint64_t ecc            : 6;
        uint64_t reserved_46_63 : 18;
#endif
    } s;
    struct cvmx_l2c_tadx_tag_s cn63xx;
    struct cvmx_l2c_tadx_tag_s cn63xxp1;
};
typedef union cvmx_l2c_tadx_tag cvmx_l2c_tadx_tag_t;

/**
 * cvmx_l2c_ver_id
 *
 * L2C_VER_ID = L2C Virtualization ID Error Register
 *
 * Description: records virtualization IDs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
 */
union cvmx_l2c_ver_id
{
    uint64_t u64;
    struct cvmx_l2c_ver_id_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t mask : 64;  /**< Mask of virtualization IDs which had an error */
#else
        uint64_t mask : 64;
#endif
    } s;
    struct cvmx_l2c_ver_id_s cn63xx;
    struct cvmx_l2c_ver_id_s cn63xxp1;
};
typedef union cvmx_l2c_ver_id cvmx_l2c_ver_id_t;

/**
 * cvmx_l2c_ver_iob
 *
 * L2C_VER_IOB = L2C Virtualization ID IOB Error Register
 *
 * Description: records IOBs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
 */
union cvmx_l2c_ver_iob
{
    uint64_t u64;
    struct cvmx_l2c_ver_iob_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_1_63 : 63;
        uint64_t mask          : 1;  /**< Mask of IOBs which had a virtualization error */
#else /* little-endian view: identical fields in reverse order */
        uint64_t mask          : 1;
        uint64_t reserved_1_63 : 63;
#endif
    } s;
    struct cvmx_l2c_ver_iob_s cn63xx;
    struct cvmx_l2c_ver_iob_s cn63xxp1;
};
typedef union cvmx_l2c_ver_iob cvmx_l2c_ver_iob_t;

/**
 * cvmx_l2c_ver_msc
 *
 * L2C_VER_MSC = L2C Virtualization Miscellaneous Error Register (not in 63xx pass 1.x)
 *
 * Description: records type of command associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts
 */
union cvmx_l2c_ver_msc
{
    uint64_t u64;
    struct cvmx_l2c_ver_msc_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t invl2         : 1;  /**< If set, a INVL2 caused HOLEWR/BIGWR/VRT* to set */
        uint64_t dwb           : 1;  /**< If set, a DWB caused HOLEWR/BIGWR/VRT* to set */
#else /* little-endian view: identical fields in reverse order */
        uint64_t dwb           : 1;
        uint64_t invl2         : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } s;
    /* No cn63xxp1 variant: register not present in 63xx pass 1.x (see description). */
    struct cvmx_l2c_ver_msc_s cn63xx;
};
typedef union cvmx_l2c_ver_msc cvmx_l2c_ver_msc_t;

/**
 * cvmx_l2c_ver_pp
 *
 * L2C_VER_PP = L2C Virtualization ID PP Error Register
 *
 * Description: records PPs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
 */
union cvmx_l2c_ver_pp
{
    uint64_t u64;
    struct cvmx_l2c_ver_pp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_6_63 : 58;
        uint64_t mask          : 6;  /**< Mask of PPs which had a virtualization error */
#else /* little-endian view: identical fields in reverse order */
        uint64_t mask          : 6;
        uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_l2c_ver_pp_s cn63xx;
    struct cvmx_l2c_ver_pp_s cn63xxp1;
};
typedef union cvmx_l2c_ver_pp cvmx_l2c_ver_pp_t;

/**
 * cvmx_l2c_virtid_iob#
 *
 * L2C_VIRTID_IOB = L2C IOB virtualization ID
 *
 * Description:
 */
union cvmx_l2c_virtid_iobx
{
    uint64_t u64;
    struct cvmx_l2c_virtid_iobx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_14_63 : 50;
        uint64_t dwbid          : 6;  /**< Virtualization ID to use for DWB commands */
        uint64_t reserved_6_7   : 2;
        uint64_t id             : 6;  /**< Virtualization ID to use for non-DWB commands */
#else /* little-endian view: identical fields in reverse order */
        uint64_t id             : 6;
        uint64_t reserved_6_7   : 2;
        uint64_t dwbid          : 6;
        uint64_t reserved_14_63 : 50;
#endif
    } s;
    struct cvmx_l2c_virtid_iobx_s cn63xx;
    struct cvmx_l2c_virtid_iobx_s cn63xxp1;
};
typedef union cvmx_l2c_virtid_iobx cvmx_l2c_virtid_iobx_t;

/**
 * cvmx_l2c_virtid_pp#
 *
 * L2C_VIRTID_PP = L2C PP virtualization ID
 *
 * Description:
 */
union cvmx_l2c_virtid_ppx
{
    uint64_t u64;
    struct cvmx_l2c_virtid_ppx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_6_63 : 58;
        uint64_t id            : 6;  /**< Virtualization ID to use for this PP. */
#else /* little-endian view: identical fields in reverse order */
        uint64_t id            : 6;
        uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_l2c_virtid_ppx_s cn63xx;
    struct cvmx_l2c_virtid_ppx_s cn63xxp1;
};
typedef union cvmx_l2c_virtid_ppx cvmx_l2c_virtid_ppx_t;

/**
 * cvmx_l2c_vrt_ctl
 *
 * L2C_VRT_CTL = L2C Virtualization control register
 *
 */
union cvmx_l2c_vrt_ctl
{
    uint64_t u64;
    struct cvmx_l2c_vrt_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_9_63 : 55;
        uint64_t ooberr        : 1;  /**< Whether out of bounds writes are an error
                                          Determines virtualization hardware behavior for
                                          a store to an L2/DRAM address larger than
                                          indicated by MEMSZ. If OOBERR is set, all these
                                          stores (from any virtualization ID) are blocked. If
                                          OOBERR is clear, none of these stores are blocked. */
        uint64_t reserved_7_7  : 1;
        uint64_t memsz         : 3;  /**< Memory space coverage of L2C_VRT_MEM (encoded)
                                          0 = 1GB
                                          1 = 2GB
                                          2 = 4GB
                                          3 = 8GB
                                          4 = 16GB
                                          5 = 32GB
                                          6 = 64GB  (**reserved in 63xx**)
                                          7 = 128GB (**reserved in 63xx**) */
        uint64_t numid         : 3;  /**< Number of allowed virtualization IDs (encoded)
                                          0 = 2
                                          1 = 4
                                          2 = 8
                                          3 = 16
                                          4 = 32
                                          5 = 64
                                          6,7 illegal
                                          Violations of this limit causes
                                          L2C to set L2C_INT_REG[VRTIDRNG]. */
        uint64_t enable        : 1;  /**< Global virtualization enable
                                          When ENABLE is clear, stores are never blocked by
                                          the L2C virtualization hardware and none of NUMID,
                                          MEMSZ, OOBERR are used. */
#else /* little-endian view: identical fields in reverse order */
        uint64_t enable        : 1;
        uint64_t numid         : 3;
        uint64_t memsz         : 3;
        uint64_t reserved_7_7  : 1;
        uint64_t ooberr        : 1;
        uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_l2c_vrt_ctl_s cn63xx;
    struct cvmx_l2c_vrt_ctl_s cn63xxp1;
};
typedef union cvmx_l2c_vrt_ctl cvmx_l2c_vrt_ctl_t;

/**
 * cvmx_l2c_vrt_mem#
 *
 * L2C_VRT_MEM = L2C Virtualization Memory
 *
 * Description: Virtualization memory mapped region.  There are 1024 32b
 * byte-parity protected entries.
 *
 * Notes:
 * When a DATA bit is set in L2C_VRT_MEM when L2C virtualization is enabled, L2C
 * prevents the selected virtual machine from storing to the selected L2/DRAM region.
 * L2C uses L2C_VRT_MEM to block stores when:
 *      - L2C_VRT_CTL[ENABLE] is set, and
 *      - the address of the store exists in L2C+LMC internal L2/DRAM Address space
 *        and is within the L2C_VRT_CTL[MEMSZ] bounds, and
 *      - the virtID of the store is within the L2C_VRT_CTL[NUMID] bounds
 *
 * L2C_VRT_MEM is never used for these L2C transactions which are always allowed:
 *      - L2C CMI L2/DRAM transactions that cannot modify L2/DRAM, and
 *      - any L2/DRAM transaction originated from L2C_XMC_CMD
 *
 * L2C_VRT_MEM contains one DATA bit per L2C+LMC internal L2/DRAM region and virtID indicating whether the store
 * to the region is allowed. The granularity of the checking is the region size, which is:
 *        2 ^^ (L2C_VRT_CTL[NUMID]+L2C_VRT_CTL[MEMSZ]+16)
 * which ranges from a minimum of 64KB to a maximum of 256MB, depending on the size
 * of L2/DRAM that is protected and the number of virtual machines.
 *
 * The L2C_VRT_MEM DATA bit that L2C uses is:
 *
 *      l2c_vrt_mem_bit_index = address >> (L2C_VRT_CTL[MEMSZ]+L2C_VRT_CTL[NUMID]+16); // address is a byte address
 *      l2c_vrt_mem_bit_index = l2c_vrt_mem_bit_index | (virtID << (14-L2C_VRT_CTL[NUMID]));
 *
 *      L2C_VRT_MEM(l2c_vrt_mem_bit_index >> 5)[DATA<l2c_vrt_mem_bit_index & 0x1F>] is used
 *
 * A specific example:
 *
 *      L2C_VRT_CTL[NUMID]=2 (i.e. 8 virtual machine ID's used)
 *      L2C_VRT_CTL[MEMSZ]=4 (i.e. L2C_VRT_MEM covers 16 GB)
 *
 *      L2/DRAM region size (granularity) is 4MB
 *
 *      l2c_vrt_mem_bit_index<14:12> = virtID<2:0>
 *      l2c_vrt_mem_bit_index<11:0>  = address<33:22>
 *
 *      For L2/DRAM physical address 0x51000000 with virtID=5:
 *         L2C_VRT_MEM648[DATA<4>] determines when the store is allowed (648 is decimal, not hex)
 */
union cvmx_l2c_vrt_memx
{
    uint64_t u64;
    struct cvmx_l2c_vrt_memx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_36_63 : 28;
        uint64_t parity         : 4;   /**< Parity to write into (or read from) the
                                            virtualization memory.
                                            PARITY<i> is the even parity of DATA<(i*8)+7:i*8> */
        uint64_t data           : 32;  /**< Data to write into (or read from) the
                                            virtualization memory. */
#else /* little-endian view: identical fields in reverse order */
        uint64_t data           : 32;
        uint64_t parity         : 4;
        uint64_t reserved_36_63 : 28;
#endif
    } s;
    struct cvmx_l2c_vrt_memx_s cn63xx;
    struct cvmx_l2c_vrt_memx_s cn63xxp1;
};
typedef union cvmx_l2c_vrt_memx cvmx_l2c_vrt_memx_t;

/**
 * cvmx_l2c_wpar_iob#
 *
 * L2C_WPAR_IOB = L2C IOB way partitioning
 *
 *
 * Notes:
 * (1) The read value of MASK will include bits set because of the L2C cripple fuses.
 *
 */
union cvmx_l2c_wpar_iobx
{
    uint64_t u64;
    struct cvmx_l2c_wpar_iobx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t mask           : 16;  /**< Way partitioning mask. (1 means do not use) */
#else /* little-endian view: identical fields in reverse order */
        uint64_t mask           : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_l2c_wpar_iobx_s cn63xx;
    struct cvmx_l2c_wpar_iobx_s cn63xxp1;
};
typedef union cvmx_l2c_wpar_iobx cvmx_l2c_wpar_iobx_t;

/**
 * cvmx_l2c_wpar_pp#
 *
 * L2C_WPAR_PP = L2C PP way partitioning
 *
 *
 * Notes:
 * (1) The read value of MASK will include bits set because of the L2C cripple fuses.
 *
 */
union cvmx_l2c_wpar_ppx
{
    uint64_t u64;
    struct cvmx_l2c_wpar_ppx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t mask           : 16;  /**< Way partitioning mask. (1 means do not use) */
#else /* little-endian view: identical fields in reverse order */
        uint64_t mask           : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_l2c_wpar_ppx_s cn63xx;
    struct cvmx_l2c_wpar_ppx_s cn63xxp1;
};
typedef union cvmx_l2c_wpar_ppx cvmx_l2c_wpar_ppx_t;

/**
 * cvmx_l2c_xmc#_pfc
 *
 * L2C_XMC_PFC = L2C XMC Performance Counter(s)
 *
 */
union cvmx_l2c_xmcx_pfc
{
    uint64_t u64;
    struct cvmx_l2c_xmcx_pfc_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t count : 64;  /**< Current counter value */
#else
        uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_xmcx_pfc_s cn63xx;
    struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
};
typedef union cvmx_l2c_xmcx_pfc cvmx_l2c_xmcx_pfc_t;

/**
 * cvmx_l2c_xmc_cmd
 *
 * L2C_XMC_CMD = L2C XMC command register
 *
 *
 * Notes:
 * (1) the XMC command chosen MUST NOT be a IOB destined command or operation is UNDEFINED.
 *
 * (2) the XMC command will have sid forced to IOB, did forced to L2C, no virtualization checks
 * performed (always pass), and xmdmsk forced to 0.  Note that this implies that commands which
 * REQUIRE an XMD cycle (STP,STC,SAA,FAA,FAS) should not be used or the results are unpredictable.
 * The sid=IOB means that the way partitioning used for the command is L2C_WPAR_IOB.
 * None of L2C_QOS_IOB, L2C_QOS_PP, L2C_VIRTID_IOB, L2C_VIRTID_PP are used for these commands.
 *
 * (3) any responses generated by the XMC command will be forced to PP7 (a non-existant PP) effectively
 * causing them to be ignored.  Generated STINs, however, will correctly invalidate the required
 * PPs.
 *
 * (4) any L2D read generated by the XMC command will record the syndrome information in
 * L2C_TAD_ECC0/1.  If ECC is disabled prior to the CSR write this provides the ability to read the
 * ECC bits directly.  If ECC is not disabled this should log 0's (assuming no ECC errors were
 * found in the block).
 *
 * (5) A write which arrives while the INUSE bit is set will block until the INUSE bit clears.  This
 * gives software 2 options when needing to issue a stream of writes to L2C_XMC_CMD: polling on the
 * INUSE bit, or allowing HW to handle the interlock -- at the expense of locking up the RSL bus
 * for potentially tens of cycles at a time while waiting for an available LFB/VAB entry.
 *
 * (6) The address written to L2C_XMC_CMD is a 38-bit OCTEON physical address. L2C performs hole removal and
 * index aliasing (if enabled) on the written address and uses that for the command.  This hole
 * removed/index aliased 38-bit address is what is returned on a read of the L2C_XMC_CMD register.
 */
union cvmx_l2c_xmc_cmd
{
    uint64_t u64;
    struct cvmx_l2c_xmc_cmd_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t inuse          : 1;   /**< Set to 1 by HW upon receiving a write, cleared when
                                            command has issued (not necessarily completed, but
                                            ordered relative to other traffic) and HW can accept
                                            another command. */
        uint64_t cmd            : 6;   /**< Command to use for simulated XMC request
                                            (see note 1: must not be an IOB-destined command) */
        uint64_t reserved_38_56 : 19;
        uint64_t addr           : 38;  /**< Address to use for simulated XMC request (see Note 6) */
#else /* little-endian view: identical fields in reverse order */
        uint64_t addr           : 38;
        uint64_t reserved_38_56 : 19;
        uint64_t cmd            : 6;
        uint64_t inuse          : 1;
#endif
    } s;
    struct cvmx_l2c_xmc_cmd_s cn63xx;
    struct cvmx_l2c_xmc_cmd_s cn63xxp1;
};
typedef union cvmx_l2c_xmc_cmd cvmx_l2c_xmc_cmd_t;

/**
 * cvmx_l2c_xmd#_pfc
 *
 * L2C_XMD_PFC = L2C XMD Performance Counter(s)
 *
 */
union cvmx_l2c_xmdx_pfc
{
    uint64_t u64;
    struct cvmx_l2c_xmdx_pfc_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t count : 64;  /**< Current counter value */
#else
        uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_xmdx_pfc_s cn63xx;
    struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
};
typedef union cvmx_l2c_xmdx_pfc cvmx_l2c_xmdx_pfc_t;

#endif /* __CVMX_L2C_TYPEDEFS_H__ */