/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
 * facilities.
 *
 * <hr>$Revision: 52004 $<hr>
 *
 */

#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__

#define CVMX_L2_ASSOC    cvmx_l2c_get_num_assoc()  /* Deprecated macro, use function */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()   /* Deprecated macro, use function */
#define CVMX_L2_SETS     cvmx_l2c_get_num_sets()   /* Deprecated macro, use function */


#define CVMX_L2C_IDX_ADDR_SHIFT 7  /* based on 128 byte cache line size */
#define CVMX_L2C_IDX_MASK       (cvmx_l2c_get_num_sets() - 1)

/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK           (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
#define CVMX_L2C_MEMBANK_SELECT_SIZE  4096

/* Defines for virtualization, valid only from Octeon II onwards. */
#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED  ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)

  /*------------*/
  /*  TYPEDEFS  */
  /*------------*/

/**
 * L2 cache tag, as returned by cvmx_l2c_get_tag().
 */
typedef union
{
    uint64_t u64;
#if __BYTE_ORDER == __BIG_ENDIAN
    struct
    {
        uint64_t reserved       : 28;
        uint64_t V              : 1;    // Line valid
        uint64_t D              : 1;    // Line dirty
        uint64_t L              : 1;    // Line locked
        uint64_t U              : 1;    // Use, LRU eviction
        uint64_t addr           : 32;   // Phys mem (not all bits valid)
    } s;
#endif
} cvmx_l2c_tag_t;

/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
#define CVMX_L2C_TADS  1

  /* L2C Performance Counter events. */
typedef enum
{
    CVMX_L2C_EVENT_CYCLES           =  0,    /**< Cycles */
    CVMX_L2C_EVENT_INSTRUCTION_MISS =  1,    /**< L2 Instruction Miss */
    CVMX_L2C_EVENT_INSTRUCTION_HIT  =  2,    /**< L2 Instruction Hit */
    CVMX_L2C_EVENT_DATA_MISS        =  3,    /**< L2 Data Miss */
    CVMX_L2C_EVENT_DATA_HIT         =  4,    /**< L2 Data Hit */
    CVMX_L2C_EVENT_MISS             =  5,    /**< L2 Miss (I/D) */
    CVMX_L2C_EVENT_HIT              =  6,    /**< L2 Hit (I/D) */
    CVMX_L2C_EVENT_VICTIM_HIT       =  7,    /**< L2 Victim Buffer Hit (Retry Probe) */
    CVMX_L2C_EVENT_INDEX_CONFLICT   =  8,    /**< LFB-NQ Index Conflict */
    CVMX_L2C_EVENT_TAG_PROBE        =  9,    /**< L2 Tag Probe (issued - could be VB-Retried) */
    CVMX_L2C_EVENT_TAG_UPDATE       = 10,    /**< L2 Tag Update (completed). Note: Some CMD types do not update */
    CVMX_L2C_EVENT_TAG_COMPLETE     = 11,    /**< L2 Tag Probe Completed (beyond VB-RTY window) */
    CVMX_L2C_EVENT_TAG_DIRTY        = 12,    /**< L2 Tag Dirty Victim */
    CVMX_L2C_EVENT_DATA_STORE_NOP   = 13,    /**< L2 Data Store NOP */
    CVMX_L2C_EVENT_DATA_STORE_READ  = 14,    /**< L2 Data Store READ */
    CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,    /**< L2 Data Store WRITE */
    CVMX_L2C_EVENT_FILL_DATA_VALID  = 16,    /**< Memory Fill Data valid */
    CVMX_L2C_EVENT_WRITE_REQUEST    = 17,    /**< Memory Write Request */
    CVMX_L2C_EVENT_READ_REQUEST     = 18,    /**< Memory Read Request */
    CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,    /**< Memory Write Data valid */
    CVMX_L2C_EVENT_XMC_NOP          = 20,    /**< XMC NOP */
    CVMX_L2C_EVENT_XMC_LDT          = 21,    /**< XMC LDT */
    CVMX_L2C_EVENT_XMC_LDI          = 22,    /**< XMC LDI */
    CVMX_L2C_EVENT_XMC_LDD          = 23,    /**< XMC LDD */
    CVMX_L2C_EVENT_XMC_STF          = 24,    /**< XMC STF */
    CVMX_L2C_EVENT_XMC_STT          = 25,    /**< XMC STT */
    CVMX_L2C_EVENT_XMC_STP          = 26,    /**< XMC STP */
    CVMX_L2C_EVENT_XMC_STC          = 27,    /**< XMC STC */
    CVMX_L2C_EVENT_XMC_DWB          = 28,    /**< XMC DWB */
    CVMX_L2C_EVENT_XMC_PL2          = 29,    /**< XMC PL2 */
    CVMX_L2C_EVENT_XMC_PSL1         = 30,    /**< XMC PSL1 */
    CVMX_L2C_EVENT_XMC_IOBLD        = 31,    /**< XMC IOBLD */
    CVMX_L2C_EVENT_XMC_IOBST        = 32,    /**< XMC IOBST */
    CVMX_L2C_EVENT_XMC_IOBDMA       = 33,    /**< XMC IOBDMA */
    CVMX_L2C_EVENT_XMC_IOBRSP       = 34,    /**< XMC IOBRSP */
    CVMX_L2C_EVENT_XMC_BUS_VALID    = 35,    /**< XMC Bus valid (all) */
    CVMX_L2C_EVENT_XMC_MEM_DATA     = 36,    /**< XMC Bus valid (DST=L2C) Memory */
    CVMX_L2C_EVENT_XMC_REFL_DATA    = 37,    /**< XMC Bus valid (DST=IOB) REFL Data */
    CVMX_L2C_EVENT_XMC_IOBRSP_DATA  = 38,    /**< XMC Bus valid (DST=PP) IOBRSP Data */
    CVMX_L2C_EVENT_RSC_NOP          = 39,    /**< RSC NOP */
    CVMX_L2C_EVENT_RSC_STDN         = 40,    /**< RSC STDN */
    CVMX_L2C_EVENT_RSC_FILL         = 41,    /**< RSC FILL */
    CVMX_L2C_EVENT_RSC_REFL         = 42,    /**< RSC REFL */
    CVMX_L2C_EVENT_RSC_STIN         = 43,    /**< RSC STIN */
    CVMX_L2C_EVENT_RSC_SCIN         = 44,    /**< RSC SCIN */
    CVMX_L2C_EVENT_RSC_SCFL         = 45,    /**< RSC SCFL */
    CVMX_L2C_EVENT_RSC_SCDN         = 46,    /**< RSC SCDN */
    CVMX_L2C_EVENT_RSC_DATA_VALID   = 47,    /**< RSC Data Valid */
    CVMX_L2C_EVENT_RSC_VALID_FILL   = 48,    /**< RSC Data Valid (FILL) */
    CVMX_L2C_EVENT_RSC_VALID_STRSP  = 49,    /**< RSC Data Valid (STRSP) */
    CVMX_L2C_EVENT_RSC_VALID_REFL   = 50,    /**< RSC Data Valid (REFL) */
    CVMX_L2C_EVENT_LRF_REQ          = 51,    /**< LRF-REQ (LFB-NQ) */
    CVMX_L2C_EVENT_DT_RD_ALLOC      = 52,    /**< DT RD-ALLOC */
    CVMX_L2C_EVENT_DT_WR_INVAL      = 53,    /**< DT WR-INVAL */
    CVMX_L2C_EVENT_MAX
} cvmx_l2c_event_t;

/* L2C Performance Counter events for Octeon2. */
typedef enum
{
    CVMX_L2C_TAD_EVENT_NONE          = 0,    /* None */
    CVMX_L2C_TAD_EVENT_TAG_HIT       = 1,    /* L2 Tag Hit */
    CVMX_L2C_TAD_EVENT_TAG_MISS      = 2,    /* L2 Tag Miss */
    CVMX_L2C_TAD_EVENT_TAG_NOALLOC   = 3,    /* L2 Tag NoAlloc (forced no-allocate) */
    CVMX_L2C_TAD_EVENT_TAG_VICTIM    = 4,    /* L2 Tag Victim */
    CVMX_L2C_TAD_EVENT_SC_FAIL       = 5,    /* SC Fail */
    CVMX_L2C_TAD_EVENT_SC_PASS       = 6,    /* SC Pass */
    CVMX_L2C_TAD_EVENT_LFB_VALID     = 7,    /* LFB Occupancy (each cycle adds \# of LFBs valid) */
    CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB  = 8,    /* LFB Wait LFB (each cycle adds \# LFBs waiting for other LFBs) */
    CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB  = 9,    /* LFB Wait VAB (each cycle adds \# LFBs waiting for VAB) */
    CVMX_L2C_TAD_EVENT_QUAD0_INDEX   = 128,  /* Quad 0 index bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD0_READ    = 129,  /* Quad 0 read data bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD0_BANK    = 130,  /* Quad 0 \# banks inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD0_WDAT    = 131,  /* Quad 0 wdat flops inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD1_INDEX   = 144,  /* Quad 1 index bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD1_READ    = 145,  /* Quad 1 read data bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD1_BANK    = 146,  /* Quad 1 \# banks inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD1_WDAT    = 147,  /* Quad 1 wdat flops inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD2_INDEX   = 160,  /* Quad 2 index bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD2_READ    = 161,  /* Quad 2 read data bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD2_BANK    = 162,  /* Quad 2 \# banks inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD2_WDAT    = 163,  /* Quad 2 wdat flops inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD3_INDEX   = 176,  /* Quad 3 index bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD3_READ    = 177,  /* Quad 3 read data bus inuse */
    CVMX_L2C_TAD_EVENT_QUAD3_BANK    = 178,  /* Quad 3 \# banks inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_QUAD3_WDAT    = 179,  /* Quad 3 wdat flops inuse (0-4/cycle) */
    CVMX_L2C_TAD_EVENT_MAX
} cvmx_l2c_tad_event_t;

/**
 * Configure one of the four L2 Cache performance counters to capture event
 * occurrences.
 *
 * @param counter        The counter to configure. Range 0..3.
 * @param event          The type of L2 Cache event occurrence to count.
 * @param clear_on_read  When asserted, any read of the performance counter
 *                       clears the counter.
 *
 * @note The routine does not clear the counter.
 */
void cvmx_l2c_config_perf(uint32_t counter, cvmx_l2c_event_t event, uint32_t clear_on_read);

/**
 * Read the given L2 Cache performance counter. The counter must be configured
 * before reading, but this routine does not enforce this requirement.
 *
 * @param counter  The counter to read. Range 0..3.
 *
 * @return The current counter value.
 */
uint64_t cvmx_l2c_read_perf(uint32_t counter);

/**
 * Return the L2 Cache way partitioning for a given core.
 *
 * @param core  The core processor of interest.
 *
 * @return    The mask specifying the partitioning. 0 bits in mask indicates
 *              the cache 'ways' that a core can evict from.
 *            -1 on error
 */
int cvmx_l2c_get_core_way_partition(uint32_t core);

/**
 * Partitions the L2 cache for a core
 *
 * @param core The core that the partitioning applies to.
 * @param mask The partitioning of the ways expressed as a binary mask. A 0 bit allows the core
 *             to evict cache lines from a way, while a 1 bit blocks the core from evicting any lines
 *             from that way. There must be at least one allowed way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then those ways will never have
 *       any cache lines evicted from them. All cores and the hardware blocks are free to read from
 *       all ways regardless of the partitioning.
 */
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);

/**
 * Return the L2 Cache way partitioning for the hw blocks.
 *
 * @return    The mask specifying the reserved way. 0 bits in mask indicates
 *              the cache 'ways' that a core can evict from.
 *            -1 on error
 */
int cvmx_l2c_get_hw_way_partition(void);

/**
 * Partitions the L2 cache for the hardware blocks.
 *
 * @param mask The partitioning of the ways expressed as a binary mask. A 0 bit allows the core
 *             to evict cache lines from a way, while a 1 bit blocks the core from evicting any lines
 *             from that way. There must be at least one allowed way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then those ways will never have
 *       any cache lines evicted from them. All cores and the hardware blocks are free to read from
 *       all ways regardless of the partitioning.
 */
int cvmx_l2c_set_hw_way_partition(uint32_t mask);


/**
 * Locks a line in the L2 cache at the specified physical address
 *
 * @param addr  physical address of line to lock
 *
 * @return 0 on success,
 *         1 if line not locked.
 */
int cvmx_l2c_lock_line(uint64_t addr);

/**
 * Locks a specified memory region in the L2 cache.
 *
 * Note that if not all lines can be locked, that means that all
 * but one of the ways (associations) available to the locking
 * core are locked. Having only 1 association available for
 * normal caching may have a significant adverse effect on performance.
 * Care should be taken to ensure that enough of the L2 cache is left
 * unlocked to allow for normal caching of DRAM.
 *
 * @param start  Physical address of the start of the region to lock
 * @param len    Length (in bytes) of region to lock
 *
 * @return Number of requested lines that were not locked.
 *         0 on success (all locked)
 */
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);


/**
 * Unlock and flush a cache line from the L2 cache.
 * IMPORTANT: Must only be run by one core at a time due to use
 * of L2C debug features.
 * Note that this function will flush a matching but unlocked cache line.
 * (If address is not in L2, no lines are flushed.)
 *
 * @param address  Physical address to unlock
 *
 * @return 0: line not unlocked
 *         1: line unlocked
 */
int cvmx_l2c_unlock_line(uint64_t address);

/**
 * Unlocks a region of memory that is locked in the L2 cache
 *
 * @param start  start physical address
 * @param len    length (in bytes) to unlock
 *
 * @return Number of locked lines that the call unlocked
 */
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);




/**
 * Read the L2 controller tag for a given location in L2
 *
 * @param association  Which association (way) to read the line from
 * @param index        Which index (set) to read from
 *
 * @return l2c tag structure for line requested.
 */
cvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index);

/* Wrapper providing a deprecated old function name */
static inline cvmx_l2c_tag_t cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
static inline cvmx_l2c_tag_t cvmx_get_l2c_tag(uint32_t association, uint32_t index)
{
    return cvmx_l2c_get_tag(association, index);
}


/**
 * Returns the cache index for a given physical address
 *
 * @param addr  physical address
 *
 * @return L2 cache index
 */
uint32_t cvmx_l2c_address_to_index(uint64_t addr);


/**
 * Flushes (and unlocks) the entire L2 cache.
 * IMPORTANT: Must only be run by one core at a time due to use
 * of L2C debug features.
 */
void cvmx_l2c_flush(void);



/**
 * @return Returns the size of the L2 cache in bytes,
 *         -1 on error (unrecognized model)
 */
int cvmx_l2c_get_cache_size_bytes(void);

/**
 * Return the number of sets in the L2 Cache
 *
 * @return number of sets
 */
int cvmx_l2c_get_num_sets(void);

/**
 * Return log base 2 of the number of sets in the L2 cache
 *
 * @return log2 of the number of sets
 */
int cvmx_l2c_get_set_bits(void);

/**
 * Return the number of associations in the L2 Cache
 *
 * @return number of associations (ways)
 */
int cvmx_l2c_get_num_assoc(void);

/**
 * Flush a line from the L2 cache
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @param assoc  Association (or way) to flush
 * @param index  Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);

/**
 * Set maximum number of Virtual IDs allowed in a machine.
 *
 * @param nvid  Number of virtual ids allowed in a machine.
 * @return Return 0 on success or -1 on failure.
 */
int cvmx_l2c_vrt_set_max_virtids(int nvid);

/**
 * Get maximum number of virtual IDs allowed in a machine.
 *
 * @return Return number of virtual machine IDs. Return -1 on failure.
 */
int cvmx_l2c_vrt_get_max_virtids(void);

/**
 * Set the maximum size of memory space to be allocated for virtualization.
 *
 * @param memsz  Size of the virtual memory in GB
 * @return Return 0 on success or -1 on failure.
 */
int cvmx_l2c_vrt_set_max_memsz(int memsz);

/**
 * Set a Virtual ID to a set of cores.
 *
 * @param virtid    Assign virtid to a set of cores.
 * @param coremask  The group of cores to assign a unique virtual id.
 * @return Return 0 on success, otherwise -1.
 */
int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask);

/**
 * Remove a virt id assigned to a set of cores. Update the virtid mask and
 * virtid stored for each core.
 *
 * @param virtid  The virtual id whose assignment to cores is removed.
 */
void cvmx_l2c_vrt_remove_virtid(int virtid);

/**
 * Block a memory region to be updated by a set of virtids.
 *
 * @param start_addr  Starting address of memory region
 * @param size        Size of the memory to protect
 * @param virtid      Virtual ID to use
 * @param mode        Allow/Disallow write access
 *                       = 0, Allow write access by virtid
 *                       = 1, Disallow write access by virtid
 */
int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode);

/**
 * Enable virtualization.
 */
void cvmx_l2c_vrt_enable(int mode);

/**
 * Disable virtualization.
 */
void cvmx_l2c_vrt_disable(void);

#endif /* __CVMX_L2C_H__ */