/***********************license start***************
 * Copyright (c) 2003-2011 Cavium, Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
 * facilities.
 *
 * <hr>$Revision: 70030 $<hr>
 *
 */

#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__

#define CVMX_L2C_IDX_ADDR_SHIFT 7  /* based on 128 byte cache line size */
#define CVMX_L2C_IDX_MASK       (cvmx_l2c_get_num_sets() - 1)

/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
#define CVMX_L2C_MEMBANK_SELECT_SIZE  4096

/* Defines for virtualization; valid only on Octeon II and later. */
#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 64 : 0)
#define CVMX_L2C_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 32 : 0)
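
/*
 * Usage sketch (illustrative, not from the original header): with the macros
 * above, the L2 set index of a physical address can be computed directly,
 * assuming index aliasing is disabled (otherwise use
 * cvmx_l2c_address_to_index(), declared below). The helper name is
 * hypothetical.
 *
 *   static inline uint32_t example_l2c_set_index(uint64_t paddr)
 *   {
 *       return (uint32_t)((paddr >> CVMX_L2C_IDX_ADDR_SHIFT) & CVMX_L2C_IDX_MASK);
 *   }
 */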

  /*------------*/
  /*  TYPEDEFS  */
  /*------------*/

union cvmx_l2c_tag {
	uint64_t u64;
#ifdef __BIG_ENDIAN_BITFIELD
	struct {
		uint64_t reserved:28;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:32;	/* Phys mem (not all bits valid) */
	} s;
#else
	struct {
		uint64_t addr:32;	/* Phys mem (not all bits valid) */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t L:1;		/* Line locked */
		uint64_t D:1;		/* Line dirty */
		uint64_t V:1;		/* Line valid */
		uint64_t reserved:28;
	} s;

#endif
};
typedef union cvmx_l2c_tag cvmx_l2c_tag_t;
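
/*
 * Usage sketch (illustrative, not from the original header): reading and
 * decoding one L2 tag with cvmx_l2c_get_tag_v2(), declared later in this
 * file. Passing tad = 0 is assumed to be appropriate for non-CN68XX parts,
 * per the cvmx_l2c_get_tag_v2() documentation below; example_dump_tag() is
 * hypothetical.
 *
 *   #include <stdio.h>
 *
 *   void example_dump_tag(uint32_t way, uint32_t index)
 *   {
 *       cvmx_l2c_tag_t tag = cvmx_l2c_get_tag_v2(way, index, 0);
 *       if (tag.s.V)
 *           printf("way %u index %u: addr=0x%08x D=%u L=%u U=%u\n",
 *                  (unsigned)way, (unsigned)index, (unsigned)tag.s.addr,
 *                  (unsigned)tag.s.D, (unsigned)tag.s.L, (unsigned)tag.s.U);
 *   }
 */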

/* Maximum number of TADs */
#define CVMX_L2C_MAX_TADS     4
/* Maximum number of L2C performance counters */
#define CVMX_L2C_MAX_PCNT     4

/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
#define CVMX_L2C_TADS  ((OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 4 : 1)
/* Number of L2C IOBs connected to LMC. */
#define CVMX_L2C_IOBS  ((OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 2 : 1)

/* L2C Performance Counter events. */
enum cvmx_l2c_event {
	CVMX_L2C_EVENT_CYCLES           =  0,    /**< Cycles */
	CVMX_L2C_EVENT_INSTRUCTION_MISS =  1,    /**< L2 Instruction Miss */
	CVMX_L2C_EVENT_INSTRUCTION_HIT  =  2,    /**< L2 Instruction Hit */
	CVMX_L2C_EVENT_DATA_MISS        =  3,    /**< L2 Data Miss */
	CVMX_L2C_EVENT_DATA_HIT         =  4,    /**< L2 Data Hit */
	CVMX_L2C_EVENT_MISS             =  5,    /**< L2 Miss (I/D) */
	CVMX_L2C_EVENT_HIT              =  6,    /**< L2 Hit (I/D) */
	CVMX_L2C_EVENT_VICTIM_HIT       =  7,    /**< L2 Victim Buffer Hit (Retry Probe) */
	CVMX_L2C_EVENT_INDEX_CONFLICT   =  8,    /**< LFB-NQ Index Conflict */
	CVMX_L2C_EVENT_TAG_PROBE        =  9,    /**< L2 Tag Probe (issued - could be VB-Retried) */
	CVMX_L2C_EVENT_TAG_UPDATE       = 10,    /**< L2 Tag Update (completed). Note: Some CMD types do not update */
	CVMX_L2C_EVENT_TAG_COMPLETE     = 11,    /**< L2 Tag Probe Completed (beyond VB-RTY window) */
	CVMX_L2C_EVENT_TAG_DIRTY        = 12,    /**< L2 Tag Dirty Victim */
	CVMX_L2C_EVENT_DATA_STORE_NOP   = 13,    /**< L2 Data Store NOP */
	CVMX_L2C_EVENT_DATA_STORE_READ  = 14,    /**< L2 Data Store READ */
	CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,    /**< L2 Data Store WRITE */
	CVMX_L2C_EVENT_FILL_DATA_VALID  = 16,    /**< Memory Fill Data valid */
	CVMX_L2C_EVENT_WRITE_REQUEST    = 17,    /**< Memory Write Request */
	CVMX_L2C_EVENT_READ_REQUEST     = 18,    /**< Memory Read Request */
	CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,    /**< Memory Write Data valid */
	CVMX_L2C_EVENT_XMC_NOP          = 20,    /**< XMC NOP */
	CVMX_L2C_EVENT_XMC_LDT          = 21,    /**< XMC LDT */
	CVMX_L2C_EVENT_XMC_LDI          = 22,    /**< XMC LDI */
	CVMX_L2C_EVENT_XMC_LDD          = 23,    /**< XMC LDD */
	CVMX_L2C_EVENT_XMC_STF          = 24,    /**< XMC STF */
	CVMX_L2C_EVENT_XMC_STT          = 25,    /**< XMC STT */
	CVMX_L2C_EVENT_XMC_STP          = 26,    /**< XMC STP */
	CVMX_L2C_EVENT_XMC_STC          = 27,    /**< XMC STC */
	CVMX_L2C_EVENT_XMC_DWB          = 28,    /**< XMC DWB */
	CVMX_L2C_EVENT_XMC_PL2          = 29,    /**< XMC PL2 */
	CVMX_L2C_EVENT_XMC_PSL1         = 30,    /**< XMC PSL1 */
	CVMX_L2C_EVENT_XMC_IOBLD        = 31,    /**< XMC IOBLD */
	CVMX_L2C_EVENT_XMC_IOBST        = 32,    /**< XMC IOBST */
	CVMX_L2C_EVENT_XMC_IOBDMA       = 33,    /**< XMC IOBDMA */
	CVMX_L2C_EVENT_XMC_IOBRSP       = 34,    /**< XMC IOBRSP */
	CVMX_L2C_EVENT_XMC_BUS_VALID    = 35,    /**< XMC Bus valid (all) */
	CVMX_L2C_EVENT_XMC_MEM_DATA     = 36,    /**< XMC Bus valid (DST=L2C) Memory */
	CVMX_L2C_EVENT_XMC_REFL_DATA    = 37,    /**< XMC Bus valid (DST=IOB) REFL Data */
	CVMX_L2C_EVENT_XMC_IOBRSP_DATA  = 38,    /**< XMC Bus valid (DST=PP) IOBRSP Data */
	CVMX_L2C_EVENT_RSC_NOP          = 39,    /**< RSC NOP */
	CVMX_L2C_EVENT_RSC_STDN         = 40,    /**< RSC STDN */
	CVMX_L2C_EVENT_RSC_FILL         = 41,    /**< RSC FILL */
	CVMX_L2C_EVENT_RSC_REFL         = 42,    /**< RSC REFL */
	CVMX_L2C_EVENT_RSC_STIN         = 43,    /**< RSC STIN */
	CVMX_L2C_EVENT_RSC_SCIN         = 44,    /**< RSC SCIN */
	CVMX_L2C_EVENT_RSC_SCFL         = 45,    /**< RSC SCFL */
	CVMX_L2C_EVENT_RSC_SCDN         = 46,    /**< RSC SCDN */
	CVMX_L2C_EVENT_RSC_DATA_VALID   = 47,    /**< RSC Data Valid */
	CVMX_L2C_EVENT_RSC_VALID_FILL   = 48,    /**< RSC Data Valid (FILL) */
	CVMX_L2C_EVENT_RSC_VALID_STRSP  = 49,    /**< RSC Data Valid (STRSP) */
	CVMX_L2C_EVENT_RSC_VALID_REFL   = 50,    /**< RSC Data Valid (REFL) */
	CVMX_L2C_EVENT_LRF_REQ          = 51,    /**< LRF-REQ (LFB-NQ) */
	CVMX_L2C_EVENT_DT_RD_ALLOC      = 52,    /**< DT RD-ALLOC */
	CVMX_L2C_EVENT_DT_WR_INVAL      = 53,    /**< DT WR-INVAL */
	CVMX_L2C_EVENT_MAX
};
typedef enum cvmx_l2c_event cvmx_l2c_event_t;

/* L2C Performance Counter events for Octeon II. */
enum cvmx_l2c_tad_event {
	CVMX_L2C_TAD_EVENT_NONE          = 0,     /* None */
	CVMX_L2C_TAD_EVENT_TAG_HIT       = 1,     /* L2 Tag Hit */
	CVMX_L2C_TAD_EVENT_TAG_MISS      = 2,     /* L2 Tag Miss */
	CVMX_L2C_TAD_EVENT_TAG_NOALLOC   = 3,     /* L2 Tag NoAlloc (forced no-allocate) */
	CVMX_L2C_TAD_EVENT_TAG_VICTIM    = 4,     /* L2 Tag Victim */
	CVMX_L2C_TAD_EVENT_SC_FAIL       = 5,     /* SC Fail */
	CVMX_L2C_TAD_EVENT_SC_PASS       = 6,     /* SC Pass */
	CVMX_L2C_TAD_EVENT_LFB_VALID     = 7,     /* LFB Occupancy (each cycle adds # of LFBs valid) */
	CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB  = 8,     /* LFB Wait LFB (each cycle adds # LFBs waiting for other LFBs) */
	CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB  = 9,     /* LFB Wait VAB (each cycle adds # LFBs waiting for VAB) */
	CVMX_L2C_TAD_EVENT_QUAD0_INDEX   = 128,   /* Quad 0 index bus in use */
	CVMX_L2C_TAD_EVENT_QUAD0_READ    = 129,   /* Quad 0 read data bus in use */
	CVMX_L2C_TAD_EVENT_QUAD0_BANK    = 130,   /* Quad 0 # banks in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD0_WDAT    = 131,   /* Quad 0 wdat flops in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD1_INDEX   = 144,   /* Quad 1 index bus in use */
	CVMX_L2C_TAD_EVENT_QUAD1_READ    = 145,   /* Quad 1 read data bus in use */
	CVMX_L2C_TAD_EVENT_QUAD1_BANK    = 146,   /* Quad 1 # banks in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD1_WDAT    = 147,   /* Quad 1 wdat flops in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD2_INDEX   = 160,   /* Quad 2 index bus in use */
	CVMX_L2C_TAD_EVENT_QUAD2_READ    = 161,   /* Quad 2 read data bus in use */
	CVMX_L2C_TAD_EVENT_QUAD2_BANK    = 162,   /* Quad 2 # banks in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD2_WDAT    = 163,   /* Quad 2 wdat flops in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD3_INDEX   = 176,   /* Quad 3 index bus in use */
	CVMX_L2C_TAD_EVENT_QUAD3_READ    = 177,   /* Quad 3 read data bus in use */
	CVMX_L2C_TAD_EVENT_QUAD3_BANK    = 178,   /* Quad 3 # banks in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_QUAD3_WDAT    = 179,   /* Quad 3 wdat flops in use (0-4/cycle) */
	CVMX_L2C_TAD_EVENT_MAX
};
typedef enum cvmx_l2c_tad_event cvmx_l2c_tad_event_t;

/**
 * Configure one of the four L2 Cache performance counters to capture event
 * occurrences.
 *
 * @param counter        The counter to configure. Range 0..3.
 * @param event          The type of L2 Cache event occurrence to count.
 * @param clear_on_read  When asserted, any read of the performance counter
 *                       clears the counter.
 *
 * @note The routine does not clear the counter.
 */
void cvmx_l2c_config_perf(uint32_t counter, cvmx_l2c_event_t event, uint32_t clear_on_read);

/**
 * Read the given L2 Cache performance counter. The counter must be configured
 * before reading, but this routine does not enforce this requirement.
 *
 * @param counter  The counter to read. Range 0..3.
 *
 * @return The current counter value.
 */
uint64_t cvmx_l2c_read_perf(uint32_t counter);
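
/*
 * Usage sketch (illustrative, not from the original header): counting L2 data
 * hits and misses around a workload with the two routines above. Counter
 * numbers 0 and 1 are arbitrary choices; run_workload() is a placeholder.
 *
 *   cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_DATA_MISS, 0);
 *   cvmx_l2c_config_perf(1, CVMX_L2C_EVENT_DATA_HIT, 0);
 *
 *   run_workload();
 *
 *   uint64_t misses = cvmx_l2c_read_perf(0);
 *   uint64_t hits   = cvmx_l2c_read_perf(1);
 */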

/**
 * Return the L2 Cache way partitioning for a given core.
 *
 * @param core  The core processor of interest.
 *
 * @return    The mask specifying the partitioning. A 0 bit in the mask
 *            indicates a cache way the core is allowed to evict from.
 *            Returns -1 on error.
 */
int cvmx_l2c_get_core_way_partition(uint32_t core);

/**
 * Partitions the L2 cache for a core.
 *
 * @param core The core that the partitioning applies to.
 * @param mask The partitioning of the ways expressed as a binary
 *             mask. A 0 bit allows the core to evict cache lines from
 *             a way, while a 1 bit blocks the core from evicting any
 *             lines from that way. There must be at least one allowed
 *             way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then
 *       those ways will never have any cache lines evicted from them.
 *       All cores and the hardware blocks are free to read from all
 *       ways regardless of the partitioning.
 */
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
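
/*
 * Usage sketch (illustrative, not from the original header): restricting core
 * 0 to the lower half of an 8-way cache. The way count should really come
 * from cvmx_l2c_get_num_assoc(), declared later in this file; 8 ways and the
 * 0xF0 mask are assumptions for this example.
 *
 *   if (cvmx_l2c_get_core_way_partition(0) >= 0)
 *       cvmx_l2c_set_core_way_partition(0, 0xF0);   // 1 bits block ways 4-7
 */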

/**
 * Return the L2 Cache way partitioning for the hardware blocks.
 *
 * @return    The mask specifying the partitioning. A 0 bit in the mask
 *            indicates a cache way the hardware blocks are allowed to
 *            evict from.
 *            Returns -1 on error.
 */
int cvmx_l2c_get_hw_way_partition(void);

/**
 * Partitions the L2 cache for the hardware blocks.
 *
 * @param mask The partitioning of the ways expressed as a binary
 *             mask. A 0 bit allows the hardware blocks to evict cache
 *             lines from a way, while a 1 bit blocks them from evicting
 *             any lines from that way. There must be at least one
 *             allowed way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then
 *       those ways will never have any cache lines evicted from them.
 *       All cores and the hardware blocks are free to read from all
 *       ways regardless of the partitioning.
 */
int cvmx_l2c_set_hw_way_partition(uint32_t mask);

/**
 * Return the L2 Cache way partitioning for the second set of hardware blocks.
 *
 * @return    The mask specifying the partitioning. A 0 bit in the mask
 *            indicates a cache way the hardware blocks are allowed to
 *            evict from.
 *            Returns -1 on error.
 */
int cvmx_l2c_get_hw_way_partition2(void);

/**
 * Partitions the L2 cache for the second set of hardware blocks.
 *
 * @param mask The partitioning of the ways expressed as a binary
 *             mask. A 0 bit allows the hardware blocks to evict cache
 *             lines from a way, while a 1 bit blocks them from evicting
 *             any lines from that way. There must be at least one
 *             allowed way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then
 *       those ways will never have any cache lines evicted from them.
 *       All cores and the hardware blocks are free to read from all
 *       ways regardless of the partitioning.
 */
int cvmx_l2c_set_hw_way_partition2(uint32_t mask);

/**
 * Locks a line in the L2 cache at the specified physical address.
 *
 * @param addr   physical address of line to lock
 *
 * @return 0 on success,
 *         1 if line not locked.
 */
int cvmx_l2c_lock_line(uint64_t addr);
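
/*
 * Usage sketch (illustrative, not from the original header): pinning one
 * frequently used cache line. hot_counter is a placeholder variable;
 * cvmx_ptr_to_phys() is the usual SDK pointer-to-physical-address helper and
 * is assumed to be available (it is not declared in this file).
 *
 *   extern uint64_t hot_counter;
 *
 *   if (cvmx_l2c_lock_line(cvmx_ptr_to_phys(&hot_counter)) != 0)
 *       ;   // line could not be locked, fall back to normal caching
 */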

/**
 * Locks a specified memory region in the L2 cache.
 *
 * Note that if not all lines can be locked, that means that all
 * but one of the ways (associations) available to the locking
 * core are locked.  Having only one association available for
 * normal caching may have a significant adverse effect on performance.
 * Care should be taken to ensure that enough of the L2 cache is left
 * unlocked to allow for normal caching of DRAM.
 *
 * @param start  Physical address of the start of the region to lock
 * @param len    Length (in bytes) of region to lock
 *
 * @return Number of requested lines that were not locked.
 *         0 on success (all locked)
 */
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);

/**
 * Unlock and flush a cache line from the L2 cache.
 * IMPORTANT: Must only be run by one core at a time due to use
 * of L2C debug features.
 * Note that this function will flush a matching but unlocked cache line.
 * (If the address is not in L2, no lines are flushed.)
 *
 * @param address Physical address to unlock
 *
 * @return 0: line not unlocked
 *         1: line unlocked
 */
int cvmx_l2c_unlock_line(uint64_t address);

/**
 * Unlocks a region of memory that is locked in the L2 cache.
 *
 * @param start  start physical address
 * @param len    length (in bytes) to unlock
 *
 * @return Number of locked lines that the call unlocked
 */
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
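
/*
 * Usage sketch (illustrative, not from the original header): locking a buffer
 * into L2 for a latency-critical phase and releasing it afterwards. buf and
 * BUF_BYTES are placeholders; cvmx_ptr_to_phys() is the usual SDK
 * pointer-to-physical-address helper and is assumed to be available.
 *
 *   uint64_t phys = cvmx_ptr_to_phys(buf);
 *
 *   int missed = cvmx_l2c_lock_mem_region(phys, BUF_BYTES);
 *   if (missed != 0)
 *       ;   // 'missed' lines were not locked; consider shrinking the region
 *
 *   // ... latency-critical processing using buf ...
 *
 *   cvmx_l2c_unlock_mem_region(phys, BUF_BYTES);
 */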

/**
 * Read the L2 controller tag for a given location in L2.
 *
 * @param association
 *               Which association (way) to read the line from
 * @param index  Which index (set) to read from.
 *
 * @return l2c tag structure for line requested.
 *
 * @note This function is deprecated and cannot be used on devices with
 *       multiple L2C interfaces such as the OCTEON CN68XX.
 *       Please use cvmx_l2c_get_tag_v2 instead.
 */
cvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index)
        __attribute__ ((deprecated));

/**
 * Read the L2 controller tag for a given location in L2.
 *
 * @param association
 *               Which association (way) to read the line from
 * @param index  Which index (set) to read from.
 * @param tad    Which TAD to read from; set to 0 except on OCTEON CN68XX.
 *
 * @return l2c tag structure for line requested.
 */
cvmx_l2c_tag_t cvmx_l2c_get_tag_v2(uint32_t association, uint32_t index, uint32_t tad);

/**
 * Find the TAD for the specified address.
 *
 * @param addr   physical address to get TAD for
 *
 * @return TAD number for address.
 */
int cvmx_l2c_address_to_tad(uint64_t addr);

/**
 * Returns the cache index for a given physical address.
 *
 * @param addr   physical address
 *
 * @return L2 cache index
 */
uint32_t cvmx_l2c_address_to_index(uint64_t addr);
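
/*
 * Usage sketch (illustrative, not from the original header): reporting where a
 * physical address lands in the L2. cvmx_ptr_to_phys() is the usual SDK helper
 * and is assumed to be available; example_where() is hypothetical.
 *
 *   #include <stdio.h>
 *
 *   void example_where(void *ptr)
 *   {
 *       uint64_t paddr = cvmx_ptr_to_phys(ptr);
 *       printf("paddr 0x%016llx -> TAD %d, index %u\n",
 *              (unsigned long long)paddr,
 *              cvmx_l2c_address_to_tad(paddr),
 *              (unsigned)cvmx_l2c_address_to_index(paddr));
 *   }
 */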

/**
 * Returns the L2 tag that will be used for the given physical address.
 *
 * @param addr   physical address
 *
 * @return L2 cache tag for the address. Addresses in the LMC hole are not
 *         valid; 0xFFFFFFFF is returned if the specified address is in the
 *         LMC hole.
 */
uint32_t cvmx_l2c_v2_address_to_tag(uint64_t addr);

/**
 * Flushes (and unlocks) the entire L2 cache.
 * IMPORTANT: Must only be run by one core at a time due to use
 * of L2C debug features.
 */
void cvmx_l2c_flush(void);

/**
 * Return the size of the L2 cache.
 *
 * @return The size of the L2 cache in bytes,
 *         -1 on error (unrecognized model)
 */
int cvmx_l2c_get_cache_size_bytes(void);

/**
 * Return the number of sets in the L2 Cache.
 *
 * @return Number of sets
 */
int cvmx_l2c_get_num_sets(void);

/**
 * Return log base 2 of the number of sets in the L2 cache.
 *
 * @return Number of set bits
 */
int cvmx_l2c_get_set_bits(void);

/**
 * Return the number of associations in the L2 Cache.
 *
 * @return Number of associations (ways)
 */
int cvmx_l2c_get_num_assoc(void);
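
/*
 * Usage sketch (illustrative, not from the original header): with a 128-byte
 * line size (see CVMX_L2C_IDX_ADDR_SHIFT above), the three geometry routines
 * are expected to be consistent with the total cache size.
 *
 *   int sets  = cvmx_l2c_get_num_sets();
 *   int ways  = cvmx_l2c_get_num_assoc();
 *   int bytes = cvmx_l2c_get_cache_size_bytes();
 *
 *   // On a recognized model, bytes == sets * ways * 128 is expected.
 */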

/**
 * Flush a line from the L2 cache.
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @param assoc  Association (or way) to flush
 * @param index  Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);

/**
 * Initialize the BIG address in L2C+DRAM to generate a proper error
 * on reading from or writing to a non-existent memory location.
 *
 * @param mem_size  Amount of DRAM configured in MB.
 * @param mode      Allow/disallow reporting of errors via
 *                  L2C_INT_SUM[BIGRD,BIGWR].
 */
void cvmx_l2c_set_big_size(uint64_t mem_size, int mode);

#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL)

/**
 * Set the maximum number of virtual IDs allowed in a machine.
 *
 * @param nvid  Number of virtual IDs allowed in a machine.
 * @return      Return 0 on success or -1 on failure.
 */
int cvmx_l2c_vrt_set_max_virtids(int nvid);

/**
 * Get the maximum number of virtual IDs allowed in a machine.
 *
 * @return  Return the number of virtual machine IDs, or -1 on failure.
 */
int cvmx_l2c_vrt_get_max_virtids(void);

/**
 * Set the maximum size of the memory space to be allocated for virtualization.
 *
 * @param memsz     Size of the virtual memory in GB
 * @return          Return 0 on success or -1 on failure.
 */
int cvmx_l2c_vrt_set_max_memsz(int memsz);

/**
 * Assign a virtual ID to a set of cores.
 *
 * @param virtid    The virtual ID to assign.
 * @param coremask  The group of cores to assign the virtual ID to.
 * @return          Return 0 on success, otherwise -1.
 */
int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask);

/**
 * Remove a virtual ID assigned to a set of cores. Updates the virtid mask and
 * the virtid stored for each core.
 *
 * @param virtid  The virtual ID to remove.
 */
void cvmx_l2c_vrt_remove_virtid(int virtid);

/**
 * Block a memory region from being updated by a given virtual ID.
 *
 * @param start_addr   Starting address of the memory region
 * @param size         Size of the memory to protect
 * @param virtid       Virtual ID to use
 * @param mode         Allow/Disallow write access
 *                        = 0,  Allow write access by virtid
 *                        = 1,  Disallow write access by virtid
 */
int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode);

/**
 * Enable virtualization.
 */
void cvmx_l2c_vrt_enable(int mode);
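
/*
 * Usage sketch (illustrative, not from the original header): a possible
 * bring-up sequence for L2C virtualization on Octeon II, based only on the
 * declarations above. The virtual ID, core mask, region base and size are
 * placeholders, and the meaning of the cvmx_l2c_vrt_enable() mode argument is
 * not documented here, so passing 0 is an assumption.
 *
 *   cvmx_l2c_vrt_set_max_virtids(CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
 *   cvmx_l2c_vrt_set_max_memsz(4);                             // 4 GB
 *   cvmx_l2c_vrt_assign_virtid(1, 0x3);                        // cores 0-1 -> virtid 1
 *   cvmx_l2c_vrt_memprotect(region_base, region_size, 1, 1);   // virtid 1: no writes
 *   cvmx_l2c_vrt_enable(0);
 */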

/**
 * Disable virtualization.
 */
void cvmx_l2c_vrt_disable(void);

#endif /* !CVMX_BUILD_FOR_LINUX_HOST && !CVMX_BUILD_FOR_LINUX_KERNEL */

#endif /* __CVMX_L2C_H__ */