cvmx-asm.h: diff of revision 210311 (deleted lines) against revision 215990 (added lines)
1/***********************license start***************
1/***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
3 * reserved.
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
12 *
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
21 * permission.
22 *
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
33 *
34 *
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
36 *
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
21 * permission.
22 *
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
26 * countries.
27 *
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
37 ***********************license end**************************************/
38
39
40
41
42
43
38 ***********************license end**************************************/
39
40
41
42
43
44
45
44/**
45 * @file
46 *
47 * This file defines ASM primitives for the executive.
48
46/**
47 * @file
48 *
49 * This file defines ASM primitives for the executive.
50
49 * <hr>$Revision: 42280 $<hr>
51 * <hr>$Revision: 52004 $<hr>
50 *
51 *
52 */
53#ifndef __CVMX_ASM_H__
54#define __CVMX_ASM_H__
55
52 *
53 *
54 */
55#ifndef __CVMX_ASM_H__
56#define __CVMX_ASM_H__
57
58#define COP0_INDEX $0,0 /* TLB read/write index */
59#define COP0_RANDOM $1,0 /* TLB random index */
60#define COP0_ENTRYLO0 $2,0 /* TLB entryLo0 */
61#define COP0_ENTRYLO1 $3,0 /* TLB entryLo1 */
62#define COP0_CONTEXT $4,0 /* Context */
63#define COP0_PAGEMASK $5,0 /* TLB pagemask */
64#define COP0_PAGEGRAIN $5,1 /* TLB config for max page sizes */
65#define COP0_WIRED $6,0 /* TLB number of wired entries */
66#define COP0_HWRENA $7,0 /* rdhw instruction enable per register */
67#define COP0_BADVADDR $8,0 /* Bad virtual address */
68#define COP0_COUNT $9,0 /* Mips count register */
69#define COP0_CVMCOUNT $9,6 /* Cavium count register */
70#define COP0_CVMCTL $9,7 /* Cavium control */
71#define COP0_ENTRYHI $10,0 /* TLB entryHi */
72#define COP0_COMPARE $11,0 /* Mips compare register */
73#define COP0_POWTHROTTLE $11,6 /* Power throttle register */
74#define COP0_CVMMEMCTL $11,7 /* Cavium memory control */
75#define COP0_STATUS $12,0 /* Mips status register */
76#define COP0_INTCTL $12,1 /* Useless (Vectored interrupts) */
77#define COP0_SRSCTL $12,2 /* Useless (Shadow registers) */
78#define COP0_CAUSE $13,0 /* Mips cause register */
79#define COP0_EPC $14,0 /* Exception program counter */
80#define COP0_PRID $15,0 /* Processor ID */
81#define COP0_EBASE $15,1 /* Exception base */
82#define COP0_CONFIG $16,0 /* Misc config options */
83#define COP0_CONFIG1 $16,1 /* Misc config options */
84#define COP0_CONFIG2 $16,2 /* Misc config options */
85#define COP0_CONFIG3 $16,3 /* Misc config options */
86#define COP0_WATCHLO0 $18,0 /* Address watch registers */
87#define COP0_WATCHLO1 $18,1 /* Address watch registers */
88#define COP0_WATCHHI0 $19,0 /* Address watch registers */
89#define COP0_WATCHHI1 $19,1 /* Address watch registers */
90#define COP0_XCONTEXT $20,0 /* OS context */
91#define COP0_MULTICOREDEBUG $22,0 /* Cavium debug */
92#define COP0_DEBUG $23,0 /* Debug status */
93#define COP0_DEPC $24,0 /* Debug PC */
94#define COP0_PERFCONTROL0 $25,0 /* Performance counter control */
95#define COP0_PERFCONTROL1 $25,2 /* Performance counter control */
96#define COP0_PERFVALUE0 $25,1 /* Performance counter */
97#define COP0_PERFVALUE1 $25,3 /* Performance counter */
98#define COP0_CACHEERRI $27,0 /* I cache error status */
99#define COP0_CACHEERRD $27,1 /* D cache error status */
100#define COP0_TAGLOI $28,0 /* I cache tagLo */
101#define COP0_TAGLOD $28,2 /* D cache tagLo */
102#define COP0_DATALOI $28,1 /* I cache dataLo */
103#define COP0_DATALOD $28,3 /* D cache dataLo */
104#define COP0_TAGHI $29,2 /* ? */
105#define COP0_DATAHII $29,1 /* ? */
106#define COP0_DATAHID $29,3 /* ? */
107#define COP0_ERROREPC $30,0 /* Error PC */
108#define COP0_DESAVE $31,0 /* Debug scratch area */
109
110/* This header file can be included from a .S file. Keep non-preprocessor
111 things under !__ASSEMBLER__. */
112#ifndef __ASSEMBLER__
113
114#include "octeon-model.h"
115
56#ifdef __cplusplus
57extern "C" {
58#endif
59
60/* turn the variable name into a string */
61#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
62#define CVMX_TMP_STR2(x) #x
63
116#ifdef __cplusplus
117extern "C" {
118#endif
119
120/* turn the variable name into a string */
121#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
122#define CVMX_TMP_STR2(x) #x
123
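For readers unfamiliar with the two-level stringizing idiom: the indirection through CVMX_TMP_STR2 forces the argument to be macro-expanded before it is turned into a string, which is what lets macros such as CVMX_SYNCI and CVMX_CACHE further down paste a numeric offset into their asm templates. A tiny illustration (MY_OFFSET is an assumed example, not part of the header):

    #define MY_OFFSET 128
    /* CVMX_TMP_STR2(MY_OFFSET) stringizes the unexpanded token: "MY_OFFSET" */
    /* CVMX_TMP_STR(MY_OFFSET) expands the argument first and yields: "128"  */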
124#if !OCTEON_IS_COMMON_BINARY()
125 #if CVMX_COMPILED_FOR(OCTEON_CN63XX)
126 #define CVMX_CAVIUM_OCTEON2
127 #endif
128#endif
129
64/* other useful stuff */
65#define CVMX_BREAK asm volatile ("break")
66#define CVMX_SYNC asm volatile ("sync" : : :"memory")
67/* String version of the SYNCW macro for use in inline asm constructs */
130/* other useful stuff */
131#define CVMX_BREAK asm volatile ("break")
132#define CVMX_SYNC asm volatile ("sync" : : :"memory")
133/* String version of the SYNCW macro for use in inline asm constructs */
68#define CVMX_SYNCW_STR "syncw\nsyncw\n"
134#define CVMX_SYNCW_STR_OCTEON2 "syncw\n"
135#ifdef CVMX_CAVIUM_OCTEON2
136 #define CVMX_SYNCW_STR CVMX_SYNCW_STR_OCTEON2
137#else
138 #define CVMX_SYNCW_STR "syncw\nsyncw\n"
139#endif /* CVMX_CAVIUM_OCTEON2 */
140
69#ifdef __OCTEON__
70 #define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
71 #define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
72 #define CVMX_SYNCIOALL asm volatile ("nop") /* Deprecated, will be removed in future release */
73 /* We actually use two syncw instructions in a row when we need a write
74 memory barrier. This is because the CN3XXX series of Octeons have
75 errata Core-401. This can cause a single syncw to not enforce
76 ordering under very rare conditions. Even if it is rare, better safe
77 than sorry */
141#ifdef __OCTEON__
142 #define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
143 #define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
144 #define CVMX_SYNCIOALL asm volatile ("nop") /* Deprecated, will be removed in future release */
145 /* We actually use two syncw instructions in a row when we need a write
146 memory barrier. This is because the CN3XXX series of Octeons have
147 errata Core-401. This can cause a single syncw to not enforce
148 ordering under very rare conditions. Even if it is rare, better safe
149 than sorry */
78 #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
150 #define CVMX_SYNCW_OCTEON2 asm volatile ("syncw\n" : : :"memory")
151 #ifdef CVMX_CAVIUM_OCTEON2
152 #define CVMX_SYNCW CVMX_SYNCW_OCTEON2
153 #else
154 #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
155 #endif /* CVMX_CAVIUM_OCTEON2 */
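As a rough usage sketch (not part of the header; the shared structure and the second core polling it are assumed), this is the producer pattern the write barrier exists for: fill a buffer, issue CVMX_SYNCW so the data stores are ordered ahead of the flag store, then publish the flag.

    #include <stdint.h>

    struct shared_msg {                  /* assumed layout, visible to another core */
        uint64_t payload[8];
        volatile uint64_t ready;
    };

    static void publish(struct shared_msg *m, const uint64_t *data)
    {
        int i;
        for (i = 0; i < 8; i++)
            m->payload[i] = data[i];
        CVMX_SYNCW;                      /* order payload stores before the flag store */
        m->ready = 1;                    /* consumer spins on 'ready', then reads payload */
    }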
79#if defined(VXWORKS) || defined(__linux__)
156#if defined(VXWORKS) || defined(__linux__)
80 /* Define new sync instructions to be normal SYNC instructions for
81 operating systems that use threads */
82 #define CVMX_SYNCWS CVMX_SYNCW
83 #define CVMX_SYNCS CVMX_SYNC
84 #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
157 /* Define new sync instructions to be normal SYNC instructions for
158 operating systems that use threads */
159 #define CVMX_SYNCWS CVMX_SYNCW
160 #define CVMX_SYNCS CVMX_SYNC
161 #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
162 #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
163 #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
85#else
86 #if defined(CVMX_BUILD_FOR_TOOLCHAIN)
87 /* While building simple exec toolchain, always use syncw to
88 support all Octeon models. */
89 #define CVMX_SYNCWS CVMX_SYNCW
90 #define CVMX_SYNCS CVMX_SYNC
91 #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
164#else
165 #if defined(CVMX_BUILD_FOR_TOOLCHAIN)
166 /* While building simple exec toolchain, always use syncw to
167 support all Octeon models. */
168 #define CVMX_SYNCWS CVMX_SYNCW
169 #define CVMX_SYNCS CVMX_SYNC
170 #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
171 #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
172 #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
92 #else
93 /* Again, just like syncw, we may need two syncws instructions in a row due to
173 #else
174 /* Again, just like syncw, we may need two syncws instructions in a row due to
94 errata Core-401 */
95 #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
175 errata Core-401. Only one syncws is required for Octeon2 models */
96 #define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
176 #define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
97 #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
177 #define CVMX_SYNCWS_OCTEON2 asm volatile ("syncws\n" : : :"memory")
178 #define CVMX_SYNCWS_STR_OCTEON2 "syncws\n"
179 #ifdef CVMX_CAVIUM_OCTEON2
180 #define CVMX_SYNCWS CVMX_SYNCWS_OCTEON2
181 #define CVMX_SYNCWS_STR CVMX_SYNCWS_STR_OCTEON2
182 #else
183 #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
184 #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
185 #endif /* CVMX_CAVIUM_OCTEON2 */
98 #endif
99#endif
186 #endif
187#endif
100#else
188#else /* !__OCTEON__ */
101 /* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
102 #define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
103 #define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
104 #define CVMX_SYNCIOALL asm volatile ("nop") /* Deprecated, will be removed in future release */
105 #define CVMX_SYNCW asm volatile ("sync" : : :"memory")
106 #define CVMX_SYNCWS CVMX_SYNCW
107 #define CVMX_SYNCS CVMX_SYNC
108 #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
189 /* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
190 #define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
191 #define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
192 #define CVMX_SYNCIOALL asm volatile ("nop") /* Deprecated, will be removed in future release */
193 #define CVMX_SYNCW asm volatile ("sync" : : :"memory")
194 #define CVMX_SYNCWS CVMX_SYNCW
195 #define CVMX_SYNCS CVMX_SYNC
196 #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
197 #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW
198 #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR
109#endif
110#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
111#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
112#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
113// a normal prefetch
114#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
115// normal prefetches that use the pref instruction
116#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))

--- 15 unchanged lines hidden ---

132// block. Basically, SW is telling HW that the current version of the block will not be
133// used.
134#define CVMX_DONT_WRITE_BACK(address, offset) CVMX_PREFETCH_PREFX(29, address, offset)
135
136#define CVMX_ICACHE_INVALIDATE { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); } // flush stores, invalidate entire icache
137#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
138#define CVMX_DCACHE_INVALIDATE { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache
139
199#endif
200#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
201#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
202#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
203// a normal prefetch
204#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
205// normal prefetches that use the pref instruction
206#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))

--- 15 unchanged lines hidden ---

222// block. Basically, SW is telling HW that the current version of the block will not be
223// used.
224#define CVMX_DONT_WRITE_BACK(address, offset) CVMX_PREFETCH_PREFX(29, address, offset)
225
226#define CVMX_ICACHE_INVALIDATE { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); } // flush stores, invalidate entire icache
227#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
228#define CVMX_DCACHE_INVALIDATE { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache
229
230#define CVMX_CACHE(op, address, offset) asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
231#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset) // fetch and lock the state.
232#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset) // unlock the state.
233#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset) // invalidate the cache block and clear the USED bits for the block
234#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset) // load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register
235
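A minimal sketch of how the prefetch hints are typically used (the block layout and loop are assumed, not from the header): issue CVMX_PREFETCH128 so the line 128 bytes ahead of the current pointer is being fetched while the current line is summed; CVMX_DONT_WRITE_BACK can likewise be applied to buffers whose contents will not be read again.

    #include <stdint.h>

    /* Illustrative loop over 128-byte blocks (16 x 8-byte words per block). */
    static uint64_t sum_blocks(const uint64_t *words, int nblocks)
    {
        uint64_t sum = 0;
        int i, j;
        for (i = 0; i < nblocks; i++) {
            CVMX_PREFETCH128(&words[i * 16]);   /* start fetching the next 128B line */
            for (j = 0; j < 16; j++)
                sum += words[i * 16 + j];
        }
        return sum;
    }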
140/* new instruction to make RC4 run faster */
141#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))
142
143// misc v2 stuff
144#define CVMX_ROTR(result, input1, shiftconst) asm ("rotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
145#define CVMX_ROTRV(result, input1, input2) asm ("rotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
146#define CVMX_DROTR(result, input1, shiftconst) asm ("drotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
147#define CVMX_DROTRV(result, input1, input2) asm ("drotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))

--- 79 unchanged lines hidden ---

227 "\tsll\t%0,%0,0" : "=d"(_v)); \
228 result = (__typeof(result))_v; \
229 } \
230 } else { \
231 unsigned long _v; \
232 ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v)); \
233 result = (__typeof(result))_v; \
234 }})
236/* new instruction to make RC4 run faster */
237#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))
238
239// misc v2 stuff
240#define CVMX_ROTR(result, input1, shiftconst) asm ("rotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
241#define CVMX_ROTRV(result, input1, input2) asm ("rotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
242#define CVMX_DROTR(result, input1, shiftconst) asm ("drotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
243#define CVMX_DROTRV(result, input1, input2) asm ("drotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
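For context on the RC4 remark above, a small sketch (the helper and its names are assumed, not from the header): baddu adds two registers and keeps only the low 8 bits of the sum, which is exactly the modulo-256 index arithmetic RC4 performs on every step of its key schedule and keystream generation.

    #include <stdint.h>

    /* j = (j + s_i + k_i) mod 256, using two byte-adds. */
    static uint64_t rc4_index_step(uint64_t j, uint8_t s_i, uint8_t k_i)
    {
        uint64_t t;
        CVMX_BADDU(t, j, (uint64_t)s_i);   /* t = (j + s_i) & 0xff */
        CVMX_BADDU(t, t, (uint64_t)k_i);   /* t = (t + k_i) & 0xff */
        return t;
    }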

--- 79 unchanged lines hidden ---

323 "\tsll\t%0,%0,0" : "=d"(_v)); \
324 result = (__typeof(result))_v; \
325 } \
326 } else { \
327 unsigned long _v; \
328 ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v)); \
329 result = (__typeof(result))_v; \
330 }})
235
236
237
331
332
333
238# define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
239# define CVMX_RDHWRNV(result, regstr) CVMX_RDHWRX(result, regstr, asm)
240#else
241# define CVMX_RDHWR(result, regstr) asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
242# define CVMX_RDHWRNV(result, regstr) asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
243#endif
244
245// some new cop0-like stuff

--- 157 unchanged lines hidden ---

403// pos can be 0-1
404#define CVMX_MF_AES_RESULT(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
405// pos can be 0-1
406#define CVMX_MF_AES_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
407// pos can be 0-3
408#define CVMX_MF_AES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
409#define CVMX_MF_AES_KEYLENGTH(val) asm volatile ("dmfc2 %[rt],0x0110" : [rt] "=d" (val) : ) // read the keylen
410#define CVMX_MF_AES_DAT0(val) asm volatile ("dmfc2 %[rt],0x0111" : [rt] "=d" (val) : ) // first piece of input data
334# define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
335# define CVMX_RDHWRNV(result, regstr) CVMX_RDHWRX(result, regstr, asm)
336#else
337# define CVMX_RDHWR(result, regstr) asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
338# define CVMX_RDHWRNV(result, regstr) asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
339#endif
340
341// some new cop0-like stuff

--- 157 unchanged lines hidden ---

499// pos can be 0-1
500#define CVMX_MF_AES_RESULT(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
501// pos can be 0-1
502#define CVMX_MF_AES_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
503// pos can be 0-3
504#define CVMX_MF_AES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
505#define CVMX_MF_AES_KEYLENGTH(val) asm volatile ("dmfc2 %[rt],0x0110" : [rt] "=d" (val) : ) // read the keylen
506#define CVMX_MF_AES_DAT0(val) asm volatile ("dmfc2 %[rt],0x0111" : [rt] "=d" (val) : ) // first piece of input data
411/* GFM COP2 macros */
412/* index can be 0 or 1 */
413#define CVMX_MF_GFM_MUL(val, index) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(index) : [rt] "=d" (val) : )
414#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
415#define CVMX_MF_GFM_RESINP(val, index) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(index) : [rt] "=d" (val) : )
416
507
417#define CVMX_MT_GFM_MUL(val, index) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(index) : : [rt] "d" (val))
418#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
419#define CVMX_MT_GFM_RESINP(val, index) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(index) : : [rt] "d" (val))
420#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
421#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
508// GFM
422
509
510// pos can be 0-1
511#define CVMX_MF_GFM_MUL(val,pos) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
512#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
513// pos can be 0-1
514#define CVMX_MF_GFM_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
515// pos can be 0-1
516#define CVMX_MF_GFM_RESINP_REFLECT(val,pos) asm volatile ("dmfc2 %[rt],0x005a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
423
517
518// pos can be 0-1
519#define CVMX_MT_GFM_MUL(val,pos) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
520#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
521// pos can be 0-1
522#define CVMX_MT_GFM_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
523#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
524#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
525// pos can be 0-1
526#define CVMX_MT_GFM_MUL_REFLECT(val,pos) asm volatile ("dmtc2 %[rt],0x0058+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
527#define CVMX_MT_GFM_XOR0_REFLECT(val) asm volatile ("dmtc2 %[rt],0x005c" : : [rt] "d" (val))
528#define CVMX_MT_GFM_XORMUL1_REFLECT(val) asm volatile ("dmtc2 %[rt],0x405d" : : [rt] "d" (val))
529
530// SNOW 3G
531
532// pos can be 0-7
533#define CVMX_MF_SNOW3G_LFSR(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
534// pos can be 0-2
535#define CVMX_MF_SNOW3G_FSM(val,pos) asm volatile ("dmfc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
536#define CVMX_MF_SNOW3G_RESULT(val) asm volatile ("dmfc2 %[rt],0x0250" : [rt] "=d" (val) : )
537
538// pos can be 0-7
539#define CVMX_MT_SNOW3G_LFSR(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
540// pos can be 0-2
541#define CVMX_MT_SNOW3G_FSM(val,pos) asm volatile ("dmtc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
542#define CVMX_MT_SNOW3G_RESULT(val) asm volatile ("dmtc2 %[rt],0x0250" : : [rt] "d" (val))
543#define CVMX_MT_SNOW3G_START(val) asm volatile ("dmtc2 %[rt],0x404d" : : [rt] "d" (val))
544#define CVMX_MT_SNOW3G_MORE(val) asm volatile ("dmtc2 %[rt],0x404e" : : [rt] "d" (val))
545
546// SMS4
547
548// pos can be 0-1
549#define CVMX_MF_SMS4_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+"CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
550// pos can be 0-1
551#define CVMX_MF_SMS4_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+"CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
552// pos can be 0-1
553#define CVMX_MF_SMS4_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x0100+"CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
554#define CVMX_MT_SMS4_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
555#define CVMX_MT_SMS4_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x311d" : : [rt] "d" (val))
556#define CVMX_MT_SMS4_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
557#define CVMX_MT_SMS4_DEC1(val) asm volatile ("dmtc2 %[rt],0x311f" : : [rt] "d" (val))
558#define CVMX_MT_SMS4_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
559#define CVMX_MT_SMS4_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3119" : : [rt] "d" (val))
560#define CVMX_MT_SMS4_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
561#define CVMX_MT_SMS4_ENC1(val) asm volatile ("dmtc2 %[rt],0x311b" : : [rt] "d" (val))
562// pos can be 0-1
563#define CVMX_MT_SMS4_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0102+"CVMX_TMP_STR(pos) : : [rt] "d" (val))
564// pos can be 0-1
565#define CVMX_MT_SMS4_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0104+"CVMX_TMP_STR(pos) : : [rt] "d" (val))
566// pos can be 0-1
567#define CVMX_MT_SMS4_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x0100+"CVMX_TMP_STR(pos) : : [rt] "d" (val))
568
424/* check_ordering stuff */
425#if 0
426#define CVMX_MF_CHORD(dest) asm volatile ("dmfc2 %[rt],0x400" : [rt] "=d" (dest) : )
427#else
428#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
429#endif
430
431#if 0
432#define CVMX_MF_CYCLE(dest) asm volatile ("dmfc0 %[rt],$9,6" : [rt] "=d" (dest) : ) // Use (64-bit) CvmCount register rather than Count
433#else
434#define CVMX_MF_CYCLE(dest) CVMX_RDHWR(dest, 31) /* reads the current (64-bit) CvmCount value */
435#endif
436
437#define CVMX_MT_CYCLE(src) asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))
438
569/* check_ordering stuff */
570#if 0
571#define CVMX_MF_CHORD(dest) asm volatile ("dmfc2 %[rt],0x400" : [rt] "=d" (dest) : )
572#else
573#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
574#endif
575
576#if 0
577#define CVMX_MF_CYCLE(dest) asm volatile ("dmfc0 %[rt],$9,6" : [rt] "=d" (dest) : ) // Use (64-bit) CvmCount register rather than Count
578#else
579#define CVMX_MF_CYCLE(dest) CVMX_RDHWR(dest, 31) /* reads the current (64-bit) CvmCount value */
580#endif
581
582#define CVMX_MT_CYCLE(src) asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))
583
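A minimal timing sketch using the cycle macros (do_work() is an assumed placeholder, not part of this header): read the 64-bit CvmCount before and after the region of interest and subtract.

    #include <stdint.h>

    extern void do_work(void);           /* assumed function to be timed */

    static uint64_t cycles_for_do_work(void)
    {
        uint64_t start, stop;
        CVMX_MF_CYCLE(start);            /* rdhwr $31: current 64-bit CvmCount */
        do_work();
        CVMX_MF_CYCLE(stop);
        return stop - start;             /* elapsed core clock cycles */
    }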
439#define CVMX_MF_CACHE_ERR(val) asm volatile ("dmfc0 %[rt],$27,0" : [rt] "=d" (val):)
440#define CVMX_MF_DCACHE_ERR(val) asm volatile ("dmfc0 %[rt],$27,1" : [rt] "=d" (val):)
441#define CVMX_MF_CVM_MEM_CTL(val) asm volatile ("dmfc0 %[rt],$11,7" : [rt] "=d" (val):)
442#define CVMX_MF_CVM_CTL(val) asm volatile ("dmfc0 %[rt],$9,7" : [rt] "=d" (val):)
443#define CVMX_MT_CACHE_ERR(val) asm volatile ("dmtc0 %[rt],$27,0" : : [rt] "d" (val))
444#define CVMX_MT_DCACHE_ERR(val) asm volatile ("dmtc0 %[rt],$27,1" : : [rt] "d" (val))
445#define CVMX_MT_CVM_MEM_CTL(val) asm volatile ("dmtc0 %[rt],$11,7" : : [rt] "d" (val))
446#define CVMX_MT_CVM_CTL(val) asm volatile ("dmtc0 %[rt],$9,7" : : [rt] "d" (val))
584#define VASTR(...) #__VA_ARGS__
447
585
586#define CVMX_MF_COP0(val, cop0) asm volatile ("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val));
587#define CVMX_MT_COP0(val, cop0) asm volatile ("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val));
588
589#define CVMX_MF_CACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRI)
590#define CVMX_MF_DCACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRD)
591#define CVMX_MF_CVM_MEM_CTL(val) CVMX_MF_COP0(val, COP0_CVMMEMCTL)
592#define CVMX_MF_CVM_CTL(val) CVMX_MF_COP0(val, COP0_CVMCTL)
593#define CVMX_MT_CACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRI)
594#define CVMX_MT_DCACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRD)
595#define CVMX_MT_CVM_MEM_CTL(val) CVMX_MT_COP0(val, COP0_CVMMEMCTL)
596#define CVMX_MT_CVM_CTL(val) CVMX_MT_COP0(val, COP0_CVMCTL)
597
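The COP0_* names defined at the top of the file expand to the "$reg,sel" pair that dmfc0/dmtc0 expect, so they can be handed directly to CVMX_MF_COP0/CVMX_MT_COP0. A minimal read-modify-write sketch (the bit manipulated is purely illustrative; consult the hardware manual for the real field meanings):

    #include <stdint.h>

    static void cvmmemctl_set_low_bit(void)
    {
        uint64_t val;
        CVMX_MF_COP0(val, COP0_CVMMEMCTL);   /* expands to: dmfc0 %[rt],$11,7 */
        val |= 1ull << 0;                    /* illustrative bit only */
        CVMX_MT_COP0(val, COP0_CVMMEMCTL);
    }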
448/* Macros for TLB */
449#define CVMX_TLBWI asm volatile ("tlbwi" : : )
450#define CVMX_TLBWR asm volatile ("tlbwr" : : )
451#define CVMX_TLBR asm volatile ("tlbr" : : )
598/* Macros for TLB */
599#define CVMX_TLBWI asm volatile ("tlbwi" : : )
600#define CVMX_TLBWR asm volatile ("tlbwr" : : )
601#define CVMX_TLBR asm volatile ("tlbr" : : )
602#define CVMX_TLBP asm volatile ("tlbp" : : )
452#define CVMX_MT_ENTRY_HIGH(val) asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
453#define CVMX_MT_ENTRY_LO_0(val) asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
454#define CVMX_MT_ENTRY_LO_1(val) asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
455#define CVMX_MT_PAGEMASK(val) asm volatile ("mtc0 %[rt],$5,0" : : [rt] "d" (val))
456#define CVMX_MT_PAGEGRAIN(val) asm volatile ("mtc0 %[rt],$5,1" : : [rt] "d" (val))
457#define CVMX_MT_TLB_INDEX(val) asm volatile ("mtc0 %[rt],$0,0" : : [rt] "d" (val))
458#define CVMX_MT_TLB_CONTEXT(val) asm volatile ("dmtc0 %[rt],$4,0" : : [rt] "d" (val))
459#define CVMX_MT_TLB_WIRED(val) asm volatile ("mtc0 %[rt],$6,0" : : [rt] "d" (val))
460#define CVMX_MT_TLB_RANDOM(val) asm volatile ("mtc0 %[rt],$1,0" : : [rt] "d" (val))
461#define CVMX_MF_ENTRY_LO_0(val) asm volatile ("dmfc0 %[rt],$2,0" : [rt] "=d" (val):)
462#define CVMX_MF_ENTRY_LO_1(val) asm volatile ("dmfc0 %[rt],$3,0" : [rt] "=d" (val):)
463#define CVMX_MF_ENTRY_HIGH(val) asm volatile ("dmfc0 %[rt],$10,0" : [rt] "=d" (val):)
464#define CVMX_MF_PAGEMASK(val) asm volatile ("mfc0 %[rt],$5,0" : [rt] "=d" (val):)
465#define CVMX_MF_PAGEGRAIN(val) asm volatile ("mfc0 %[rt],$5,1" : [rt] "=d" (val):)
466#define CVMX_MF_TLB_WIRED(val) asm volatile ("mfc0 %[rt],$6,0" : [rt] "=d" (val):)
603#define CVMX_MT_ENTRY_HIGH(val) asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
604#define CVMX_MT_ENTRY_LO_0(val) asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
605#define CVMX_MT_ENTRY_LO_1(val) asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
606#define CVMX_MT_PAGEMASK(val) asm volatile ("mtc0 %[rt],$5,0" : : [rt] "d" (val))
607#define CVMX_MT_PAGEGRAIN(val) asm volatile ("mtc0 %[rt],$5,1" : : [rt] "d" (val))
608#define CVMX_MT_TLB_INDEX(val) asm volatile ("mtc0 %[rt],$0,0" : : [rt] "d" (val))
609#define CVMX_MT_TLB_CONTEXT(val) asm volatile ("dmtc0 %[rt],$4,0" : : [rt] "d" (val))
610#define CVMX_MT_TLB_WIRED(val) asm volatile ("mtc0 %[rt],$6,0" : : [rt] "d" (val))
611#define CVMX_MT_TLB_RANDOM(val) asm volatile ("mtc0 %[rt],$1,0" : : [rt] "d" (val))
612#define CVMX_MF_ENTRY_LO_0(val) asm volatile ("dmfc0 %[rt],$2,0" : [rt] "=d" (val):)
613#define CVMX_MF_ENTRY_LO_1(val) asm volatile ("dmfc0 %[rt],$3,0" : [rt] "=d" (val):)
614#define CVMX_MF_ENTRY_HIGH(val) asm volatile ("dmfc0 %[rt],$10,0" : [rt] "=d" (val):)
615#define CVMX_MF_PAGEMASK(val) asm volatile ("mfc0 %[rt],$5,0" : [rt] "=d" (val):)
616#define CVMX_MF_PAGEGRAIN(val) asm volatile ("mfc0 %[rt],$5,1" : [rt] "=d" (val):)
617#define CVMX_MF_TLB_WIRED(val) asm volatile ("mfc0 %[rt],$6,0" : [rt] "=d" (val):)
618#define CVMX_MF_TLB_INDEX(val) asm volatile ("mfc0 %[rt],$0,0" : [rt] "=d" (val):)
467#define CVMX_MF_TLB_RANDOM(val) asm volatile ("mfc0 %[rt],$1,0" : [rt] "=d" (val):)
468#define TLB_DIRTY (0x1ULL<<2)
469#define TLB_VALID (0x1ULL<<1)
470#define TLB_GLOBAL (0x1ULL<<0)
471
472
619#define CVMX_MF_TLB_RANDOM(val) asm volatile ("mfc0 %[rt],$1,0" : [rt] "=d" (val):)
620#define TLB_DIRTY (0x1ULL<<2)
621#define TLB_VALID (0x1ULL<<1)
622#define TLB_GLOBAL (0x1ULL<<0)
623
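A rough sketch of how the TLB macros combine to write one wired entry, following the conventional MIPS64 sequence (the index, addresses, and the cacheability value are assumed for illustration; check the hardware manual for the exact EntryLo encoding on a given part):

    #include <stdint.h>

    static void write_tlb_entry(int index, uint64_t vaddr,
                                uint64_t paddr_even, uint64_t paddr_odd)
    {
        /* EntryLo: PFN at bit 6, cache attribute at bits 5:3 (3 = cacheable,
           illustrative), plus the D/V/G bits defined above. */
        uint64_t lo0 = ((paddr_even >> 12) << 6) | (3 << 3) | TLB_DIRTY | TLB_VALID | TLB_GLOBAL;
        uint64_t lo1 = ((paddr_odd  >> 12) << 6) | (3 << 3) | TLB_DIRTY | TLB_VALID | TLB_GLOBAL;

        CVMX_MT_TLB_INDEX(index);
        CVMX_MT_PAGEMASK(0);                      /* 4KB page pair */
        CVMX_MT_ENTRY_HIGH(vaddr & ~0x1fffull);   /* VPN2 covers an even/odd pair */
        CVMX_MT_ENTRY_LO_0(lo0);
        CVMX_MT_ENTRY_LO_1(lo1);
        CVMX_TLBWI;                               /* commit to the indexed entry */
    }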
624
625#if !defined(__FreeBSD__) || !defined(_KERNEL)
626/* Macros to PUSH and POP Octeon2 ISA. */
627#define CVMX_PUSH_OCTEON2 asm volatile (".set push\n.set arch=octeon2")
628#define CVMX_POP_OCTEON2 asm volatile (".set pop")
629#endif
473
474/* assembler macros to guarantee byte loads/stores are used */
475/* for an unaligned 16-bit access (these use AT register) */
476/* we need the hidden argument (__a) so that GCC gets the dependencies right */
477#define CVMX_LOADUNA_INT16(result, address, offset) \
478 { char *__a = (char *)(address); \
479 asm ("ulh %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset]), "m"(__a[offset + 1])); }
480#define CVMX_LOADUNA_UINT16(result, address, offset) \

--- 24 unchanged lines hidden ---

505 "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]), \
506 "=m"(__a[offset + 4]), "=m"(__a[offset + 5]), "=m"(__a[offset + 6]), "=m"(__a[offset + 7]) : \
507 [rsrc] "d" (data), [rbase] "d" (__a)); }
508
509#ifdef __cplusplus
510}
511#endif
512
630
631/* assembler macros to guarantee byte loads/stores are used */
632/* for an unaligned 16-bit access (these use AT register) */
633/* we need the hidden argument (__a) so that GCC gets the dependencies right */
634#define CVMX_LOADUNA_INT16(result, address, offset) \
635 { char *__a = (char *)(address); \
636 asm ("ulh %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset]), "m"(__a[offset + 1])); }
637#define CVMX_LOADUNA_UINT16(result, address, offset) \

--- 24 unchanged lines hidden ---

662 "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]), \
663 "=m"(__a[offset + 4]), "=m"(__a[offset + 5]), "=m"(__a[offset + 6]), "=m"(__a[offset + 7]) : \
664 [rsrc] "d" (data), [rbase] "d" (__a)); }
665
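A small usage sketch (the packet buffer and the odd offset are assumed): the macro performs the unaligned 16-bit load without an address-error exception, and the extra "m" operands on __a let GCC see exactly which bytes the asm depends on.

    #include <stdint.h>

    static uint16_t read_field(uint8_t *pkt)
    {
        uint16_t field;
        CVMX_LOADUNA_UINT16(field, pkt, 7);   /* unaligned 16-bit load at byte offset 7 */
        return field;
    }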
666#ifdef __cplusplus
667}
668#endif
669
670#endif /* __ASSEMBLER__ */
671
513#endif /* __CVMX_ASM_H__ */
672#endif /* __CVMX_ASM_H__ */