/* cvmx-asm.h revision 210286 */
/***********************license start***************
 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/




/**
 * @file
 *
 * This file defines ASM primitives for the executive.
 *
 * <hr>$Revision: 42280 $<hr>
 *
 *
 */
53285612Sdelphij#ifndef __CVMX_ASM_H__
54285612Sdelphij#define __CVMX_ASM_H__
55285612Sdelphij
56285612Sdelphij#ifdef	__cplusplus
57181834Srobertoextern "C" {
58181834Sroberto#endif
59181834Sroberto
/* Turn the macro argument into a string literal.  The two-level
   expansion (CVMX_TMP_STR -> CVMX_TMP_STR2) is required so that a
   macro argument is fully expanded BEFORE it is stringized. */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x

/* Basic barrier / trap primitives.  CVMX_SYNC carries a "memory"
   clobber so the compiler will not reorder memory accesses across it. */
#define CVMX_BREAK asm volatile ("break")
#define CVMX_SYNC asm volatile ("sync" : : :"memory")
/* String version of SYNCW macro for using in inline asm constructs */
#define CVMX_SYNCW_STR "syncw\nsyncw\n"
#ifdef __OCTEON__
    #define CVMX_SYNCIO asm volatile ("nop")   /* Deprecated, will be removed in future release */
    #define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
    #define CVMX_SYNCIOALL asm volatile ("nop")   /* Deprecated, will be removed in future release */
    /* We actually use two syncw instructions in a row when we need a write
        memory barrier. This is because the CN3XXX series of Octeons have
        errata Core-401. This can cause a single syncw to not enforce
        ordering under very rare conditions. Even if it is rare, better safe
        than sorry */
    #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
#if defined(VXWORKS) || defined(__linux__)
    /* Define new sync instructions to be normal SYNC instructions for
        operating systems that use threads */
    #define CVMX_SYNCWS CVMX_SYNCW
    #define CVMX_SYNCS  CVMX_SYNC
    #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#else
    #if defined(CVMX_BUILD_FOR_TOOLCHAIN)
        /* While building simple exec toolchain, always use syncw to
           support all Octeon models. */
        #define CVMX_SYNCWS CVMX_SYNCW
        #define CVMX_SYNCS  CVMX_SYNC
        #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
    #else
        /* Again, just like syncw, we may need two syncws instructions in a row due
            errata Core-401 */
        #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
        #define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
        #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
    #endif
#endif
#else
    /* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
    #define CVMX_SYNCIO asm volatile ("nop")   /* Deprecated, will be removed in future release */
    #define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
    #define CVMX_SYNCIOALL asm volatile ("nop")   /* Deprecated, will be removed in future release */
    #define CVMX_SYNCW asm volatile ("sync" : : :"memory")
    #define CVMX_SYNCWS CVMX_SYNCW
    #define CVMX_SYNCS  CVMX_SYNC
    #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#endif
/* Instruction-cache synchronize for one line at address+offset.
   NOTE: offset must be a compile-time constant (it is stringized). */
#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
// a normal prefetch
#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
// normal prefetches that use the pref instruction; X is the pref hint number
#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
#define CVMX_PREFETCH_PREF1(address, offset) CVMX_PREFETCH_PREFX(1, address, offset)
#define CVMX_PREFETCH_PREF6(address, offset) CVMX_PREFETCH_PREFX(6, address, offset)
#define CVMX_PREFETCH_PREF7(address, offset) CVMX_PREFETCH_PREFX(7, address, offset)
// prefetch into L1, do not put the block in the L2
#define CVMX_PREFETCH_NOTL2(address, offset) CVMX_PREFETCH_PREFX(4, address, offset)
#define CVMX_PREFETCH_NOTL22(address, offset) CVMX_PREFETCH_PREFX(5, address, offset)
// prefetch into L2, do not put the block in the L1
#define CVMX_PREFETCH_L2(address, offset) CVMX_PREFETCH_PREFX(28, address, offset)
// CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable (actually old value or zero) until
// that byte is stored to (by this or another processor. Note that the value of each byte is not only
// unpredictable, but may also change again - up until the point when one of the cores stores to the
// byte.
#define CVMX_PREPARE_FOR_STORE(address, offset) CVMX_PREFETCH_PREFX(30, address, offset)
// This is a command headed to the L2 controller to tell it to clear its dirty bit for a
// block. Basically, SW is telling HW that the current version of the block will not be
// used.
#define CVMX_DONT_WRITE_BACK(address, offset) CVMX_PREFETCH_PREFX(29, address, offset)

#define CVMX_ICACHE_INVALIDATE  { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); }    // flush stores, invalidate entire icache
#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
#define CVMX_DCACHE_INVALIDATE  { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache

/* new instruction to make RC4 run faster */
#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))

// misc v2 stuff; shiftconst arguments must be compile-time constants
#define CVMX_ROTR(result, input1, shiftconst) asm ("rotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
#define CVMX_ROTRV(result, input1, input2) asm ("rotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
#define CVMX_DROTR(result, input1, shiftconst) asm ("drotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
#define CVMX_DROTRV(result, input1, input2) asm ("drotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
#define CVMX_SEB(result, input1) asm ("seb %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_SEH(result, input1) asm ("seh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_DSBH(result, input1) asm ("dsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_DSHD(result, input1) asm ("dshd %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_WSBH(result, input1) asm ("wsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))

// Endian swap: 64-bit = swap bytes within halfwords then swap halfwords
// within the doubleword; 32-bit = swap bytes within halfwords then rotate 16.
#define CVMX_ES64(result, input) \
        do {\
        CVMX_DSBH(result, input); \
        CVMX_DSHD(result, result); \
        } while (0)
#define CVMX_ES32(result, input) \
        do {\
        CVMX_WSBH(result, input); \
        CVMX_ROTR(result, result, 16); \
        } while (0)


/* extract and insert - NOTE that pos and len variables must be constants! */
/* the P variants take len rather than lenm1 */
/* the M1 variants take lenm1 rather than len */
#define CVMX_EXTS(result,input,pos,lenm1) asm ("exts %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_EXTSP(result,input,pos,len) CVMX_EXTS(result,input,pos,(len)-1)

#define CVMX_DEXT(result,input,pos,len) asm ("dext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_DEXTM1(result,input,pos,lenm1) CVMX_DEXT(result,input,pos,(lenm1)+1)

#define CVMX_EXT(result,input,pos,len) asm ("ext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_EXTM1(result,input,pos,lenm1) CVMX_EXT(result,input,pos,(lenm1)+1)

// removed
// #define CVMX_EXTU(result,input,pos,lenm1) asm ("extu %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
// #define CVMX_EXTUP(result,input,pos,len) CVMX_EXTU(result,input,pos,(len)-1)

#define CVMX_CINS(result,input,pos,lenm1) asm ("cins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_CINSP(result,input,pos,len) CVMX_CINS(result,input,pos,(len)-1)

/* The insert macros read-modify-write "result", hence the "[rt]" tie
   that makes the destination also an input operand. */
#define CVMX_DINS(result,input,pos,len) asm ("dins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
#define CVMX_DINSM1(result,input,pos,lenm1) CVMX_DINS(result,input,pos,(lenm1)+1)
#define CVMX_DINSC(result,pos,len) asm ("dins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
#define CVMX_DINSCM1(result,pos,lenm1) CVMX_DINSC(result,pos,(lenm1)+1)

#define CVMX_INS(result,input,pos,len) asm ("ins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
#define CVMX_INSM1(result,input,pos,lenm1) CVMX_INS(result,input,pos,(lenm1)+1)
#define CVMX_INSC(result,pos,len) asm ("ins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
#define CVMX_INSCM1(result,pos,lenm1) CVMX_INSC(result,pos,(lenm1)+1)

// removed
// #define CVMX_INS0(result,input,pos,lenm1) asm("ins0 %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
// #define CVMX_INS0P(result,input,pos,len) CVMX_INS0(result,input,pos,(len)-1)
// #define CVMX_INS0C(result,pos,lenm1) asm ("ins0 %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : "[rt]" (result))
// #define CVMX_INS0CP(result,pos,len) CVMX_INS0C(result,pos,(len)-1)

/* Count leading zeros/ones and population count (32- and 64-bit forms). */
#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DCLZ(result, input) asm ("dclz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_CLO(result, input) asm ("clo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DCLO(result, input) asm ("dclo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_POP(result, input) asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DPOP(result, input) asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))

#ifdef CVMX_ABI_O32

  /* rdhwr $31 is the 64 bit cmvcount register, it needs to be split
     into one or two (depending on the width of the result) properly
     sign extended registers.  All other registers are 32 bits wide
     and already properly sign extended. */
#  define CVMX_RDHWRX(result, regstr, ASM_STMT) ({			\
  if (regstr == 31) {							\
    if (sizeof(result) == 8) {						\
      ASM_STMT (".set\tpush\n"						\
		"\t.set\tmips64r2\n"					\
		"\trdhwr\t%L0,$31\n"					\
		"\tdsra\t%M0,%L0,32\n"					\
		"\tsll\t%L0,%L0,0\n"					\
		"\t.set\tpop": "=d"(result));				\
    } else {								\
      unsigned long _v;							\
      ASM_STMT ("rdhwr\t%0,$31\n"					\
		"\tsll\t%0,%0,0" : "=d"(_v));				\
      result = (typeof(result))_v;					\
    }									\
  } else {								\
    unsigned long _v;							\
    ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v));		\
    result = (typeof(result))_v;					\
  }})



/* RDHWR is volatile (never reordered/elided); RDHWRNV allows CSE. */
#  define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
#  define CVMX_RDHWRNV(result, regstr) CVMX_RDHWRX(result, regstr, asm)
#else
#  define CVMX_RDHWR(result, regstr) asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#  define CVMX_RDHWRNV(result, regstr) asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#endif

// some new cop0-like stuff: interrupt enable/disable and hazard barrier
#define CVMX_DI(result) asm volatile ("di %[rt]" : [rt] "=d" (result))
#define CVMX_DI_NULL asm volatile ("di")
#define CVMX_EI(result) asm volatile ("ei %[rt]" : [rt] "=d" (result))
#define CVMX_EI_NULL asm volatile ("ei")
#define CVMX_EHB asm volatile ("ehb")

/* mul stuff: load the wide-multiplier M/P registers, then run the
   vector multiply instructions against them */
#define CVMX_MTM0(m) asm volatile ("mtm0 %[rs]" : : [rs] "d" (m))
#define CVMX_MTM1(m) asm volatile ("mtm1 %[rs]" : : [rs] "d" (m))
#define CVMX_MTM2(m) asm volatile ("mtm2 %[rs]" : : [rs] "d" (m))
#define CVMX_MTP0(p) asm volatile ("mtp0 %[rs]" : : [rs] "d" (p))
#define CVMX_MTP1(p) asm volatile ("mtp1 %[rs]" : : [rs] "d" (p))
#define CVMX_MTP2(p) asm volatile ("mtp2 %[rs]" : : [rs] "d" (p))
#define CVMX_VMULU(dest,mpcand,accum) asm volatile ("vmulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
#define CVMX_VMM0(dest,mpcand,accum) asm volatile ("vmm0 %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
#define CVMX_V3MULU(dest,mpcand,accum) asm volatile ("v3mulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))

/* branch stuff */
// these are hard to make work because the compiler does not realize that the
// instruction is a branch so may optimize away the label
// the labels to these next two macros must not include a ":" at the end
#define CVMX_BBIT1(var, pos, label) asm volatile ("bbit1 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
#define CVMX_BBIT0(var, pos, label) asm volatile ("bbit0 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
// the label to this macro must include a ":" at the end
#define CVMX_ASM_LABEL(label) label \
                             asm volatile (CVMX_TMP_STR(label) : : )

//
// Low-latency memory stuff (dmtc2/dmfc2 moves to/from COP2 registers)
//
// set can be 0-1
#define CVMX_MT_LLM_READ_ADDR(set,val)    asm volatile ("dmtc2 %[rt],0x0400+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_WRITE_ADDR_INTERNAL(set,val)   asm volatile ("dmtc2 %[rt],0x0401+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_READ64_ADDR(set,val)  asm volatile ("dmtc2 %[rt],0x0404+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_WRITE64_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0405+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_DATA(set,val)         asm volatile ("dmtc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MF_LLM_DATA(set,val)         asm volatile ("dmfc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : [rt] "=d" (val) : )


// load linked, store conditional (SC/SCD write the success flag back
// into srcdest, hence the tied "[rt]" input operand)
#define CVMX_LL(dest, address, offset) asm volatile ("ll %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
#define CVMX_LLD(dest, address, offset) asm volatile ("lld %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
#define CVMX_SC(srcdest, address, offset) asm volatile ("sc %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_SCD(srcdest, address, offset) asm volatile ("scd %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )

// load/store word left/right (unaligned access helpers)
#define CVMX_LWR(srcdest, address, offset) asm volatile ("lwr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_LWL(srcdest, address, offset) asm volatile ("lwl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_LDR(srcdest, address, offset) asm volatile ("ldr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_LDL(srcdest, address, offset) asm volatile ("ldl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )

#define CVMX_SWR(src, address, offset) asm volatile ("swr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
#define CVMX_SWL(src, address, offset) asm volatile ("swl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
#define CVMX_SDR(src, address, offset) asm volatile ("sdr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
#define CVMX_SDL(src, address, offset) asm volatile ("sdl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )



//
// Useful crypto ASM's
//

// CRC (COP2 CRC unit; dmtc2/dmfc2 immediates select the unit register)

#define CVMX_MT_CRC_POLYNOMIAL(val)         asm volatile ("dmtc2 %[rt],0x4200" : : [rt] "d" (val))
#define CVMX_MT_CRC_IV(val)                 asm volatile ("dmtc2 %[rt],0x0201" : : [rt] "d" (val))
#define CVMX_MT_CRC_LEN(val)                asm volatile ("dmtc2 %[rt],0x1202" : : [rt] "d" (val))
#define CVMX_MT_CRC_BYTE(val)               asm volatile ("dmtc2 %[rt],0x0204" : : [rt] "d" (val))
#define CVMX_MT_CRC_HALF(val)               asm volatile ("dmtc2 %[rt],0x0205" : : [rt] "d" (val))
#define CVMX_MT_CRC_WORD(val)               asm volatile ("dmtc2 %[rt],0x0206" : : [rt] "d" (val))
#define CVMX_MT_CRC_DWORD(val)              asm volatile ("dmtc2 %[rt],0x1207" : : [rt] "d" (val))
#define CVMX_MT_CRC_VAR(val)                asm volatile ("dmtc2 %[rt],0x1208" : : [rt] "d" (val))
#define CVMX_MT_CRC_POLYNOMIAL_REFLECT(val) asm volatile ("dmtc2 %[rt],0x4210" : : [rt] "d" (val))
#define CVMX_MT_CRC_IV_REFLECT(val)         asm volatile ("dmtc2 %[rt],0x0211" : : [rt] "d" (val))
#define CVMX_MT_CRC_BYTE_REFLECT(val)       asm volatile ("dmtc2 %[rt],0x0214" : : [rt] "d" (val))
#define CVMX_MT_CRC_HALF_REFLECT(val)       asm volatile ("dmtc2 %[rt],0x0215" : : [rt] "d" (val))
#define CVMX_MT_CRC_WORD_REFLECT(val)       asm volatile ("dmtc2 %[rt],0x0216" : : [rt] "d" (val))
#define CVMX_MT_CRC_DWORD_REFLECT(val)      asm volatile ("dmtc2 %[rt],0x1217" : : [rt] "d" (val))
#define CVMX_MT_CRC_VAR_REFLECT(val)        asm volatile ("dmtc2 %[rt],0x1218" : : [rt] "d" (val))

#define CVMX_MF_CRC_POLYNOMIAL(val)         asm volatile ("dmfc2 %[rt],0x0200" : [rt] "=d" (val) : )
#define CVMX_MF_CRC_IV(val)                 asm volatile ("dmfc2 %[rt],0x0201" : [rt] "=d" (val) : )
#define CVMX_MF_CRC_IV_REFLECT(val)         asm volatile ("dmfc2 %[rt],0x0203" : [rt] "=d" (val) : )
#define CVMX_MF_CRC_LEN(val)                asm volatile ("dmfc2 %[rt],0x0202" : [rt] "=d" (val) : )

// MD5 and SHA-1 (COP2 hash unit)

// pos can be 0-6
#define CVMX_MT_HSH_DAT(val,pos)    asm volatile ("dmtc2 %[rt],0x0040+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
#define CVMX_MT_HSH_DATZ(pos)       asm volatile ("dmtc2    $0,0x0040+" CVMX_TMP_STR(pos) :                 :               )
// pos can be 0-14
#define CVMX_MT_HSH_DATW(val,pos)   asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
#define CVMX_MT_HSH_DATWZ(pos)      asm volatile ("dmtc2    $0,0x0240+" CVMX_TMP_STR(pos) :                 :               )
#define CVMX_MT_HSH_STARTMD5(val)   asm volatile ("dmtc2 %[rt],0x4047"                   :                 : [rt] "d" (val))
#define CVMX_MT_HSH_STARTSHA(val)   asm volatile ("dmtc2 %[rt],0x4057"                   :                 : [rt] "d" (val))
#define CVMX_MT_HSH_STARTSHA256(val)   asm volatile ("dmtc2 %[rt],0x404f"                   :                 : [rt] "d" (val))
#define CVMX_MT_HSH_STARTSHA512(val)   asm volatile ("dmtc2 %[rt],0x424f"                   :                 : [rt] "d" (val))
// pos can be 0-3
#define CVMX_MT_HSH_IV(val,pos)     asm volatile ("dmtc2 %[rt],0x0048+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
// pos can be 0-7
#define CVMX_MT_HSH_IVW(val,pos)     asm volatile ("dmtc2 %[rt],0x0250+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))

// pos can be 0-6
#define CVMX_MF_HSH_DAT(val,pos)    asm volatile ("dmfc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
// pos can be 0-14
#define CVMX_MF_HSH_DATW(val,pos)   asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
// pos can be 0-3
#define CVMX_MF_HSH_IV(val,pos)     asm volatile ("dmfc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
// pos can be 0-7
#define CVMX_MF_HSH_IVW(val,pos)     asm volatile ("dmfc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )

// 3DES (COP2 DES unit)

// pos can be 0-2
#define CVMX_MT_3DES_KEY(val,pos)   asm volatile ("dmtc2 %[rt],0x0080+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
#define CVMX_MT_3DES_IV(val)        asm volatile ("dmtc2 %[rt],0x0084"                   :                 : [rt] "d" (val))
#define CVMX_MT_3DES_ENC_CBC(val)   asm volatile ("dmtc2 %[rt],0x4088"                   :                 : [rt] "d" (val))
#define CVMX_MT_3DES_ENC(val)       asm volatile ("dmtc2 %[rt],0x408a"                   :                 : [rt] "d" (val))
#define CVMX_MT_3DES_DEC_CBC(val)   asm volatile ("dmtc2 %[rt],0x408c"                   :                 : [rt] "d" (val))
#define CVMX_MT_3DES_DEC(val)       asm volatile ("dmtc2 %[rt],0x408e"                   :                 : [rt] "d" (val))
#define CVMX_MT_3DES_RESULT(val)    asm volatile ("dmtc2 %[rt],0x0098"                   :                 : [rt] "d" (val))

// pos can be 0-2
#define CVMX_MF_3DES_KEY(val,pos)   asm volatile ("dmfc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
#define CVMX_MF_3DES_IV(val)        asm volatile ("dmfc2 %[rt],0x0084"                   : [rt] "=d" (val) :               )
#define CVMX_MF_3DES_RESULT(val)    asm volatile ("dmfc2 %[rt],0x0088"                   : [rt] "=d" (val) :               )

// KASUMI (shares the 3DES key/result registers)

// pos can be 0-1
#define CVMX_MT_KAS_KEY(val,pos)    CVMX_MT_3DES_KEY(val,pos)
#define CVMX_MT_KAS_ENC_CBC(val)    asm volatile ("dmtc2 %[rt],0x4089"                   :                 : [rt] "d" (val))
#define CVMX_MT_KAS_ENC(val)        asm volatile ("dmtc2 %[rt],0x408b"                   :                 : [rt] "d" (val))
#define CVMX_MT_KAS_RESULT(val)     CVMX_MT_3DES_RESULT(val)

// pos can be 0-1
#define CVMX_MF_KAS_KEY(val,pos)    CVMX_MF_3DES_KEY(val,pos)
#define CVMX_MF_KAS_RESULT(val)     CVMX_MF_3DES_RESULT(val)

// AES (COP2 AES unit)

#define CVMX_MT_AES_ENC_CBC0(val)   asm volatile ("dmtc2 %[rt],0x0108"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_ENC_CBC1(val)   asm volatile ("dmtc2 %[rt],0x3109"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_ENC0(val)       asm volatile ("dmtc2 %[rt],0x010a"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_ENC1(val)       asm volatile ("dmtc2 %[rt],0x310b"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_DEC_CBC0(val)   asm volatile ("dmtc2 %[rt],0x010c"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_DEC_CBC1(val)   asm volatile ("dmtc2 %[rt],0x310d"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_DEC0(val)       asm volatile ("dmtc2 %[rt],0x010e"                   :                 : [rt] "d" (val))
#define CVMX_MT_AES_DEC1(val)       asm volatile ("dmtc2 %[rt],0x310f"                   :                 : [rt] "d" (val))
// pos can be 0-3
#define CVMX_MT_AES_KEY(val,pos)    asm volatile ("dmtc2 %[rt],0x0104+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MT_AES_IV(val,pos)     asm volatile ("dmtc2 %[rt],0x0102+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
#define CVMX_MT_AES_KEYLENGTH(val)  asm volatile ("dmtc2 %[rt],0x0110"                   :                 : [rt] "d" (val)) // write the keylen
// pos can be 0-1
#define CVMX_MT_AES_RESULT(val,pos) asm volatile ("dmtc2 %[rt],0x0100+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))

// pos can be 0-1
#define CVMX_MF_AES_RESULT(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
// pos can be 0-1
#define CVMX_MF_AES_IV(val,pos)     asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
// pos can be 0-3
#define CVMX_MF_AES_KEY(val,pos)    asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) :               )
#define CVMX_MF_AES_KEYLENGTH(val)  asm volatile ("dmfc2 %[rt],0x0110"                   : [rt] "=d" (val) :               ) // read the keylen
#define CVMX_MF_AES_DAT0(val)       asm volatile ("dmfc2 %[rt],0x0111"                   : [rt] "=d" (val) :               ) // first piece of input data
/* GFM COP2 macros (Galois field multiplier) */
/* index can be 0 or 1 */
#define CVMX_MF_GFM_MUL(val, index)     asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(index) : [rt] "=d" (val) :               )
#define CVMX_MF_GFM_POLY(val)           asm volatile ("dmfc2 %[rt],0x025e"                      : [rt] "=d" (val) :               )
#define CVMX_MF_GFM_RESINP(val, index)  asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(index) : [rt] "=d" (val) :               )

#define CVMX_MT_GFM_MUL(val, index)     asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(index) :                 : [rt] "d" (val))
#define CVMX_MT_GFM_POLY(val)           asm volatile ("dmtc2 %[rt],0x025e"                      :                 : [rt] "d" (val))
#define CVMX_MT_GFM_RESINP(val, index)  asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(index) :                 : [rt] "d" (val))
#define CVMX_MT_GFM_XOR0(val)           asm volatile ("dmtc2 %[rt],0x025c"                      :                 : [rt] "d" (val))
#define CVMX_MT_GFM_XORMUL1(val)        asm volatile ("dmtc2 %[rt],0x425d"                      :                 : [rt] "d" (val))
422
423
/* check_ordering stuff */
/* Both CHORD and CYCLE have two encodings: a direct COP2/COP0 move
 * (disabled via #if 0) and the preferred RDHWR form, which also works
 * from user mode. */
#if 0
#define CVMX_MF_CHORD(dest)         asm volatile ("dmfc2 %[rt],0x400" : [rt] "=d" (dest) : )
#else
#define CVMX_MF_CHORD(dest)         CVMX_RDHWR(dest, 30)   /* hardware register 30: check-ordering state */
#endif

#if 0
#define CVMX_MF_CYCLE(dest)         asm volatile ("dmfc0 %[rt],$9,6" : [rt] "=d" (dest) : ) // Use (64-bit) CvmCount register rather than Count
#else
#define CVMX_MF_CYCLE(dest)         CVMX_RDHWR(dest, 31) /* reads the current (64-bit) CvmCount value */
#endif
436
/* Write the 64-bit CvmCount cycle counter (CP0 $9 sel 6). */
#define CVMX_MT_CYCLE(src)         asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))

/* Accessors for Octeon-specific CP0 registers: CacheErr (icache, $27,0),
 * CacheErr (dcache, $27,1), CvmMemCtl ($11,7) and CvmCtl ($9,7).
 * MF reads into val, MT writes val; all are 64-bit (dmfc0/dmtc0). */
#define CVMX_MF_CACHE_ERR(val)            asm volatile ("dmfc0 %[rt],$27,0" :  [rt] "=d" (val):)
#define CVMX_MF_DCACHE_ERR(val)           asm volatile ("dmfc0 %[rt],$27,1" :  [rt] "=d" (val):)
#define CVMX_MF_CVM_MEM_CTL(val)          asm volatile ("dmfc0 %[rt],$11,7" :  [rt] "=d" (val):)
#define CVMX_MF_CVM_CTL(val)              asm volatile ("dmfc0 %[rt],$9,7"  :  [rt] "=d" (val):)
#define CVMX_MT_CACHE_ERR(val)            asm volatile ("dmtc0 %[rt],$27,0" : : [rt] "d" (val))
#define CVMX_MT_DCACHE_ERR(val)           asm volatile ("dmtc0 %[rt],$27,1" : : [rt] "d" (val))
#define CVMX_MT_CVM_MEM_CTL(val)          asm volatile ("dmtc0 %[rt],$11,7" : : [rt] "d" (val))
#define CVMX_MT_CVM_CTL(val)              asm volatile ("dmtc0 %[rt],$9,7"  : : [rt] "d" (val))
447
/* Macros for TLB */
/* TLB instruction wrappers: write indexed entry, write random entry,
 * read indexed entry.  Callers are expected to load the EntryHi/
 * EntryLo/PageMask/Index CP0 registers first with the macros below. */
#define CVMX_TLBWI                       asm volatile ("tlbwi" : : )
#define CVMX_TLBWR                       asm volatile ("tlbwr" : : )
#define CVMX_TLBR                        asm volatile ("tlbr" : : )
/* 64-bit TLB registers use dmtc0/dmfc0; 32-bit ones (PageMask,
 * PageGrain, Index, Wired, Random) use mtc0/mfc0. */
#define CVMX_MT_ENTRY_HIGH(val)          asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
#define CVMX_MT_ENTRY_LO_0(val)          asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
#define CVMX_MT_ENTRY_LO_1(val)          asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
#define CVMX_MT_PAGEMASK(val)            asm volatile ("mtc0 %[rt],$5,0" : : [rt] "d" (val))
#define CVMX_MT_PAGEGRAIN(val)           asm volatile ("mtc0 %[rt],$5,1" : : [rt] "d" (val))
#define CVMX_MT_TLB_INDEX(val)           asm volatile ("mtc0 %[rt],$0,0" : : [rt] "d" (val))
#define CVMX_MT_TLB_CONTEXT(val)         asm volatile ("dmtc0 %[rt],$4,0" : : [rt] "d" (val))
#define CVMX_MT_TLB_WIRED(val)           asm volatile ("mtc0 %[rt],$6,0" : : [rt] "d" (val))
#define CVMX_MT_TLB_RANDOM(val)          asm volatile ("mtc0 %[rt],$1,0" : : [rt] "d" (val))
#define CVMX_MF_ENTRY_LO_0(val)          asm volatile ("dmfc0 %[rt],$2,0" :  [rt] "=d" (val):)
#define CVMX_MF_ENTRY_LO_1(val)          asm volatile ("dmfc0 %[rt],$3,0" :  [rt] "=d" (val):)
#define CVMX_MF_ENTRY_HIGH(val)          asm volatile ("dmfc0 %[rt],$10,0" :  [rt] "=d" (val):)
#define CVMX_MF_PAGEMASK(val)            asm volatile ("mfc0 %[rt],$5,0" :  [rt] "=d" (val):)
#define CVMX_MF_PAGEGRAIN(val)           asm volatile ("mfc0 %[rt],$5,1" :  [rt] "=d" (val):)
#define CVMX_MF_TLB_WIRED(val)           asm volatile ("mfc0 %[rt],$6,0" :  [rt] "=d" (val):)
#define CVMX_MF_TLB_RANDOM(val)          asm volatile ("mfc0 %[rt],$1,0" :  [rt] "=d" (val):)
/* EntryLo page attribute bits (standard MIPS D/V/G layout). */
#define TLB_DIRTY   (0x1ULL<<2)
#define TLB_VALID   (0x1ULL<<1)
#define TLB_GLOBAL  (0x1ULL<<0)
471
472
473
/* Assembler macros that guarantee the hardware's unaligned access
 * instructions are used for 16-bit loads/stores (these use the AT
 * register).  The hidden argument (__a) plus the "m"(__a[...]) operands
 * tell GCC exactly which bytes each asm reads or writes, so it keeps
 * the dependencies right relative to surrounding memory accesses.
 *
 * Bodies are wrapped in do { ... } while (0) rather than a bare brace
 * block so each macro is a single statement and is safe in unbraced
 * if/else bodies (a bare { ... } followed by ';' would terminate an
 * "if" and orphan a following "else"). */
#define CVMX_LOADUNA_INT16(result, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("ulh %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1])); } while (0)
#define CVMX_LOADUNA_UINT16(result, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("ulhu %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1])); } while (0)
#define CVMX_STOREUNA_INT16(data, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("ush %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : "=m"(__a[offset + 0]), "=m"(__a[offset + 1]): [rsrc] "d" (data), [rbase] "d" (__a)); } while (0)
486
/* Unaligned 32-bit load/store via the MIPS ulw/usw macros.  The
 * "m"(__a[offset + k]) operands (k = 0..3) declare the exact bytes
 * touched so GCC orders the asm correctly; do { ... } while (0)
 * makes each macro a single statement, safe in unbraced if/else. */
#define CVMX_LOADUNA_INT32(result, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("ulw %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
	       [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3])); } while (0)
#define CVMX_STOREUNA_INT32(data, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("usw %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
	       "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]) : \
	       [rsrc] "d" (data), [rbase] "d" (__a)); } while (0)
496
/* Unaligned 64-bit load/store via the MIPS uld/usd macros.  The
 * "m"(__a[offset + k]) operands (k = 0..7) declare the exact bytes
 * touched so GCC orders the asm correctly; do { ... } while (0)
 * makes each macro a single statement, safe in unbraced if/else. */
#define CVMX_LOADUNA_INT64(result, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("uld %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
	       [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3]), \
	       "m"(__a[offset + 4]), "m"(__a[offset + 5]), "m"(__a[offset + 6]), "m"(__a[offset + 7])); } while (0)
#define CVMX_STOREUNA_INT64(data, address, offset) \
	do { char *__a = (char *)(address); \
	  asm ("usd %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
	       "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]), \
	       "=m"(__a[offset + 4]), "=m"(__a[offset + 5]), "=m"(__a[offset + 6]), "=m"(__a[offset + 7]) : \
	       [rsrc] "d" (data), [rbase] "d" (__a)); } while (0)
508
509#ifdef	__cplusplus
510}
511#endif
512
513#endif /* __CVMX_ASM_H__ */
514