1210284Sjmallett/***********************license start***************
2232812Sjmallett * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
3215990Sjmallett * reserved.
4210284Sjmallett *
5210284Sjmallett *
6215990Sjmallett * Redistribution and use in source and binary forms, with or without
7215990Sjmallett * modification, are permitted provided that the following conditions are
8215990Sjmallett * met:
9210284Sjmallett *
10215990Sjmallett *   * Redistributions of source code must retain the above copyright
11215990Sjmallett *     notice, this list of conditions and the following disclaimer.
12210284Sjmallett *
13215990Sjmallett *   * Redistributions in binary form must reproduce the above
14215990Sjmallett *     copyright notice, this list of conditions and the following
15215990Sjmallett *     disclaimer in the documentation and/or other materials provided
16215990Sjmallett *     with the distribution.
17215990Sjmallett
18232812Sjmallett *   * Neither the name of Cavium Inc. nor the names of
19215990Sjmallett *     its contributors may be used to endorse or promote products
20215990Sjmallett *     derived from this software without specific prior written
21215990Sjmallett *     permission.
22215990Sjmallett
23215990Sjmallett * This Software, including technical data, may be subject to U.S. export  control
24215990Sjmallett * laws, including the U.S. Export Administration Act and its  associated
25215990Sjmallett * regulations, and may be subject to export or import  regulations in other
26215990Sjmallett * countries.
27215990Sjmallett
28215990Sjmallett * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29232812Sjmallett * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30215990Sjmallett * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31215990Sjmallett * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32215990Sjmallett * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33215990Sjmallett * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34215990Sjmallett * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35215990Sjmallett * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36215990Sjmallett * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37215990Sjmallett * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38210284Sjmallett ***********************license end**************************************/
39210284Sjmallett
40215990Sjmallett
41210284Sjmallett/**
42210284Sjmallett * @file
43210284Sjmallett * Functions for accessing memory and CSRs on Octeon when we are compiling
44210284Sjmallett * natively.
45210284Sjmallett *
46210284Sjmallett * <hr>$Revision: 38306 $<hr>
47210284Sjmallett*/
48210284Sjmallett#ifndef __CVMX_ACCESS_NATIVE_H__
49210284Sjmallett#define __CVMX_ACCESS_NATIVE_H__
50210284Sjmallett
51210284Sjmallett#ifdef	__cplusplus
52210284Sjmallettextern "C" {
53210284Sjmallett#endif
54210284Sjmallett
/**
 * Returns the Octeon processor ID.
 *
 * @return Octeon processor ID read from COP0 register $15, select 0 (PRId)
 */
static inline uint32_t cvmx_get_proc_id(void)
{
#ifdef CVMX_BUILD_FOR_LINUX_USER
    /* Userspace builds cannot execute mfc0; return the value the
       application-init code cached for us instead. */
    extern uint32_t cvmx_app_init_processor_id;
    return cvmx_app_init_processor_id;
#else
    uint32_t id;
    asm ("mfc0 %0, $15,0" : "=r" (id));
    return id;
#endif
}
71210284Sjmallett
/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @param ptr    C style memory pointer
 * @return Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    /* Forward declaration for the TLB-mapped fallback below */
    uint64_t uboot_tlb_ptr_to_phys(void *ptr);

    if (((uint32_t)ptr) < 0x80000000)
    {
        /* Handle useg (unmapped due to ERL) here*/
        return(CAST64(ptr) & 0x7FFFFFFF);
    }
    else if (((uint32_t)ptr) < 0xC0000000)
    {
        /* Here we handle KSEG0/KSEG1 _pointers_.  We know we are dealing
        ** with 32 bit only values, so we treat them that way.  Note that
        ** a cvmx_phys_to_ptr(cvmx_ptr_to_phys(X)) will not return X in this case,
        ** but the physical address of the KSEG0/KSEG1 address. */
        return(CAST64(ptr) & 0x1FFFFFFF);
    }
    else
        return(uboot_tlb_ptr_to_phys(ptr));   /* Should not get here in !TLB case */

#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case were we need to limit this to 30 bits, sign extended
            32 bit. Although these are 64 bits wide, only 30 bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
    {
#ifdef __KERNEL__
	/* 32 bit kernel: pointers are KSEG0, strip the segment bits */
	return (long)(ptr) & 0x1fffffff;
#else
        /* 32 bit userspace: memory is mapped at a fixed offset recorded
           at application init; NULL maps to physical 0 */
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(ptr))
            return CAST64(ptr) - linux_mem32_offset;
        else
            return 0;
#endif
    }
#elif defined(_WRS_KERNEL)
	return (long)(ptr) & 0x7fffffff;
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    uint64_t address = (long)ptr;
    if (address & 0x80000000)
        return address & 0x1fffffff;    /* KSEG pointers directly map the lower 256MB and bootbus */
    else if ((address >= 0x10000000) && (address < 0x20000000))
        return address + 0x400000000ull;   /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
    else
        return address; /* Looks to be a 1:1 mapped userspace pointer */
#elif defined(__FreeBSD__) && defined(_KERNEL)
    /* FreeBSD kernel: let pmap do the virtual-to-physical translation */
    return (pmap_kextract((vm_offset_t)ptr));
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assumung we're running the Simple Executive standalone. In this
        mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
        addresses are never used. Since we know all this, save the masking
        cycles and do nothing */
    return CAST64(ptr);
#else

    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case were we need to limit this to 30 bits, sign extended
            32 bit. Although these are 64 bits wide, only 30 bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
	return (long)(ptr) & 0x7fffffff;

#endif
#endif
}
170210284Sjmallett
171210284Sjmallett
/**
 * Convert a hardware physical address (uint64_t) into a
 * memory pointer (void *).
 *
 * @param physical_address
 *               Hardware physical address to memory
 * @return Pointer to memory, or NULL where the address cannot be mapped
 *         (u-boot case only)
 */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");

#ifdef CVMX_BUILD_FOR_UBOOT

    /* U-boot is a special case, as it is running in 32 bit mode, using the TLB to map code/data
    ** which can have a physical address above the 32 bit address space.  1-1 mappings are used
    ** to allow the low 2 GBytes to be accessed as in error level.
    **
    ** NOTE:  This conversion can cause problems in u-boot, as users may want to enter addresses
    ** like 0xBFC00000 (kseg1 boot bus address), which is a valid 64 bit physical address,
    ** but is likely intended to be a boot bus address. */

    if (physical_address < 0x80000000)
    {
        /* Handle useg here.  ERL is set, so useg is unmapped.  This is the only physical
        ** address range that is directly addressable by u-boot. */
        return CASTPTR(void, physical_address);
    }
    else
    {
	DECLARE_GLOBAL_DATA_PTR;
        extern char uboot_start;
        /* Above 0x80000000 we can only support one case - a physical address
        ** that is mapped for u-boot code/data.  We check against the u-boot mem range,
        ** and return NULL if it is out of this range.
        */
        if (physical_address >= gd->bd->bi_uboot_ram_addr
            && physical_address < gd->bd->bi_uboot_ram_addr + gd->bd->bi_uboot_ram_used_size)
        {
            return ((char *)&uboot_start + (physical_address - gd->bd->bi_uboot_ram_addr));
        }
        else
            return(NULL);
    }

    /* NOTE(review): dead code - both branches of the if/else above already
    ** return, so this check never executes; the dangling "else" binds to
    ** whatever statement follows the #endif. Left byte-identical; confirm
    ** intent before removing. */
    if (physical_address >= 0x80000000)
        return NULL;
    else
#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* Just set the top bit, avoiding any TLB uglyness */
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    }
    else
    {
#ifdef __KERNEL__
	/* 32 bit kernel: map through KSEG0 */
	return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#else
        /* 32 bit userspace: inverse of cvmx_ptr_to_phys - add the fixed
           mapping offset; physical 0 maps back to NULL */
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(physical_address))
            return CASTPTR(void, physical_address + linux_mem32_offset);
        else
            return NULL;
#endif
    }
#elif defined(_WRS_KERNEL)
	return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else if ((OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
              && (physical_address >= 0x410000000ull)
              && (physical_address < 0x420000000ull))
        return CASTPTR(void, physical_address - 0x400000000ull);
    else
        return CASTPTR(void, physical_address);
#elif defined(__FreeBSD__) && defined(_KERNEL)
#if defined(__mips_n64)
    return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
#else
    /* 32 bit FreeBSD kernel can only reach the low 512MB through KSEG0 */
    if (physical_address < 0x20000000)
	return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else
	panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
#endif
#else

#if CVMX_USE_1_TO_1_TLB_MAPPINGS
        /* We are assumung we're running the Simple Executive standalone. In this
            mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
            addresses are never used. Since we know all this, save bit insert
            cycles and do nothing */
    return CASTPTR(void, physical_address);
#else
    /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
    if (sizeof(void*) == 8)
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    else
	return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));

#endif

#endif
}
282210284Sjmallett
283210284Sjmallett
/* The following #if controls the definition of the macro
    CVMX_BUILD_WRITE64. This macro is used to build a store operation to
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual store since a pointer would truncate the address */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
/* O32 kernel builds get the helpers out of line.
   NOTE(review): the second parameter is named LT here but ST in the other
   variants; it is unused by this expansion so the mismatch is harmless. */
#define CVMX_BUILD_WRITE64(TYPE, LT) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple store becomes a
    convoluted mess of shifts and ors. The dsll/dsrl pairs rebuild each
    64bit quantity from its 32bit halves before the final store. */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh = (uint64_t)val>>32;                              \
        uint32_t vall = val;                                            \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
        uint32_t tmp3;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                             \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[valh], 32\n"                             \
            "dsll   %[tmp2], %[csrh], 32\n"                             \
            "dsll   %[tmp3], %[vall], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp3]\n"                        \
            "dsll   %[tmp3], %[csrl], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp2], %[tmp2], %[tmp3]\n"                        \
            ST "    %[tmp1], 0(%[tmp2])\n"                              \
            ".set pop\n"                                             \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
            : [valh] "r" (valh), [vall] "r" (vall),                     \
              [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                             \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[csrh], 32\n"                             \
            "dsll   %[tmp2], %[csrl], 32\n"                             \
            "dsrl   %[tmp2], %[tmp2], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp2]\n"                        \
            ST "    %[val], 0(%[tmp1])\n"                               \
            ".set pop\n"                                             \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2)                  \
            : [val] "r" (val), [csrh] "r" (csr_addrh),                  \
              [csrl] "r" (csr_addrl)                                    \
        );                                                              \
    }                                                                   \
}

#endif

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif
381210284Sjmallett
/* The following #if controls the definition of the macro
    CVMX_BUILD_READ64. This macro is used to build a load operation from
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Reading from a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    return *CASTPTR(volatile TYPE##_t, addr);                           \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual load since a pointer would truncate the address */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    TYPE##_t val;                                                       \
    asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
    return val;                                                         \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
/* O32 kernel builds get the helpers out of line */
#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple load becomes a
    convoluted mess of shifts and ors. The 64bit result comes back split
    into valh/vall; the trailing sll 0 instructions sign extend each half
    back to a proper 32bit register value. */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr)            \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh;                                                  \
        uint32_t vall;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[valh], %[csrh], 32\n"                             \
            "dsll   %[vall], %[csrl], 32\n"                             \
            "dsrl   %[vall], %[vall], 32\n"                             \
            "or     %[valh], %[valh], %[vall]\n"                        \
            LT "    %[vall], 0(%[valh])\n"                              \
            "dsrl   %[valh], %[vall], 32\n"                             \
            "sll    %[vall], 0\n"                                       \
            "sll    %[valh], 0\n"                                       \
            ".set pop\n"                                                \
            : [valh] "=&r" (valh), [vall] "=&r" (vall)                  \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return ((uint64_t)valh<<32) | vall;                             \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        TYPE##_t val;                                                   \
        uint32_t tmp;                                                   \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                             \
            ".set mips64\n"                                             \
            "dsll   %[val], %[csrh], 32\n"                              \
            "dsll   %[tmp], %[csrl], 32\n"                              \
            "dsrl   %[tmp], %[tmp], 32\n"                               \
            "or     %[val], %[val], %[tmp]\n"                           \
            LT "    %[val], 0(%[val])\n"                                \
            ".set pop\n"                                             \
            : [val] "=&r" (val), [tmp] "=&r" (tmp)                      \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return val;                                                     \
    }                                                                   \
}

#endif /* __KERNEL__ */

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif
477210284Sjmallett
/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8
   The second argument is the MIPS store mnemonic for that width. */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");

/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8
   Signed variants use sign-extending loads (lw/lh/lb); unsigned sub-word
   variants use the zero-extending forms (lhu/lbu). */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
507210284Sjmallett
/**
 * Write a 64-bit value to a CSR (control/status register), then force
 * the write to complete before returning when the target is an RSL
 * register.
 *
 * @param csr_addr Physical address of the CSR to write
 * @param val      64-bit value to write
 */
static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
    cvmx_write64_uint64(csr_addr, val);

    /* Perform an immediate read after every write to an RSL register to force
        the write to complete. It doesn't matter what RSL read we do, so we
        choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
    /* Bits <58:40> of the address select the device region; 0x118 is
       presumably the RSL region -- confirm against the OCTEON address map */
    if (((csr_addr >> 40) & 0x7ffff) == (0x118))
        cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}
518210284Sjmallett
/**
 * Write a 64-bit value to a memory-mapped I/O address. Unlike
 * cvmx_write_csr(), no read-back is performed to flush the write.
 *
 * @param io_addr Physical I/O address to write
 * @param val     64-bit value to write
 */
static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
    cvmx_write64_uint64(io_addr, val);
}
523210284Sjmallett
/**
 * Read a 64-bit value from a CSR (control/status register).
 *
 * @param csr_addr Physical address of the CSR to read
 *
 * @return The 64-bit register contents
 */
static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
    return cvmx_read64_uint64(csr_addr);
}
528210284Sjmallett
/**
 * Issue an IOBDMA command by storing the encoded command word to the
 * magic "send single" address. The stored data is interpreted as a
 * command (see cvmx_read_csr_async() for the encoding), not written
 * to memory.
 *
 * @param data Encoded IOBDMA command word
 */
static inline void cvmx_send_single(uint64_t data)
{
    /* Fixed address that triggers an IOBDMA operation when stored to */
    const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
    cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
}
534210284Sjmallett
/**
 * Start an asynchronous (IOBDMA) read of a CSR. The result is
 * deposited in the core's local scratchpad memory at the given
 * offset; the caller fetches it from there later.
 *
 * @param scraddr  Scratchpad byte offset for the result (encoded as a
 *                 64-bit-word index in the command)
 * @param csr_addr Address of the CSR to read
 */
static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
    union
    {
        uint64_t    u64;
        struct {
            uint64_t    scraddr : 8;    /* scratchpad 64-bit word index */
            uint64_t    len     : 8;    /* transfer length in 64-bit words */
            uint64_t    addr    :48;    /* I/O address to read */
        } s;
    } command;

    command.u64 = csr_addr;             /* address field comes from csr_addr */
    command.s.scraddr = scraddr >> 3;   /* byte offset -> word index */
    command.s.len = 1;                  /* one 64-bit word */
    cvmx_send_single(command.u64);
}
551210284Sjmallett
552210284Sjmallett
/**
 * Number of the core on which the program is currently running.
 * Reads hardware register 0 via the RDHWR instruction.
 *
 * @return The current core's number (not the number of cores)
 */
static inline unsigned int cvmx_get_core_num(void)
{
    unsigned int core_num;
    CVMX_RDHWRNV(core_num, 0);
    return core_num;
}
564210284Sjmallett
565210284Sjmallett
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the POP (population count) instruction.
 *
 * @param val    32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
    uint32_t pop;
    CVMX_POP(pop, val);
    return pop;
}
580210284Sjmallett
581210284Sjmallett
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the DPOP (64-bit population count) instruction.
 *
 * @param val    64 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
    int pop;
    CVMX_DPOP(pop, val);
    return pop;
}
596210284Sjmallett
597210284Sjmallett
/**
 * @deprecated
 * Provide current cycle counter as a return value. Deprecated, use
 * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
 *
 * @return current core cycle counter
 */
static inline uint64_t cvmx_get_cycle(void)
{
    /* Kept as a thin wrapper for source compatibility */
    return cvmx_clock_get_count(CVMX_CLOCK_CORE);
}
609210284Sjmallett
610210284Sjmallett
/**
 * @deprecated
 * Reads a chip global cycle counter.  This counts SCLK cycles since
 * chip reset.  The counter is 64 bit. This function is deprecated as the rate
 * of the global cycle counter is different between Octeon+ and Octeon2, use
 * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
 * of SCLK may be different than the core clock.
 *
 * @return Global chip cycle count since chip reset.
 */
static inline uint64_t cvmx_get_cycle_global(void)
{
    /* NOTE(review): the doc above suggests CVMX_CLOCK_SCLK but the code
       reads the IPD clock counter -- presumably intentional for legacy
       behavior; confirm against cvmx_clock_get_count() semantics */
    return cvmx_clock_get_count(CVMX_CLOCK_IPD);
}
625210284Sjmallett
626210284Sjmallett
/**
 * Busy-wait for at least the specified number of core clock cycles.
 *
 * @param cycles Number of core cycles to spin for
 */
static inline void cvmx_wait(uint64_t cycles)
{
    const uint64_t target = cvmx_get_cycle() + cycles;

    /* Spin until the core cycle counter reaches the target */
    while (cvmx_get_cycle() < target)
        ;
}
641210284Sjmallett
642210284Sjmallett
643210284Sjmallett/**
644210284Sjmallett * Wait for the specified number of micro seconds
645210284Sjmallett *
646210284Sjmallett * @param usec   micro seconds to wait
647210284Sjmallett */
648210284Sjmallettstatic inline void cvmx_wait_usec(uint64_t usec)
649210284Sjmallett{
650215990Sjmallett    uint64_t done = cvmx_get_cycle() + usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
651210284Sjmallett    while (cvmx_get_cycle() < done)
652210284Sjmallett    {
653210284Sjmallett        /* Spin */
654210284Sjmallett    }
655210284Sjmallett}
656210284Sjmallett
657210284Sjmallett
658210284Sjmallett/**
659215990Sjmallett * Wait for the specified number of io clock cycles
660215990Sjmallett *
661215990Sjmallett * @param cycles
662215990Sjmallett */
663215990Sjmallettstatic inline void cvmx_wait_io(uint64_t cycles)
664215990Sjmallett{
665215990Sjmallett    uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_SCLK) + cycles;
666215990Sjmallett
667215990Sjmallett    while (cvmx_clock_get_count(CVMX_CLOCK_SCLK) < done)
668215990Sjmallett    {
669215990Sjmallett        /* Spin */
670215990Sjmallett    }
671215990Sjmallett}
672215990Sjmallett
673215990Sjmallett
/**
 * Perform a soft reset of Octeon by setting the SOFT_RST bit in the
 * CIU_SOFT_RST register. Does not return a status; presumably the
 * chip resets before the write completes from software's view.
 *
 * @return
 */
static inline void cvmx_reset_octeon(void)
{
    cvmx_ciu_soft_rst_t ciu_soft_rst;
    ciu_soft_rst.u64 = 0;
    ciu_soft_rst.s.soft_rst = 1;    /* request the soft reset */
    cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}
686210284Sjmallett
687210284Sjmallett
/**
 * Read a byte of fuse data
 * @param byte_addr   address to read
 *
 * @return fuse value: 0 or 1
 */
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
    cvmx_mio_fus_rcmd_t read_cmd;

    /* Kick off the fuse read by writing the address with PEND set */
    read_cmd.u64 = 0;
    read_cmd.s.addr = byte_addr;
    read_cmd.s.pend = 1;
    cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
    /* Poll until the hardware clears PEND, signaling the data is valid.
       The loop also exits if the register reads back as all zero (the
       short-circuit && makes that equivalent since PEND is 0 then). */
    while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
        ;
    return(read_cmd.s.dat);
}
706210284Sjmallett
707210284Sjmallett
/**
 * Read a single fuse bit.
 *
 * @param fuse   Fuse number (0-1024)
 *
 * @return fuse value: 0 or 1
 */
static inline int cvmx_fuse_read(int fuse)
{
    int byte_index = fuse >> 3;     /* which fuse byte holds the bit */
    int bit_offset = fuse & 0x7;    /* bit position within that byte */

    return (cvmx_fuse_read_byte(byte_index) >> bit_offset) & 1;
}
719210284Sjmallett
720210284Sjmallett#ifdef	__cplusplus
721210284Sjmallett}
722210284Sjmallett#endif
723210284Sjmallett
724210284Sjmallett#endif /* __CVMX_ACCESS_NATIVE_H__ */
725210284Sjmallett
726