/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 * Functions for accessing memory and CSRs on Octeon when we are compiling
 * natively.
 *
 * <hr>$Revision: 38306 $<hr>
 */
#ifndef __CVMX_ACCESS_NATIVE_H__
#define __CVMX_ACCESS_NATIVE_H__

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Returns the Octeon processor ID.
 *
 * @return Octeon processor ID from COP0
 */
static inline uint32_t cvmx_get_proc_id(void)
{
#ifdef CVMX_BUILD_FOR_LINUX_USER
    extern uint32_t cvmx_app_init_processor_id;
    return cvmx_app_init_processor_id;
#else
    uint32_t id;
    asm ("mfc0 %0, $15,0" : "=r" (id));
    return id;
#endif
}
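
/* Example (illustrative sketch): the value returned here is the raw CP0 PRId
   register, which is what the model-detection macros in octeon-model.h are
   ultimately based on.  It can also simply be logged for diagnostics
   (printf() assumed available from <stdio.h>):

       printf("Octeon processor ID: 0x%08x\n", (unsigned int)cvmx_get_proc_id());
*/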

/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @param ptr    C style memory pointer
 * @return Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    uint64_t uboot_tlb_ptr_to_phys(void *ptr);

    if (((uint32_t)ptr) < 0x80000000)
    {
        /* Handle useg (unmapped due to ERL) here */
        return(CAST64(ptr) & 0x7FFFFFFF);
    }
    else if (((uint32_t)ptr) < 0xC0000000)
    {
        /* Here we handle KSEG0/KSEG1 _pointers_.  We know we are dealing
        ** with 32 bit only values, so we treat them that way.  Note that
        ** cvmx_phys_to_ptr(cvmx_ptr_to_phys(X)) will not return X in this case,
        ** but the physical address of the KSEG0/KSEG1 address. */
        return(CAST64(ptr) & 0x1FFFFFFF);
    }
    else
        return(uboot_tlb_ptr_to_phys(ptr));   /* Should not get here in the !TLB case */

#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case where we need to limit this to 30 bits, sign extended
            32 bit. Although these are 64 bits wide, only 30 bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
    {
#ifdef __KERNEL__
        return (long)(ptr) & 0x1fffffff;
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(ptr))
            return CAST64(ptr) - linux_mem32_offset;
        else
            return 0;
#endif
    }
#elif defined(_WRS_KERNEL)
    return (long)(ptr) & 0x7fffffff;
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    uint64_t address = (long)ptr;
    if (address & 0x80000000)
        return address & 0x1fffffff;    /* KSEG pointers directly map the lower 256MB and bootbus */
    else if ((address >= 0x10000000) && (address < 0x20000000))
        return address + 0x400000000ull;   /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
    else
        return address; /* Looks to be a 1:1 mapped userspace pointer */
#elif defined(__FreeBSD__) && defined(_KERNEL)
    return (pmap_kextract((vm_offset_t)ptr));
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
        mode the TLB is set up to perform 1:1 mapping and 32 bit sign extended
        addresses are never used. Since we know all this, save the masking
        cycles and do nothing */
    return CAST64(ptr);
#else

    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case where we need to limit this to 30 bits, sign extended
            32 bit. Although these are 64 bits wide, only 30 bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
        return (long)(ptr) & 0x7fffffff;

#endif
#endif
}
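
/* Example (illustrative sketch): Octeon hardware blocks are given physical
   addresses, so a pointer must be converted before being handed to, e.g., a
   DMA engine or written into a hardware command word.  The buffer below is
   hypothetical; any valid data pointer is converted the same way.

       static char dma_buffer[256];
       uint64_t dma_phys = cvmx_ptr_to_phys(dma_buffer);
       // 'dma_phys' is now suitable for hardware consumption
*/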


/**
 * Convert a hardware physical address (uint64_t) into a
 * memory pointer (void *).
 *
 * @param physical_address
 *               Hardware physical address to memory
 * @return Pointer to memory
 */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");

#ifdef CVMX_BUILD_FOR_UBOOT
#if !CONFIG_OCTEON_UBOOT_TLB
    if (physical_address >= 0x80000000)
        return NULL;
    else
        return CASTPTR(void, (physical_address & 0x7FFFFFFF));
#endif

    /* U-boot is a special case, as it is running in 32 bit mode, using the TLB to map code/data
    ** which can have a physical address above the 32 bit address space.  1-1 mappings are used
    ** to allow the low 2 GBytes to be accessed while in error level (ERL).
    **
    ** NOTE:  This conversion can cause problems in u-boot, as users may want to enter addresses
    ** like 0xBFC00000 (kseg1 boot bus address), which is a valid 64 bit physical address,
    ** but is likely intended to be a boot bus address. */

    if (physical_address < 0x80000000)
    {
        /* Handle useg here.  ERL is set, so useg is unmapped.  This is the only physical
        ** address range that is directly addressable by u-boot. */
        return CASTPTR(void, physical_address);
    }
    else
    {
        DECLARE_GLOBAL_DATA_PTR;
        extern char uboot_start;
        /* Above 0x80000000 we can only support one case - a physical address
        ** that is mapped for u-boot code/data.  We check against the u-boot mem range,
        ** and return NULL if it is out of this range.
        */
        if (physical_address >= gd->bd->bi_uboot_ram_addr
            && physical_address < gd->bd->bi_uboot_ram_addr + gd->bd->bi_uboot_ram_used_size)
        {
            return ((char *)&uboot_start + (physical_address - gd->bd->bi_uboot_ram_addr));
        }
        else
            return(NULL);
    }

    if (physical_address >= 0x80000000)
        return NULL;
    else
#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* Just set the top bit, avoiding any TLB ugliness */
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    }
    else
    {
#ifdef __KERNEL__
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(physical_address))
            return CASTPTR(void, physical_address + linux_mem32_offset);
        else
            return NULL;
#endif
    }
#elif defined(_WRS_KERNEL)
    return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && (physical_address >= 0x410000000ull) &&
                                                       (physical_address < 0x420000000ull))
        return CASTPTR(void, physical_address - 0x400000000ull);
    else
        return CASTPTR(void, physical_address);
#elif defined(__FreeBSD__) && defined(_KERNEL)
#if defined(__mips_n64)
    return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
#else
    if (physical_address < 0x20000000)
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else
        panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
#endif
#else

#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
        mode the TLB is set up to perform 1:1 mapping and 32 bit sign extended
        addresses are never used. Since we know all this, save bit insert
        cycles and do nothing */
    return CASTPTR(void, physical_address);
#else
    /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
    if (sizeof(void*) == 8)
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    else
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));

#endif

#endif
}
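
/* Example (illustrative sketch): a round trip through the two conversions.
   Note that, as described for KSEG0/KSEG1 pointers in cvmx_ptr_to_phys(),
   the round trip is not guaranteed to return the identical pointer, only a
   pointer to the same physical memory.

       static uint64_t shared_word;
       uint64_t phys = cvmx_ptr_to_phys(&shared_word);
       volatile uint64_t *p = (volatile uint64_t *)cvmx_phys_to_ptr(phys);
       *p = 0x1234;    // writes through the converted pointer land in shared_word
*/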


/* The following #if controls the definition of the macro
    CVMX_BUILD_WRITE64. This macro is used to build a store operation to
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual store since a pointer would truncate the address */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_WRITE64(TYPE, ST) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple store becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh = (uint64_t)val>>32;                              \
        uint32_t vall = val;                                            \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
        uint32_t tmp3;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[valh], 32\n"                             \
            "dsll   %[tmp2], %[csrh], 32\n"                             \
            "dsll   %[tmp3], %[vall], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp3]\n"                        \
            "dsll   %[tmp3], %[csrl], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp2], %[tmp2], %[tmp3]\n"                        \
            ST "    %[tmp1], 0(%[tmp2])\n"                              \
            ".set pop\n"                                                \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
            : [valh] "r" (valh), [vall] "r" (vall),                     \
              [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[csrh], 32\n"                             \
            "dsll   %[tmp2], %[csrl], 32\n"                             \
            "dsrl   %[tmp2], %[tmp2], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp2]\n"                        \
            ST "    %[val], 0(%[tmp1])\n"                               \
            ".set pop\n"                                                \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2)                  \
            : [val] "r" (val), [csrh] "r" (csr_addrh),                  \
              [csrl] "r" (csr_addrl)                                    \
        );                                                              \
    }                                                                   \
}

#endif

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif

/* The following #if controls the definition of the macro
    CVMX_BUILD_READ64. This macro is used to build a load operation from
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Reading from a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    return *CASTPTR(volatile TYPE##_t, addr);                           \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual load since a pointer would truncate the address */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    TYPE##_t val;                                                       \
    asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
    return val;                                                         \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple load becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr)            \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh;                                                  \
        uint32_t vall;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[valh], %[csrh], 32\n"                             \
            "dsll   %[vall], %[csrl], 32\n"                             \
            "dsrl   %[vall], %[vall], 32\n"                             \
            "or     %[valh], %[valh], %[vall]\n"                        \
            LT "    %[vall], 0(%[valh])\n"                              \
            "dsrl   %[valh], %[vall], 32\n"                             \
            "sll    %[vall], 0\n"                                       \
            "sll    %[valh], 0\n"                                       \
            ".set pop\n"                                                \
            : [valh] "=&r" (valh), [vall] "=&r" (vall)                  \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return ((uint64_t)valh<<32) | vall;                             \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        TYPE##_t val;                                                   \
        uint32_t tmp;                                                   \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[val], %[csrh], 32\n"                              \
            "dsll   %[tmp], %[csrl], 32\n"                              \
            "dsrl   %[tmp], %[tmp], 32\n"                               \
            "or     %[val], %[val], %[tmp]\n"                           \
            LT "    %[val], 0(%[val])\n"                                \
            ".set pop\n"                                                \
            : [val] "=&r" (val), [tmp] "=&r" (tmp)                      \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return val;                                                     \
    }                                                                   \
}

#endif /* __KERNEL__ */

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif

/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");

/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument.
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
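
/* Example (illustrative sketch): these helpers take a full 64 bit address even
   on 32 bit ABIs, so they can reach device registers of any width.  The
   address variable below is a hypothetical placeholder for a byte-wide device
   register address obtained elsewhere:

       uint64_t dev_addr = my_device_register_address;   // hypothetical address
       uint8_t status = cvmx_read64_uint8(dev_addr);
       cvmx_write64_uint8(dev_addr, status);
*/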

static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
    cvmx_write64_uint64(csr_addr, val);

    /* Perform an immediate read after every write to an RSL register to force
        the write to complete. It doesn't matter what RSL read we do, so we
        choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
    if (((csr_addr >> 40) & 0x7ffff) == (0x118))
        cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}

static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
    cvmx_write64_uint64(io_addr, val);
}

static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
    return cvmx_read64_uint64(csr_addr);
}
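
/* Example (illustrative sketch): CSRs are normally accessed through these
   helpers together with the bitfield unions from the generated CSR headers.
   CVMX_CIU_SOFT_RST / cvmx_ciu_soft_rst_t are used here only because they
   already appear later in this file; the read-modify-write pattern is the
   same for any register:

       cvmx_ciu_soft_rst_t rst;
       rst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_RST);   // read the whole register
       // inspect or change fields through rst.s.<field> here
       cvmx_write_csr(CVMX_CIU_SOFT_RST, rst.u64);   // write it back
*/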

static inline void cvmx_send_single(uint64_t data)
{
    const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
    cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
}

static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
    union
    {
        uint64_t    u64;
        struct {
            uint64_t    scraddr : 8;
            uint64_t    len     : 8;
            uint64_t    addr    :48;
        } s;
    } addr;
    addr.u64 = csr_addr;
    addr.s.scraddr = scraddr >> 3;
    addr.s.len = 1;
    cvmx_send_single(addr.u64);
}
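
/* Example (illustrative sketch, assuming the CVMX_SYNCIOBDMA macro and the
   scratchpad helpers from elsewhere in the SDK are available): an asynchronous
   CSR read lets unrelated work overlap the access latency.  The scratchpad
   offset is a hypothetical choice.

       #define MY_SCR_OFFSET 0                      // byte offset in scratch memory (assumption)
       cvmx_read_csr_async(MY_SCR_OFFSET, CVMX_CIU_SOFT_RST);
       // ... do other work while the IOBDMA is outstanding ...
       CVMX_SYNCIOBDMA;                             // wait for the result to land
       uint64_t value = cvmx_scratch_read64(MY_SCR_OFFSET);
*/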


/**
 * Number of the core on which the program is currently running.
 *
 * @return The current core number
 */
static inline unsigned int cvmx_get_core_num(void)
{
    unsigned int core_num;
    CVMX_RDHWRNV(core_num, 0);
    return core_num;
}


/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the POP instruction.
 *
 * @param val    32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
    uint32_t pop;
    CVMX_POP(pop, val);
    return pop;
}


/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the DPOP instruction.
 *
 * @param val    64 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
    int pop;
    CVMX_DPOP(pop, val);
    return pop;
}
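
/* Quick illustration of the two population-count wrappers:
   cvmx_pop(0xF0) returns 4; cvmx_dpop(0xFFFFFFFFFFFFFFFFull) returns 64. */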


/**
 * @deprecated
 * Provide current cycle counter as a return value. Deprecated, use
 * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get the cycle counter.
 *
 * @return current cycle counter
 */
static inline uint64_t cvmx_get_cycle(void)
{
    return cvmx_clock_get_count(CVMX_CLOCK_CORE);
}


/**
 * @deprecated
 * Reads a chip global cycle counter.  This counts SCLK cycles since
 * chip reset.  The counter is 64 bit. This function is deprecated as the rate
 * of the global cycle counter differs between Octeon+ and Octeon2; use
 * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
 * of SCLK may be different from the core clock.
 *
 * @return Global chip cycle count since chip reset.
 */
static inline uint64_t cvmx_get_cycle_global(void)
{
    return cvmx_clock_get_count(CVMX_CLOCK_IPD);
}


/**
 * Wait for the specified number of core clock cycles
 *
 * @param cycles Number of core clock cycles to wait
 */
static inline void cvmx_wait(uint64_t cycles)
{
    uint64_t done = cvmx_get_cycle() + cycles;

    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}


/**
 * Wait for the specified number of microseconds
 *
 * @param usec   microseconds to wait
 */
static inline void cvmx_wait_usec(uint64_t usec)
{
    uint64_t done = cvmx_get_cycle() + usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}
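
/* Example (illustrative sketch): a typical use is letting hardware settle
   after a configuration write.  The register and value below are hypothetical:

       cvmx_write_csr(SOME_CONFIG_CSR, new_value);   // hypothetical CSR and value
       cvmx_wait_usec(10);                           // give the block ~10us to settle
*/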


/**
 * Wait for the specified number of IO clock cycles
 *
 * @param cycles Number of IO clock (SCLK) cycles to wait
 */
static inline void cvmx_wait_io(uint64_t cycles)
{
    uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_SCLK) + cycles;

    while (cvmx_clock_get_count(CVMX_CLOCK_SCLK) < done)
    {
        /* Spin */
    }
}


/**
 * Perform a soft reset of Octeon
 */
static inline void cvmx_reset_octeon(void)
{
    cvmx_ciu_soft_rst_t ciu_soft_rst;
    ciu_soft_rst.u64 = 0;
    ciu_soft_rst.s.soft_rst = 1;
    cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}


/**
 * Read a byte of fuse data
 * @param byte_addr   address to read
 *
 * @return A byte of fuse data (8 fuse bits)
 */
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
    cvmx_mio_fus_rcmd_t read_cmd;

    read_cmd.u64 = 0;
    read_cmd.s.addr = byte_addr;
    read_cmd.s.pend = 1;
    cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
    while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
        ;
    return(read_cmd.s.dat);
}


/**
 * Read a single fuse bit
 *
 * @param fuse   Fuse number (0-1024)
 *
 * @return fuse value: 0 or 1
 */
static inline int cvmx_fuse_read(int fuse)
{
    return((cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1);
}
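
/* Example (illustrative sketch): individual fuse bits encode chip options and
   trims.  The fuse number below is a hypothetical placeholder; real fuse
   assignments come from the hardware reference manual.

       int fuse_number = 42;                          // hypothetical fuse bit
       int blown = cvmx_fuse_read(fuse_number);       // 1 if the fuse is blown, else 0
*/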

#ifdef __cplusplus
}
#endif

#endif /* __CVMX_ACCESS_NATIVE_H__ */