/***********************license start***************
 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the hardware Free Pool Allocator.
 *
 * <hr>$Revision: 41586 $<hr>
 *
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#ifdef	__cplusplus
extern "C" {
#endif

#define CVMX_FPA_NUM_POOLS      8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT      128

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union
{
    uint64_t        u64;
    struct {
        uint64_t    scraddr : 8;    /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
        uint64_t    len     : 8;    /**< the number of words in the response (0 => no response) */
        uint64_t    did     : 8;    /**< the ID of the device on the non-coherent bus */
        uint64_t    addr    :40;    /**< the address that will appear in the first tick on the NCB bus */
    } s;
} cvmx_fpa_iobdma_data_t;

/**
 * Structure describing the current state of an FPA pool.
 */
typedef struct
{
    const char *name;                   /**< Name it was created under */
    uint64_t    size;                   /**< Size of each block */
    void *      base;                   /**< The base memory address of whole block */
    uint64_t    starting_element_count; /**< The number of elements in the pool at creation */
} cvmx_fpa_pool_info_t;

/**
 * Current state of all the pools. Use access functions
 * instead of using it directly.
 */
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Return the name of the pool
 *
 * @param pool   Pool to get the name of
 * @return The name
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
    return cvmx_fpa_pool_info[pool].name;
}

/**
 * Return the base of the pool
 *
 * @param pool   Pool to get the base of
 * @return The base
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
    return cvmx_fpa_pool_info[pool].base;
}

/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @param pool   Pool to check
 * @param ptr    Pointer to check
 * @return Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
    return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
            ((char*)ptr < ((char*)(cvmx_fpa_pool_info[pool].base)) +
             cvmx_fpa_pool_info[pool].size * cvmx_fpa_pool_info[pool].starting_element_count));
}
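
/*
 * Editor's example (a minimal sketch, not part of the original API): using
 * cvmx_fpa_is_member() to guard a free. The pool number 3 is a hypothetical
 * placeholder for whatever pool the buffer is expected to come from.
 *
 *     void return_buffer(void *ptr)
 *     {
 *         if (cvmx_fpa_is_member(3, ptr))
 *             cvmx_fpa_free(ptr, 3, 0);
 *         else
 *             cvmx_dprintf("%p does not belong to pool 3\n", ptr);
 *     }
 */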


/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
    cvmx_fpa_ctl_status_t status;

    status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
    if (status.s.enb)
    {
        cvmx_dprintf("Warning: Enabling FPA when FPA already enabled.\n");
    }

    /* Do runtime check as we allow pass1 compiled code to run on pass2 chips */
    if (cvmx_octeon_is_pass1())
    {
        cvmx_fpa_fpf_marks_t marks;
        int i;
        for (i=1; i<8; i++)
        {
            marks.u64 = cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i-1)*8ull);
            marks.s.fpf_wr = 0xe0;
            cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i-1)*8ull, marks.u64);
        }

        /* Enforce a 10 cycle delay between config and enable */
        cvmx_wait(10);
    }

    status.u64 = 0; /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
    status.s.enb = 1;
    cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
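
/*
 * Editor's note with an initialization-order sketch (an assumption based on
 * the comment above, not taken from this header): enable once at startup,
 * after CSR configuration and before populating or using any pool. The pool
 * number, name, memory pointer, and sizes below are placeholders.
 *
 *     cvmx_fpa_enable();                          // turn the hardware on first
 *     cvmx_fpa_setup_pool(0, "Packets", mem,      // then pools may be filled
 *                         2048, num_blocks);
 */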


/**
 * Get a new block from the FPA
 *
 * @param pool   Pool to get the block from
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
    uint64_t address = cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool)));
    if (address)
        return cvmx_phys_to_ptr(address);
    else
        return NULL;
}
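
/*
 * Editor's example (sketch): the NULL return must be checked, since the CSR
 * read yields physical address 0 when the pool is empty.
 *
 *     void *buf = cvmx_fpa_alloc(pool);
 *     if (buf == NULL)
 *     {
 *         // pool exhausted; drop the work item or retry later
 *     }
 */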


/**
 * Asynchronously get a new block from the FPA
 *
 * @param scr_addr Local scratch address to put response in.  This is a byte address,
 *                  but must be 8 byte aligned.
 * @param pool      Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
   cvmx_fpa_iobdma_data_t data;

   /* Hardware only uses 64 bit aligned locations, so convert from byte address
   ** to 64-bit index
   */
   data.s.scraddr = scr_addr >> 3;
   data.s.len = 1;
   data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool);
   data.s.addr = 0;
   cvmx_send_single(data.u64);
}
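
/*
 * Editor's example (sketch, assuming the scratchpad helpers from the wider
 * executive: the CVMX_SYNCIOBDMA barrier and cvmx_scratch_read64()):
 * completing an asynchronous allocation. The scratch offset 0 is a
 * placeholder for a properly reserved scratchpad slot.
 *
 *     cvmx_fpa_async_alloc(0, pool);       // issue the IOBDMA request
 *     ...                                  // overlap other work here
 *     CVMX_SYNCIOBDMA;                     // wait for the response to land
 *     uint64_t phys = cvmx_scratch_read64(0);
 *     void *buf = phys ? cvmx_phys_to_ptr(phys) : NULL;
 */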


/**
 * Free a block allocated with an FPA pool.
 * Does NOT provide memory ordering in cases where the memory block was modified by the core.
 *
 * @param ptr    Block to free
 * @param pool   Pool to put it in
 * @param num_cache_lines
 *               Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool, uint64_t num_cache_lines)
{
    cvmx_addr_t newptr;
    newptr.u64 = cvmx_ptr_to_phys(ptr);
    newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
    asm volatile ("" : : : "memory");  /* Prevent GCC from reordering around free */
    /* value written is number of cache lines not written back */
    cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Free a block allocated with an FPA pool.  Provides required memory
 * ordering in cases where memory block was modified by core.
 *
 * @param ptr    Block to free
 * @param pool   Pool to put it in
 * @param num_cache_lines
 *               Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool, uint64_t num_cache_lines)
{
    cvmx_addr_t newptr;
    newptr.u64 = cvmx_ptr_to_phys(ptr);
    newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
    /* Make sure that any previous writes to memory go out before we free this buffer.
    ** This also serves as a barrier to prevent GCC from reordering operations to after
    ** the free. */
    CVMX_SYNCWS;
    /* value written is number of cache lines not written back */
    cvmx_write_io(newptr.u64, num_cache_lines);
}
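
/*
 * Editor's example (sketch, assuming CVMX_CACHE_LINE_SIZE from cvmx.h): a
 * typical free that tells the FPA how many cache lines of the block need
 * not be written back to memory, avoiding needless writeback traffic.
 *
 *     uint64_t lines = cvmx_fpa_get_block_size(pool) / CVMX_CACHE_LINE_SIZE;
 *     cvmx_fpa_free(buf, pool, lines);   // core wrote to buf, so use the synced free
 */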


/**
 * Set up an FPA pool to control a new block of memory.
 * This can only be called once per pool. Make sure proper
 * locking enforces this.
 *
 * @param pool       Pool to initialize
 *                   0 <= pool < 8
 * @param name       Constant character string to name this pool.
 *                   String is not copied.
 * @param buffer     Pointer to the block of memory to use. This must be
 *                   accessible by all processors and external hardware.
 * @param block_size Size for each block controlled by the FPA
 * @param num_blocks Number of blocks
 *
 * @return 0 on Success,
 *         -1 on failure
 */
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
                                uint64_t block_size, uint64_t num_blocks);
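
/*
 * Editor's example (sketch, assuming cvmx_bootmem_alloc() from
 * cvmx-bootmem.h): carving a pool out of boot memory. The pool number,
 * name, and sizes are placeholders.
 *
 *     uint64_t block_size = 2048;
 *     uint64_t num_blocks = 1024;
 *     void *mem = cvmx_bootmem_alloc(block_size * num_blocks, CVMX_FPA_ALIGNMENT);
 *     if (mem == NULL || cvmx_fpa_setup_pool(2, "Work buffers", mem,
 *                                            block_size, num_blocks) != 0)
 *     {
 *         // out of memory, or the pool was already configured
 *     }
 */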


/**
 * Shut down a memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @param pool   Pool to shutdown
 * @return Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
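
/*
 * Editor's example (sketch): interpreting the shutdown result. Note the
 * declared return type is uint64_t, so the "negative" case documented above
 * appears as a very large unsigned value; casting to int64_t recovers it.
 *
 *     int64_t result = (int64_t)cvmx_fpa_shutdown_pool(pool);
 *     if (result > 0)
 *         cvmx_dprintf("Pool %d: %lld buffers missing\n", (int)pool, (long long)result);
 *     else if (result < 0)
 *         cvmx_dprintf("Pool %d: extra or corrupted buffers\n", (int)pool);
 */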


/**
 * Get the size of blocks controlled by the pool
 * This is resolved to a constant at compile time.
 *
 * @param pool   Pool to access
 * @return Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);

#ifdef	__cplusplus
}
#endif

#endif //  __CVMX_FPA_H__