netmap_mem2.c (241643) → netmap_mem2.c (241719)
1/*
1/*
2 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
2 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden ---

19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26/*
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden ---

19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26/*
27 * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 241643 2012-10-17 18:21:14Z emaste $
28 * $Id: netmap_mem2.c 11445 2012-07-30 10:49:07Z luigi $
27 * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 241719 2012-10-19 04:13:12Z luigi $
28 * $Id: netmap_mem2.c 11881 2012-10-18 23:24:15Z luigi $
29 *
29 *
30 * New memory allocator for netmap
30 * (New) memory allocator for netmap
31 */
32
33/*
31 */
32
33/*
34 * The new version allocates three regions:
35 * nm_if_pool for the struct netmap_if
36 * nm_ring_pool for the struct netmap_ring
37 * nm_buf_pool for the packet buffers.
34 * This allocator creates three memory regions:
35 * nm_if_pool for the struct netmap_if
36 * nm_ring_pool for the struct netmap_ring
37 * nm_buf_pool for the packet buffers.
38 *
38 *
39 * All regions need to be page-sized as we export them to
40 * userspace through mmap. Only the latter need to be dma-able,
 39 * All regions need to be a multiple of the page size as we export them to
40 * userspace through mmap. Only the latter needs to be dma-able,
41 * but for convenience use the same type of allocator for all.
42 *
43 * Once mapped, the three regions are exported to userspace
44 * as a contiguous block, starting from nm_if_pool. Each
45 * cluster (and pool) is an integral number of pages.
46 * [ . . . ][ . . . . . .][ . . . . . . . . . .]
47 * nm_if nm_ring nm_buf
48 *
49 * The userspace areas contain offsets of the objects in userspace.
50 * When (at init time) we write these offsets, we find out the index
51 * of the object, and from there locate the offset from the beginning
52 * of the region.
53 *
41 * but for convenience use the same type of allocator for all.
42 *
43 * Once mapped, the three regions are exported to userspace
44 * as a contiguous block, starting from nm_if_pool. Each
45 * cluster (and pool) is an integral number of pages.
46 * [ . . . ][ . . . . . .][ . . . . . . . . . .]
47 * nm_if nm_ring nm_buf
48 *
49 * The userspace areas contain offsets of the objects in userspace.
50 * When (at init time) we write these offsets, we find out the index
51 * of the object, and from there locate the offset from the beginning
52 * of the region.
53 *
54 * Allocator for a pool of memory objects of the same size.
 54 * The individual allocators manage a pool of memory for objects of
55 * the same size.
55 * The pool is split into smaller clusters, whose size is a
56 * multiple of the page size. The cluster size is chosen
57 * to minimize the waste for a given max cluster size
 58 * (we do it by brute force, as we have relatively few objects
59 * per cluster).
60 *
56 * The pool is split into smaller clusters, whose size is a
57 * multiple of the page size. The cluster size is chosen
58 * to minimize the waste for a given max cluster size
 59 * (we do it by brute force, as we have relatively few objects
60 * per cluster).
61 *
61 * To be polite with the cache, objects are aligned to
62 * the cache line, or 64 bytes. Sizes are rounded to multiple of 64.
63 * For each object we have
64 * one entry in the bitmap to signal the state. Allocation scans
65 * the bitmap, but since this is done only on attach, we are not
62 * Objects are aligned to the cache line (64 bytes) rounding up object
63 * sizes when needed. A bitmap contains the state of each object.
64 * Allocation scans the bitmap; this is done only on attach, so we are not
66 * too worried about performance
65 * too worried about performance
67 */
68
69/*
70 * MEMORY SIZES:
71 *
66 *
72 * (all the parameters below will become tunables)
 67 * For each allocator we can define (through sysctl) the size and
68 * number of each object. Memory is allocated at the first use of a
69 * netmap file descriptor, and can be freed when all such descriptors
70 * have been released (including unmapping the memory).
71 * If memory is scarce, the system tries to get as much as possible
72 * and the sysctl values reflect the actual allocation.
 73 * Together with the desired values, the sysctls also export absolute
 74 * minimum and maximum values that cannot be overridden.
73 *
75 *
74 * struct netmap_if is variable size but small.
75 * Assuming each NIC has 8+2 rings, (4+1 tx, 4+1 rx) the netmap_if
76 * uses 120 bytes on a 64-bit machine.
77 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
78 * cards with 48 ring pairs.
79 * The total number of 'struct netmap_if' could be slightly larger
80 * that the total number of rings on all interfaces on the system.
76 * struct netmap_if:
77 * variable size, max 16 bytes per ring pair plus some fixed amount.
78 * 1024 bytes should be large enough in practice.
79 *
80 * In the worst case we have one netmap_if per ring in the system.
81 *
82 * struct netmap_ring
 83 * variable too, 8 bytes per slot plus some fixed amount.
84 * Rings can be large (e.g. 4k slots, or >32Kbytes).
85 * We default to 36 KB (9 pages), and a few hundred rings.
86 *
87 * struct netmap_buffer
88 * The more the better, both because fast interfaces tend to have
89 * many slots, and because we may want to use buffers to store
90 * packets in userspace avoiding copies.
91 * Must contain a full frame (eg 1518, or more for vlans, jumbo
92 * frames etc.) plus be nicely aligned, plus some NICs restrict
93 * the size to multiple of 1K or so. Default to 2K
81 */
94 */
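A quick arithmetic check of these defaults (standalone editor's sketch; the 32-byte ring header and the "2k slots x 18 rings on a 10G NIC" example are taken from the comments elsewhere in this file, not from the real struct layouts):

/*
 * Sketch: rough arithmetic behind the defaults discussed above.
 * Real sizeof() values may differ slightly from these figures.
 */
#include <stdio.h>

int
main(void)
{
	unsigned page = 4096;
	unsigned slots = 4096;                     /* a large ring */
	unsigned ring_bytes = 32 + 8 * slots;      /* header + slot array */
	unsigned ring_pages = (ring_bytes + page - 1) / page;

	unsigned long long bufs = 2048ULL * 18;    /* 2k slots x 18 rings */
	unsigned long long buf_bytes = bufs * 2048;

	printf("ring: %u bytes -> %u pages\n", ring_bytes, ring_pages);    /* ~9 pages, 36 KB */
	printf("buffers: %llu x 2 KB = %llu MB\n", bufs, buf_bytes >> 20); /* ~72 MB */
	return 0;
}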
82#define NETMAP_IF_MAX_SIZE 1024
83#define NETMAP_IF_MAX_NUM 512
84
95
85/*
86 * netmap rings are up to 2..4k descriptors, 8 bytes each,
87 * plus some glue at the beginning (32 bytes).
88 * We set the default ring size to 9 pages (36K) and enable
89 * a few hundreds of them.
90 */
91#define NETMAP_RING_MAX_SIZE (9*PAGE_SIZE)
92#define NETMAP_RING_MAX_NUM 200 /* approx 8MB */
93
94/*
95 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
96 * 2k or slightly less, aligned to 64 bytes.
97 * A large 10G interface can have 2k*18 = 36k buffers per interface,
98 * or about 72MB of memory. Up to us to use more.
99 */
100#ifndef CONSERVATIVE
96#ifndef CONSERVATIVE
101#define NETMAP_BUF_MAX_NUM 100000 /* 200MB */
97#define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */
102#else /* CONSERVATIVE */
103#define NETMAP_BUF_MAX_NUM 20000 /* 40MB */
104#endif
105
98#else /* CONSERVATIVE */
99#define NETMAP_BUF_MAX_NUM 20000 /* 40MB */
100#endif
101
102#ifdef linux
103#define NMA_LOCK_T struct semaphore
104#define NMA_LOCK_INIT() sema_init(&nm_mem.nm_mtx, 1)
105#define NMA_LOCK_DESTROY()
106#define NMA_LOCK() down(&nm_mem.nm_mtx)
107#define NMA_UNLOCK() up(&nm_mem.nm_mtx)
108#else /* !linux */
109#define NMA_LOCK_T struct mtx
110#define NMA_LOCK_INIT() mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
111#define NMA_LOCK_DESTROY() mtx_destroy(&nm_mem.nm_mtx)
112#define NMA_LOCK() mtx_lock(&nm_mem.nm_mtx)
113#define NMA_UNLOCK() mtx_unlock(&nm_mem.nm_mtx)
114#endif /* linux */
106
115
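The NMA_* macros above are the allocator's only locking primitive: the same call sites compile to a semaphore on Linux and to a FreeBSD mutex otherwise. A minimal sketch of the intended usage (the helper name is invented; the real callers are the netmap_memory_* functions later in this file):

/* Sketch only: bracket any change to allocator state with the wrappers. */
static int
nm_mem_example_locked_op(void)		/* hypothetical helper */
{
	int error;

	NMA_LOCK();
	error = netmap_memory_config();	/* or finalize/deref, see below */
	NMA_UNLOCK();
	return error;
}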
116enum {
117 NETMAP_IF_POOL = 0,
118 NETMAP_RING_POOL,
119 NETMAP_BUF_POOL,
120 NETMAP_POOLS_NR
121};
122
123
124struct netmap_obj_params {
125 u_int size;
126 u_int num;
127};
128
129
130struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
131 [NETMAP_IF_POOL] = {
132 .size = 1024,
133 .num = 100,
134 },
135 [NETMAP_RING_POOL] = {
136 .size = 9*PAGE_SIZE,
137 .num = 200,
138 },
139 [NETMAP_BUF_POOL] = {
140 .size = 2048,
141 .num = NETMAP_BUF_MAX_NUM,
142 },
143};
144
145
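For a rough sense of scale, the defaults above request about 1024 B x 100 = 100 KB of netmap_if storage, 9 x 4096 B x 200 = 7.2 MB of ring storage, and 2048 B x 163840 (20*4096*2) = 320 MB of buffers, assuming 4 KB pages and before any rounding to cluster and page boundaries.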
107struct netmap_obj_pool {
108 char name[16]; /* name of the allocator */
109 u_int objtotal; /* actual total number of objects. */
110 u_int objfree; /* number of free objects. */
111 u_int clustentries; /* actual objects per cluster */
112
146struct netmap_obj_pool {
147 char name[16]; /* name of the allocator */
148 u_int objtotal; /* actual total number of objects. */
149 u_int objfree; /* number of free objects. */
150 u_int clustentries; /* actual objects per cluster */
151
152 /* limits */
153 u_int objminsize; /* minimum object size */
154 u_int objmaxsize; /* maximum object size */
155 u_int nummin; /* minimum number of objects */
156 u_int nummax; /* maximum number of objects */
157
113 /* the total memory space is _numclusters*_clustsize */
114 u_int _numclusters; /* how many clusters */
115 u_int _clustsize; /* cluster size */
116 u_int _objsize; /* actual object size */
117
118 u_int _memtotal; /* _numclusters*_clustsize */
119 struct lut_entry *lut; /* virt,phys addresses, objtotal entries */
120 uint32_t *bitmap; /* one bit per buffer, 1 means free */
158 /* the total memory space is _numclusters*_clustsize */
159 u_int _numclusters; /* how many clusters */
160 u_int _clustsize; /* cluster size */
161 u_int _objsize; /* actual object size */
162
163 u_int _memtotal; /* _numclusters*_clustsize */
164 struct lut_entry *lut; /* virt,phys addresses, objtotal entries */
165 uint32_t *bitmap; /* one bit per buffer, 1 means free */
166 uint32_t bitmap_slots; /* number of uint32 entries in bitmap */
121};
122
167};
168
169
123struct netmap_mem_d {
170struct netmap_mem_d {
124 NM_LOCK_T nm_mtx; /* protect the allocator ? */
171 NMA_LOCK_T nm_mtx; /* protect the allocator */
125 u_int nm_totalsize; /* shorthand */
126
172 u_int nm_totalsize; /* shorthand */
173
127 /* pointers to the three allocators */
128 struct netmap_obj_pool *nm_if_pool;
129 struct netmap_obj_pool *nm_ring_pool;
130 struct netmap_obj_pool *nm_buf_pool;
174 int finalized; /* !=0 iff preallocation done */
175 int lasterr; /* last error for curr config */
176 int refcount; /* existing priv structures */
177 /* the three allocators */
178 struct netmap_obj_pool pools[NETMAP_POOLS_NR];
131};
132
179};
180
181
182static struct netmap_mem_d nm_mem = { /* Our memory allocator. */
183 .pools = {
184 [NETMAP_IF_POOL] = {
185 .name = "netmap_if",
186 .objminsize = sizeof(struct netmap_if),
187 .objmaxsize = 4096,
188 .nummin = 10, /* don't be stingy */
189 .nummax = 10000, /* XXX very large */
190 },
191 [NETMAP_RING_POOL] = {
192 .name = "netmap_ring",
193 .objminsize = sizeof(struct netmap_ring),
194 .objmaxsize = 32*PAGE_SIZE,
195 .nummin = 2,
196 .nummax = 1024,
197 },
198 [NETMAP_BUF_POOL] = {
199 .name = "netmap_buf",
200 .objminsize = 64,
201 .objmaxsize = 65536,
202 .nummin = 4,
203 .nummax = 1000000, /* one million! */
204 },
205 },
206};
207
133struct lut_entry *netmap_buffer_lut; /* exported */
134
208struct lut_entry *netmap_buffer_lut; /* exported */
209
210/* memory allocator related sysctls */
135
211
212#define STRINGIFY(x) #x
213
214#define DECLARE_SYSCTLS(id, name) \
215 /* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_size", &netmap_params[id].size); */ \
216 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
217 CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
218 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
219 CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
220 /* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_num", &netmap_params[id].num); */ \
221 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
222 CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
223 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
224 CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")
225
226DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
227DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
228DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
229
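Assuming these knobs live under the usual dev.netmap sysctl node (declared elsewhere in netmap), the macro above produces pairs such as dev.netmap.buf_num / dev.netmap.buf_curr_num. A user-space sketch of how they might be used; the exact knob names should be confirmed with "sysctl dev.netmap":

/* Sketch: query/adjust the allocator knobs from userland (FreeBSD). */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int req = 40000, cur = 0;
	size_t len = sizeof(cur);

	/* request a different buffer count; it takes effect at the next finalize */
	if (sysctlbyname("dev.netmap.buf_num", NULL, NULL, &req, sizeof(req)) != 0)
		perror("dev.netmap.buf_num");

	/* read back how many buffers are currently allocated */
	if (sysctlbyname("dev.netmap.buf_curr_num", &cur, &len, NULL, 0) == 0)
		printf("buffers currently allocated: %d\n", cur);
	return 0;
}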
136/*
137 * Convert a userspace offset to a physical address.
138 * XXX re-do in a simpler way.
139 *
140 * The idea here is to hide from userspace applications the fact that pre-allocated
141 * memory is not contiguous, but fragmented across different clusters and
142 * smaller memory allocators. Consequently, first of all we need to find which
143 * allocator owns the provided offset, then we need to find out the physical
144 * address associated with the target page (this is done using the look-up table).
145 */
146static inline vm_paddr_t
147netmap_ofstophys(vm_offset_t offset)
148{
230/*
231 * Convert a userspace offset to a physical address.
232 * XXX re-do in a simpler way.
233 *
234 * The idea here is to hide from userspace applications the fact that pre-allocated
235 * memory is not contiguous, but fragmented across different clusters and
236 * smaller memory allocators. Consequently, first of all we need to find which
237 * allocator owns the provided offset, then we need to find out the physical
238 * address associated with the target page (this is done using the look-up table).
239 */
240static inline vm_paddr_t
241netmap_ofstophys(vm_offset_t offset)
242{
149 const struct netmap_obj_pool *p[] = {
150 nm_mem->nm_if_pool,
151 nm_mem->nm_ring_pool,
152 nm_mem->nm_buf_pool };
153 int i;
154 vm_offset_t o = offset;
243 int i;
244 vm_offset_t o = offset;
245 struct netmap_obj_pool *p = nm_mem.pools;
155
246
156
157 for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
158 if (offset >= p[i]->_memtotal)
247 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
248 if (offset >= p[i]._memtotal)
159 continue;
160 // XXX now scan the clusters
249 continue;
250 // XXX now scan the clusters
161 return p[i]->lut[offset / p[i]->_objsize].paddr +
162 offset % p[i]->_objsize;
251 return p[i].lut[offset / p[i]._objsize].paddr +
252 offset % p[i]._objsize;
163 }
253 }
254 /* this is only in case of errors */
164 D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
255 D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
165 p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
166 p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
256 p[NETMAP_IF_POOL]._memtotal,
257 p[NETMAP_IF_POOL]._memtotal
258 + p[NETMAP_RING_POOL]._memtotal,
259 p[NETMAP_IF_POOL]._memtotal
260 + p[NETMAP_RING_POOL]._memtotal
261 + p[NETMAP_BUF_POOL]._memtotal);
167 return 0; // XXX bad address
168}
169
170/*
171 * we store objects by kernel address, so we need to find the offset
172 * within the pool to export the value to userspace.
173 * Algorithm: scan until we find the cluster, then add the
174 * actual offset in the cluster

--- 18 unchanged lines hidden ---

193 }
194 D("address %p is not contained inside any cluster (%s)",
195 vaddr, p->name);
196 return 0; /* An error occurred */
197}
198
199/* Helper functions which convert virtual addresses to offsets */
200#define netmap_if_offset(v) \
262 return 0; // XXX bad address
263}
264
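A worked example of the scan above, with made-up pool sizes (the real values depend on the sysctls):

/*
 * Example with hypothetical sizes: if_pool._memtotal = 0x20000 and
 * ring_pool._memtotal = 0x200000. For a request at offset 0x223000:
 *   0x223000 >= 0x20000   -> not in the if pool,   offset -= 0x20000
 *   0x203000 >= 0x200000  -> not in the ring pool, offset -= 0x200000
 *   0x003000 <  buf_pool._memtotal -> buffer pool; with 2 KB objects this
 *   is lut index 0x3000 / 0x800 = 6, remainder 0, so the result is
 *   buf_pool.lut[6].paddr + 0.
 */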
265/*
266 * we store objects by kernel address, so we need to find the offset
267 * within the pool to export the value to userspace.
268 * Algorithm: scan until we find the cluster, then add the
269 * actual offset in the cluster

--- 18 unchanged lines hidden ---

288 }
289 D("address %p is not contained inside any cluster (%s)",
290 vaddr, p->name);
291 return 0; /* An error occurred */
292}
293
294/* Helper functions which convert virtual addresses to offsets */
295#define netmap_if_offset(v) \
201 netmap_obj_offset(nm_mem->nm_if_pool, (v))
296 netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))
202
203#define netmap_ring_offset(v) \
297
298#define netmap_ring_offset(v) \
204 (nm_mem->nm_if_pool->_memtotal + \
205 netmap_obj_offset(nm_mem->nm_ring_pool, (v)))
299 (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \
300 netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))
206
207#define netmap_buf_offset(v) \
301
302#define netmap_buf_offset(v) \
208 (nm_mem->nm_if_pool->_memtotal + \
209 nm_mem->nm_ring_pool->_memtotal + \
210 netmap_obj_offset(nm_mem->nm_buf_pool, (v)))
303 (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \
304 nm_mem.pools[NETMAP_RING_POOL]._memtotal + \
305 netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))
211
212
306
307
308/*
309 * report the index, and use start position as a hint,
310 * otherwise buffer allocation becomes terribly expensive.
311 */
213static void *
312static void *
214netmap_obj_malloc(struct netmap_obj_pool *p, int len)
313netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
215{
216 uint32_t i = 0; /* index in the bitmap */
217 uint32_t mask, j; /* slot counter */
218 void *vaddr = NULL;
219
220 if (len > p->_objsize) {
221 D("%s request size %d too large", p->name, len);
222 // XXX cannot reduce the size
223 return NULL;
224 }
225
226 if (p->objfree == 0) {
227 D("%s allocator: run out of memory", p->name);
228 return NULL;
229 }
314{
315 uint32_t i = 0; /* index in the bitmap */
316 uint32_t mask, j; /* slot counter */
317 void *vaddr = NULL;
318
319 if (len > p->_objsize) {
320 D("%s request size %d too large", p->name, len);
321 // XXX cannot reduce the size
322 return NULL;
323 }
324
325 if (p->objfree == 0) {
326 D("%s allocator: run out of memory", p->name);
327 return NULL;
328 }
329 if (start)
330 i = *start;
230
331
231 /* termination is guaranteed by p->free */
232 while (vaddr == NULL) {
332 /* termination is guaranteed by p->free, but better check bounds on i */
333 while (vaddr == NULL && i < p->bitmap_slots) {
233 uint32_t cur = p->bitmap[i];
234 if (cur == 0) { /* bitmask is fully used */
235 i++;
236 continue;
237 }
238 /* locate a slot */
239 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
240 ;
241
242 p->bitmap[i] &= ~mask; /* mark object as in use */
243 p->objfree--;
244
245 vaddr = p->lut[i * 32 + j].vaddr;
334 uint32_t cur = p->bitmap[i];
335 if (cur == 0) { /* bitmask is fully used */
336 i++;
337 continue;
338 }
339 /* locate a slot */
340 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
341 ;
342
343 p->bitmap[i] &= ~mask; /* mark object as in use */
344 p->objfree--;
345
346 vaddr = p->lut[i * 32 + j].vaddr;
347 if (index)
348 *index = i * 32 + j;
246 }
247 ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);
248
349 }
350 ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);
351
352 if (start)
353 *start = i;
249 return vaddr;
250}
251
252
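In isolation, the allocation policy above is a linear scan of 32-bit bitmap words with an optional resume hint. A standalone sketch of the same idea (not the kernel code; the names below are invented):

#include <stdint.h>

/*
 * Sketch: claim a free slot in a bitmap where a set bit means "free".
 * '*hint' is where the scan starts and is updated so the next call resumes
 * there, mirroring the start/index parameters added to netmap_obj_malloc().
 */
static int
bitmap_claim(uint32_t *bitmap, uint32_t nwords, uint32_t *hint, uint32_t *index)
{
	uint32_t i = hint ? *hint : 0;
	uint32_t j, mask;

	for (; i < nwords; i++) {
		uint32_t cur = bitmap[i];

		if (cur == 0)			/* word fully allocated */
			continue;
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;
		bitmap[i] &= ~mask;		/* mark the object as in use */
		if (hint)
			*hint = i;
		if (index)
			*index = i * 32 + j;
		return 0;
	}
	if (hint)
		*hint = i;
	return -1;				/* nothing left */
}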
253/*
254 * free by index, not by address
255 */
256static void

--- 25 unchanged lines hidden ---

282 KASSERT(j != 0, ("Cannot free object 0"));
283 netmap_obj_free(p, j);
284 return;
285 }
286 ND("address %p is not contained inside any cluster (%s)",
287 vaddr, p->name);
288}
289
354 return vaddr;
355}
356
357
358/*
359 * free by index, not by address
360 */
361static void

--- 25 unchanged lines hidden ---

387 KASSERT(j != 0, ("Cannot free object 0"));
388 netmap_obj_free(p, j);
389 return;
390 }
391 ND("address %p is not contained inside any cluster (%s)",
392 vaddr, p->name);
393}
394
290#define netmap_if_malloc(len) netmap_obj_malloc(nm_mem->nm_if_pool, len)
291#define netmap_if_free(v) netmap_obj_free_va(nm_mem->nm_if_pool, (v))
292#define netmap_ring_malloc(len) netmap_obj_malloc(nm_mem->nm_ring_pool, len)
293#define netmap_buf_malloc() \
294 netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)
395#define netmap_if_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
396#define netmap_if_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
397#define netmap_ring_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
398#define netmap_ring_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))
399#define netmap_buf_malloc(_pos, _index) \
400 netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)
295
296
297/* Return the index associated to the given packet buffer */
298#define netmap_buf_index(v) \
401
402
403/* Return the index associated to the given packet buffer */
404#define netmap_buf_index(v) \
299 (netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)
405 (netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)
300
301
406
407
302static void
408/* Return nonzero on error */
409static int
303netmap_new_bufs(struct netmap_if *nifp,
304 struct netmap_slot *slot, u_int n)
305{
410netmap_new_bufs(struct netmap_if *nifp,
411 struct netmap_slot *slot, u_int n)
412{
306 struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
307 uint32_t i = 0; /* slot counter */
413 struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
414 int i = 0; /* slot counter */
415 uint32_t pos = 0; /* slot in p->bitmap */
416 uint32_t index = 0; /* buffer index */
308
309 (void)nifp; /* UNUSED */
310 for (i = 0; i < n; i++) {
417
418 (void)nifp; /* UNUSED */
419 for (i = 0; i < n; i++) {
311 void *vaddr = netmap_buf_malloc();
420 void *vaddr = netmap_buf_malloc(&pos, &index);
312 if (vaddr == NULL) {
313 D("unable to locate empty packet buffer");
314 goto cleanup;
315 }
421 if (vaddr == NULL) {
422 D("unable to locate empty packet buffer");
423 goto cleanup;
424 }
316
317 slot[i].buf_idx = netmap_buf_index(vaddr);
318 KASSERT(slot[i].buf_idx != 0,
319 ("Assigning buf_idx=0 to just created slot"));
425 slot[i].buf_idx = index;
320 slot[i].len = p->_objsize;
426 slot[i].len = p->_objsize;
321 slot[i].flags = NS_BUF_CHANGED; // XXX GAETANO hack
427 /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
428 * in the NIC ring. This is a hack that hides missing
429 * initializations in the drivers, and should go away.
430 */
431 slot[i].flags = NS_BUF_CHANGED;
322 }
323
432 }
433
324 ND("allocated %d buffers, %d available", n, p->objfree);
325 return;
434 ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
435 return (0);
326
327cleanup:
328 while (i > 0) {
329 i--;
436
437cleanup:
438 while (i > 0) {
439 i--;
330 netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
440 netmap_obj_free(p, slot[i].buf_idx);
331 }
441 }
442 bzero(slot, n * sizeof(slot[0]));
443 return (ENOMEM);
332}
333
334
335static void
336netmap_free_buf(struct netmap_if *nifp, uint32_t i)
337{
444}
445
446
447static void
448netmap_free_buf(struct netmap_if *nifp, uint32_t i)
449{
338 struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
450 struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
451
339 if (i < 2 || i >= p->objtotal) {
340 D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
341 return;
342 }
452 if (i < 2 || i >= p->objtotal) {
453 D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
454 return;
455 }
343 netmap_obj_free(nm_mem->nm_buf_pool, i);
456 netmap_obj_free(p, i);
344}
345
457}
458
346
347/*
348 * Free all resources related to an allocator.
349 */
350static void
459static void
351netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
460netmap_reset_obj_allocator(struct netmap_obj_pool *p)
352{
353 if (p == NULL)
354 return;
355 if (p->bitmap)
356 free(p->bitmap, M_NETMAP);
461{
462 if (p == NULL)
463 return;
464 if (p->bitmap)
465 free(p->bitmap, M_NETMAP);
466 p->bitmap = NULL;
357 if (p->lut) {
358 int i;
359 for (i = 0; i < p->objtotal; i += p->clustentries) {
360 if (p->lut[i].vaddr)
361 contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
362 }
363 bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
467 if (p->lut) {
468 int i;
469 for (i = 0; i < p->objtotal; i += p->clustentries) {
470 if (p->lut[i].vaddr)
471 contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
472 }
473 bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
474#ifdef linux
475 vfree(p->lut);
476#else
364 free(p->lut, M_NETMAP);
477 free(p->lut, M_NETMAP);
478#endif
365 }
479 }
366 bzero(p, sizeof(*p));
367 free(p, M_NETMAP);
480 p->lut = NULL;
368}
369
370/*
481}
482
483/*
484 * Free all resources related to an allocator.
485 */
486static void
487netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
488{
489 if (p == NULL)
490 return;
491 netmap_reset_obj_allocator(p);
492}
493
494/*
371 * We receive a request for objtotal objects, of size objsize each.
372 * Internally we may round up both numbers, as we allocate objects
373 * in small clusters multiple of the page size.
374 * In the allocator we don't need to store the objsize,
375 * but we do need to keep track of objtotal' and clustentries,
376 * as they are needed when freeing memory.
377 *
378 * XXX note -- userspace needs the buffers to be contiguous,
379 * so we cannot afford gaps at the end of a cluster.
380 */
495 * We receive a request for objtotal objects, of size objsize each.
496 * Internally we may round up both numbers, as we allocate objects
497 * in small clusters multiple of the page size.
498 * In the allocator we don't need to store the objsize,
499 * but we do need to keep track of objtotal' and clustentries,
500 * as they are needed when freeing memory.
501 *
502 * XXX note -- userspace needs the buffers to be contiguous,
503 * so we cannot afford gaps at the end of a cluster.
504 */
381static struct netmap_obj_pool *
382netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
505
506
507/* call with NMA_LOCK held */
508static int
509netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
383{
510{
384 struct netmap_obj_pool *p;
385 int i, n;
386 u_int clustsize; /* the cluster size, multiple of page size */
387 u_int clustentries; /* how many objects per entry */
388
389#define MAX_CLUSTSIZE (1<<17)
390#define LINE_ROUND 64
391 if (objsize >= MAX_CLUSTSIZE) {
392 /* we could do it but there is no point */
393 D("unsupported allocation for %d bytes", objsize);
511 int i, n;
512 u_int clustsize; /* the cluster size, multiple of page size */
513 u_int clustentries; /* how many objects per entry */
514
515#define MAX_CLUSTSIZE (1<<17)
516#define LINE_ROUND 64
517 if (objsize >= MAX_CLUSTSIZE) {
518 /* we could do it but there is no point */
519 D("unsupported allocation for %d bytes", objsize);
394 return NULL;
520 goto error;
395 }
396 /* make sure objsize is a multiple of LINE_ROUND */
397 i = (objsize & (LINE_ROUND - 1));
398 if (i) {
399 D("XXX aligning object by %d bytes", LINE_ROUND - i);
400 objsize += LINE_ROUND - i;
401 }
521 }
522 /* make sure objsize is a multiple of LINE_ROUND */
523 i = (objsize & (LINE_ROUND - 1));
524 if (i) {
525 D("XXX aligning object by %d bytes", LINE_ROUND - i);
526 objsize += LINE_ROUND - i;
527 }
528 if (objsize < p->objminsize || objsize > p->objmaxsize) {
529 D("requested objsize %d out of range [%d, %d]",
530 objsize, p->objminsize, p->objmaxsize);
531 goto error;
532 }
533 if (objtotal < p->nummin || objtotal > p->nummax) {
534 D("requested objtotal %d out of range [%d, %d]",
535 objtotal, p->nummin, p->nummax);
536 goto error;
537 }
402 /*
403 * Compute number of objects using a brute-force approach:
404 * given a max cluster size,
405 * we try to fill it with objects keeping track of the
406 * wasted space to the next page boundary.
407 */
408 for (clustentries = 0, i = 1;; i++) {
409 u_int delta, used = i * objsize;

--- 11 unchanged lines hidden ---

421 /* compute clustsize and round to the next page */
422 clustsize = clustentries * objsize;
423 i = (clustsize & (PAGE_SIZE - 1));
424 if (i)
425 clustsize += PAGE_SIZE - i;
426 D("objsize %d clustsize %d objects %d",
427 objsize, clustsize, clustentries);
428
538 /*
539 * Compute number of objects using a brute-force approach:
540 * given a max cluster size,
541 * we try to fill it with objects keeping track of the
542 * wasted space to the next page boundary.
543 */
544 for (clustentries = 0, i = 1;; i++) {
545 u_int delta, used = i * objsize;

--- 11 unchanged lines hidden ---

557 /* compute clustsize and round to the next page */
558 clustsize = clustentries * objsize;
559 i = (clustsize & (PAGE_SIZE - 1));
560 if (i)
561 clustsize += PAGE_SIZE - i;
562 D("objsize %d clustsize %d objects %d",
563 objsize, clustsize, clustentries);
564
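The loop hidden above implements the brute-force search described in the header comment: try successive object counts and keep the one that wastes the least space up to the next page boundary. One way to read that, as a standalone sketch (4 KB pages and the same MAX_CLUSTSIZE bound are assumptions here):

/* Sketch: choose objects-per-cluster so the cluster, once rounded up to a
 * page boundary, wastes as little space as possible.
 */
static unsigned
pick_clustentries(unsigned objsize, unsigned page, unsigned max_clust)
{
	unsigned i, best_n = 1, best_waste = page;

	for (i = 1; i * objsize <= max_clust; i++) {
		unsigned used = i * objsize;
		unsigned waste = (page - (used % page)) % page;	/* to next page */

		if (waste < best_waste) {
			best_waste = waste;
			best_n = i;
			if (waste == 0)
				break;			/* perfect fit */
		}
	}
	return best_n;
}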
429 p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
430 M_WAITOK | M_ZERO);
431 if (p == NULL) {
432 D("Unable to create '%s' allocator", name);
433 return NULL;
434 }
435 /*
565 /*
436 * Allocate and initialize the lookup table.
437 *
438 * The number of clusters is n = ceil(objtotal/clustentries)
439 * objtotal' = n * clustentries
440 */
566 * The number of clusters is n = ceil(objtotal/clustentries)
567 * objtotal' = n * clustentries
568 */
441 strncpy(p->name, name, sizeof(p->name));
442 p->clustentries = clustentries;
443 p->_clustsize = clustsize;
444 n = (objtotal + clustentries - 1) / clustentries;
445 p->_numclusters = n;
446 p->objtotal = n * clustentries;
447 p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
569 p->clustentries = clustentries;
570 p->_clustsize = clustsize;
571 n = (objtotal + clustentries - 1) / clustentries;
572 p->_numclusters = n;
573 p->objtotal = n * clustentries;
574 p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
448 p->_objsize = objsize;
449 p->_memtotal = p->_numclusters * p->_clustsize;
575 p->_memtotal = p->_numclusters * p->_clustsize;
576 p->_objsize = objsize;
450
577
451 p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
452 M_NETMAP, M_WAITOK | M_ZERO);
578 return 0;
579
580error:
581 p->_objsize = objsize;
582 p->objtotal = objtotal;
583
584 return EINVAL;
585}
586
587
588/* call with NMA_LOCK held */
589static int
590netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
591{
592 int i, n;
593
594 n = sizeof(struct lut_entry) * p->objtotal;
595#ifdef linux
596 p->lut = vmalloc(n);
597#else
598 p->lut = malloc(n, M_NETMAP, M_WAITOK | M_ZERO);
599#endif
453 if (p->lut == NULL) {
600 if (p->lut == NULL) {
454 D("Unable to create lookup table for '%s' allocator", name);
601 D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
455 goto clean;
456 }
457
458 /* Allocate the bitmap */
459 n = (p->objtotal + 31) / 32;
460 p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
461 if (p->bitmap == NULL) {
462 D("Unable to create bitmap (%d entries) for allocator '%s'", n,
602 goto clean;
603 }
604
605 /* Allocate the bitmap */
606 n = (p->objtotal + 31) / 32;
607 p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
608 if (p->bitmap == NULL) {
609 D("Unable to create bitmap (%d entries) for allocator '%s'", n,
463 name);
610 p->name);
464 goto clean;
465 }
611 goto clean;
612 }
613 p->bitmap_slots = n;
466
467 /*
468 * Allocate clusters, init pointers and bitmap
469 */
470 for (i = 0; i < p->objtotal;) {
614
615 /*
616 * Allocate clusters, init pointers and bitmap
617 */
618 for (i = 0; i < p->objtotal;) {
471 int lim = i + clustentries;
619 int lim = i + p->clustentries;
472 char *clust;
473
620 char *clust;
621
474 clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
622 clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
475 0, -1UL, PAGE_SIZE, 0);
476 if (clust == NULL) {
477 /*
478 * If we get here, there is a severe memory shortage,
479 * so halve the allocated memory to reclaim some.
623 0, -1UL, PAGE_SIZE, 0);
624 if (clust == NULL) {
625 /*
626 * If we get here, there is a severe memory shortage,
627 * so halve the allocated memory to reclaim some.
628 * XXX check boundaries
480 */
481 D("Unable to create cluster at %d for '%s' allocator",
629 */
630 D("Unable to create cluster at %d for '%s' allocator",
482 i, name);
631 i, p->name);
483 lim = i / 2;
632 lim = i / 2;
484 for (; i >= lim; i--) {
633 for (i--; i >= lim; i--) {
485 p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
634 p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
486 if (i % clustentries == 0 && p->lut[i].vaddr)
635 if (i % p->clustentries == 0 && p->lut[i].vaddr)
487 contigfree(p->lut[i].vaddr,
488 p->_clustsize, M_NETMAP);
489 }
490 p->objtotal = i;
491 p->objfree = p->objtotal - 2;
636 contigfree(p->lut[i].vaddr,
637 p->_clustsize, M_NETMAP);
638 }
639 p->objtotal = i;
640 p->objfree = p->objtotal - 2;
492 p->_numclusters = i / clustentries;
641 p->_numclusters = i / p->clustentries;
493 p->_memtotal = p->_numclusters * p->_clustsize;
494 break;
495 }
642 p->_memtotal = p->_numclusters * p->_clustsize;
643 break;
644 }
496 for (; i < lim; i++, clust += objsize) {
645 for (; i < lim; i++, clust += p->_objsize) {
497 p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
498 p->lut[i].vaddr = clust;
499 p->lut[i].paddr = vtophys(clust);
500 }
501 }
502	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
503 D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
504 p->_numclusters, p->_clustsize >> 10,
646 p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
647 p->lut[i].vaddr = clust;
648 p->lut[i].paddr = vtophys(clust);
649 }
650 }
651	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
652 D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
653 p->_numclusters, p->_clustsize >> 10,
505 p->_memtotal >> 10, name);
654 p->_memtotal >> 10, p->name);
506
655
507 return p;
656 return 0;
508
509clean:
657
658clean:
510 netmap_destroy_obj_allocator(p);
511 return NULL;
659 netmap_reset_obj_allocator(p);
660 return ENOMEM;
512}
513
661}
662
663/* call with lock held */
514static int
664static int
515netmap_memory_init(void)
665netmap_memory_config_changed(void)
516{
666{
517 struct netmap_obj_pool *p;
667 int i;
518
668
519 nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
520 M_WAITOK | M_ZERO);
521 if (nm_mem == NULL)
522 goto clean;
669 for (i = 0; i < NETMAP_POOLS_NR; i++) {
670 if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
671 nm_mem.pools[i].objtotal != netmap_params[i].num)
672 return 1;
673 }
674 return 0;
675}
523
676
524 p = netmap_new_obj_allocator("netmap_if",
525 NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
526 if (p == NULL)
527 goto clean;
528 nm_mem->nm_if_pool = p;
529
677
530 p = netmap_new_obj_allocator("netmap_ring",
531 NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
532 if (p == NULL)
533 goto clean;
534 nm_mem->nm_ring_pool = p;
678/* call with lock held */
679static int
680netmap_memory_config(void)
681{
682 int i;
535
683
536 p = netmap_new_obj_allocator("netmap_buf",
537 NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
538 if (p == NULL)
539 goto clean;
540 netmap_total_buffers = p->objtotal;
541 netmap_buffer_lut = p->lut;
542 nm_mem->nm_buf_pool = p;
543 netmap_buffer_base = p->lut[0].vaddr;
544
684
545 mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
546 MTX_DEF);
547 nm_mem->nm_totalsize =
548 nm_mem->nm_if_pool->_memtotal +
549 nm_mem->nm_ring_pool->_memtotal +
550 nm_mem->nm_buf_pool->_memtotal;
685 if (!netmap_memory_config_changed())
686 goto out;
551
687
688 D("reconfiguring");
689
690 if (nm_mem.finalized) {
691 /* reset previous allocation */
692 for (i = 0; i < NETMAP_POOLS_NR; i++) {
693 netmap_reset_obj_allocator(&nm_mem.pools[i]);
694 }
695 nm_mem.finalized = 0;
696 }
697
698 for (i = 0; i < NETMAP_POOLS_NR; i++) {
699 nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
700 netmap_params[i].num, netmap_params[i].size);
701 if (nm_mem.lasterr)
702 goto out;
703 }
704
552 D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
705 D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
553 nm_mem->nm_if_pool->_memtotal >> 10,
554 nm_mem->nm_ring_pool->_memtotal >> 10,
555 nm_mem->nm_buf_pool->_memtotal >> 20);
556 return 0;
706 nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
707 nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
708 nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);
557
709
558clean:
559 if (nm_mem) {
560 netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
561 netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
562 free(nm_mem, M_NETMAP);
710out:
711
712 return nm_mem.lasterr;
713}
714
715/* call with lock held */
716static int
717netmap_memory_finalize(void)
718{
719 int i;
720 u_int totalsize = 0;
721
722 nm_mem.refcount++;
723 if (nm_mem.refcount > 1) {
724 D("busy (refcount %d)", nm_mem.refcount);
725 goto out;
563 }
726 }
564 return ENOMEM;
727
728 /* update configuration if changed */
729 if (netmap_memory_config())
730 goto out;
731
732 if (nm_mem.finalized) {
733 /* may happen if config is not changed */
734 ND("nothing to do");
735 goto out;
736 }
737
738 for (i = 0; i < NETMAP_POOLS_NR; i++) {
739 nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
740 if (nm_mem.lasterr)
741 goto cleanup;
742 totalsize += nm_mem.pools[i]._memtotal;
743 }
744 nm_mem.nm_totalsize = totalsize;
745
746 /* backward compatibility */
747 netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
748 netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;
749
750 netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
751 netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;
752
753 nm_mem.finalized = 1;
754 nm_mem.lasterr = 0;
755
756 /* make sysctl values match actual values in the pools */
757 for (i = 0; i < NETMAP_POOLS_NR; i++) {
758 netmap_params[i].size = nm_mem.pools[i]._objsize;
759 netmap_params[i].num = nm_mem.pools[i].objtotal;
760 }
761
762out:
763 if (nm_mem.lasterr)
764 nm_mem.refcount--;
765
766 return nm_mem.lasterr;
767
768cleanup:
769 for (i = 0; i < NETMAP_POOLS_NR; i++) {
770 netmap_reset_obj_allocator(&nm_mem.pools[i]);
771 }
772 nm_mem.refcount--;
773
774 return nm_mem.lasterr;
565}
566
775}
776
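Taken together, config/finalize/deref give the allocator a simple lifecycle driven by a reference count. A sketch of the sequence a caller (e.g. the code behind a netmap file descriptor) is expected to follow; the helper names are invented, and in this revision netmap_memory_deref() only drops the refcount without freeing memory:

/* Sketch: intended allocator lifecycle around one netmap descriptor. */
static int
example_priv_attach(void)			/* hypothetical caller */
{
	int error;

	NMA_LOCK();
	error = netmap_memory_finalize();	/* (re)config + preallocate, refcount++ */
	NMA_UNLOCK();
	return error;				/* nonzero: nothing is mapped */
}

static void
example_priv_detach(void)			/* hypothetical caller */
{
	NMA_LOCK();
	netmap_memory_deref();			/* refcount--, memory kept for reuse */
	NMA_UNLOCK();
}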
777static int
778netmap_memory_init(void)
779{
780 NMA_LOCK_INIT();
781 return (0);
782}
567
568static void
569netmap_memory_fini(void)
570{
783
784static void
785netmap_memory_fini(void)
786{
571 if (!nm_mem)
572 return;
573 netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
574 netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
575 netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
576 mtx_destroy(&nm_mem->nm_mtx);
577 free(nm_mem, M_NETMAP);
787 int i;
788
789 for (i = 0; i < NETMAP_POOLS_NR; i++) {
790 netmap_destroy_obj_allocator(&nm_mem.pools[i]);
791 }
792 NMA_LOCK_DESTROY();
578}
579
793}
794
795static void
796netmap_free_rings(struct netmap_adapter *na)
797{
798 int i;
799 for (i = 0; i < na->num_tx_rings + 1; i++) {
800 netmap_ring_free(na->tx_rings[i].ring);
801 na->tx_rings[i].ring = NULL;
802 }
803 for (i = 0; i < na->num_rx_rings + 1; i++) {
804 netmap_ring_free(na->rx_rings[i].ring);
805 na->rx_rings[i].ring = NULL;
806 }
807}
580
581
808
809
810
811/* call with NMA_LOCK held */
582static void *
583netmap_if_new(const char *ifname, struct netmap_adapter *na)
584{
585 struct netmap_if *nifp;
586 struct netmap_ring *ring;
587 ssize_t base; /* handy for relative offsets between rings and nifp */
588 u_int i, len, ndesc;
589 u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
590 u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
591 struct netmap_kring *kring;
592
812static void *
813netmap_if_new(const char *ifname, struct netmap_adapter *na)
814{
815 struct netmap_if *nifp;
816 struct netmap_ring *ring;
817 ssize_t base; /* handy for relative offsets between rings and nifp */
818 u_int i, len, ndesc;
819 u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
820 u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
821 struct netmap_kring *kring;
822
593 NMA_LOCK();
594 /*
595 * the descriptor is followed inline by an array of offsets
596 * to the tx and rx rings in the shared memory region.
597 */
598 len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
599 nifp = netmap_if_malloc(len);
600 if (nifp == NULL) {
823 /*
824 * the descriptor is followed inline by an array of offsets
825 * to the tx and rx rings in the shared memory region.
826 */
827 len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
828 nifp = netmap_if_malloc(len);
829 if (nifp == NULL) {
601 NMA_UNLOCK();
602 return NULL;
603 }
604
605 /* initialize base fields -- override const */
606 *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
607 *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
608 strncpy(nifp->ni_name, ifname, IFNAMSIZ);
609
610 (na->refcount)++; /* XXX atomic ? we are under lock */
611 if (na->refcount > 1) { /* already setup, we are done */
830 return NULL;
831 }
832
833 /* initialize base fields -- override const */
834 *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
835 *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
836 strncpy(nifp->ni_name, ifname, IFNAMSIZ);
837
838 (na->refcount)++; /* XXX atomic ? we are under lock */
839 if (na->refcount > 1) { /* already setup, we are done */
612 NMA_UNLOCK();
613 goto final;
614 }
615
616 /*
617 * First instance, allocate netmap rings and buffers for this card
618 * The rings are contiguous, but have variable size.
619 */
620 for (i = 0; i < ntx; i++) { /* Transmit rings */

--- 7 unchanged lines hidden ---

628 D("Cannot allocate tx_ring[%d] for %s", i, ifname);
629 goto cleanup;
630 }
631 ND("txring[%d] at %p ofs %d", i, ring);
632 kring->na = na;
633 kring->ring = ring;
634 *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
635 *(ssize_t *)(uintptr_t)&ring->buf_ofs =
840 goto final;
841 }
842
843 /*
844 * First instance, allocate netmap rings and buffers for this card
845 * The rings are contiguous, but have variable size.
846 */
847 for (i = 0; i < ntx; i++) { /* Transmit rings */

--- 7 unchanged lines hidden ---

855 D("Cannot allocate tx_ring[%d] for %s", i, ifname);
856 goto cleanup;
857 }
858 ND("txring[%d] at %p ofs %d", i, ring);
859 kring->na = na;
860 kring->ring = ring;
861 *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
862 *(ssize_t *)(uintptr_t)&ring->buf_ofs =
636 (nm_mem->nm_if_pool->_memtotal +
637 nm_mem->nm_ring_pool->_memtotal) -
863 (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
864 nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
638 netmap_ring_offset(ring);
639
640 /*
641 * IMPORTANT:
642 * Always keep one slot empty, so we can detect new
643 * transmissions comparing cur and nr_hwcur (they are
644 * the same only if there are no new transmissions).
645 */
646 ring->avail = kring->nr_hwavail = ndesc - 1;
647 ring->cur = kring->nr_hwcur = 0;
648 *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
649 ND("initializing slots for txring[%d]", i);
865 netmap_ring_offset(ring);
866
867 /*
868 * IMPORTANT:
869 * Always keep one slot empty, so we can detect new
870 * transmissions comparing cur and nr_hwcur (they are
871 * the same only if there are no new transmissions).
872 */
873 ring->avail = kring->nr_hwavail = ndesc - 1;
874 ring->cur = kring->nr_hwcur = 0;
875 *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
876 ND("initializing slots for txring[%d]", i);
650 netmap_new_bufs(nifp, ring->slot, ndesc);
877 if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
878 D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
879 goto cleanup;
880 }
651 }
652
653 for (i = 0; i < nrx; i++) { /* Receive rings */
654 kring = &na->rx_rings[i];
655 ndesc = na->num_rx_desc;
656 bzero(kring, sizeof(*kring));
657 len = sizeof(struct netmap_ring) +
658 ndesc * sizeof(struct netmap_slot);
659 ring = netmap_ring_malloc(len);
660 if (ring == NULL) {
661 D("Cannot allocate rx_ring[%d] for %s", i, ifname);
662 goto cleanup;
663 }
664 ND("rxring[%d] at %p ofs %d", i, ring);
665
666 kring->na = na;
667 kring->ring = ring;
668 *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
669 *(ssize_t *)(uintptr_t)&ring->buf_ofs =
881 }
882
883 for (i = 0; i < nrx; i++) { /* Receive rings */
884 kring = &na->rx_rings[i];
885 ndesc = na->num_rx_desc;
886 bzero(kring, sizeof(*kring));
887 len = sizeof(struct netmap_ring) +
888 ndesc * sizeof(struct netmap_slot);
889 ring = netmap_ring_malloc(len);
890 if (ring == NULL) {
891 D("Cannot allocate rx_ring[%d] for %s", i, ifname);
892 goto cleanup;
893 }
894 ND("rxring[%d] at %p ofs %d", i, ring);
895
896 kring->na = na;
897 kring->ring = ring;
898 *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
899 *(ssize_t *)(uintptr_t)&ring->buf_ofs =
670 (nm_mem->nm_if_pool->_memtotal +
671 nm_mem->nm_ring_pool->_memtotal) -
900 (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
901 nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
672 netmap_ring_offset(ring);
673
674 ring->cur = kring->nr_hwcur = 0;
675 ring->avail = kring->nr_hwavail = 0; /* empty */
676 *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
677 ND("initializing slots for rxring[%d]", i);
902 netmap_ring_offset(ring);
903
904 ring->cur = kring->nr_hwcur = 0;
905 ring->avail = kring->nr_hwavail = 0; /* empty */
906 *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
907 ND("initializing slots for rxring[%d]", i);
678 netmap_new_bufs(nifp, ring->slot, ndesc);
908 if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
909 D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
910 goto cleanup;
911 }
679 }
912 }
680 NMA_UNLOCK();
681#ifdef linux
682 // XXX initialize the selrecord structs.
683 for (i = 0; i < ntx; i++)
684 init_waitqueue_head(&na->tx_rings[i].si);
685 for (i = 0; i < nrx; i++)
686 init_waitqueue_head(&na->rx_rings[i].si);
687 init_waitqueue_head(&na->tx_si);
688 init_waitqueue_head(&na->rx_si);

--- 10 unchanged lines hidden ---

699 netmap_ring_offset(na->tx_rings[i].ring) - base;
700 }
701 for (i = 0; i < nrx; i++) {
702 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
703 netmap_ring_offset(na->rx_rings[i].ring) - base;
704 }
705 return (nifp);
706cleanup:
913#ifdef linux
914 // XXX initialize the selrecord structs.
915 for (i = 0; i < ntx; i++)
916 init_waitqueue_head(&na->tx_rings[i].si);
917 for (i = 0; i < nrx; i++)
918 init_waitqueue_head(&na->rx_rings[i].si);
919 init_waitqueue_head(&na->tx_si);
920 init_waitqueue_head(&na->rx_si);

--- 10 unchanged lines hidden ---

931 netmap_ring_offset(na->tx_rings[i].ring) - base;
932 }
933 for (i = 0; i < nrx; i++) {
934 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
935 netmap_ring_offset(na->rx_rings[i].ring) - base;
936 }
937 return (nifp);
938cleanup:
707 // XXX missing
708 NMA_UNLOCK();
939 netmap_free_rings(na);
940 netmap_if_free(nifp);
941 (na->refcount)--;
709 return NULL;
710}
711
942 return NULL;
943}
944
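For reference, the ring_ofs[] and buf_ofs values written above are relative to the netmap_if and to each ring respectively; a userspace consumer resolves them roughly as below (sketch of what the public NETMAP_TXRING/NETMAP_BUF macros in the netmap headers do, not code from this file):

#include <sys/types.h>
#include <stdint.h>
#include <net/netmap.h>		/* struct netmap_if / netmap_ring / netmap_slot */

/* Sketch: locate the buffer behind slot 'slot_idx' of tx ring 'ring_idx'. */
static char *
example_slot_buffer(struct netmap_if *nifp, unsigned ring_idx, unsigned slot_idx)
{
	/* ring_ofs[] is stored relative to the netmap_if itself */
	struct netmap_ring *ring = (struct netmap_ring *)
	    ((char *)nifp + nifp->ring_ofs[ring_idx]);

	/* buf_ofs leads from the ring to the buffer region; buf_idx is global */
	return (char *)ring + ring->buf_ofs +
	    (size_t)ring->slot[slot_idx].buf_idx * ring->nr_buf_size;
}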
945/* call with NMA_LOCK held */
712static void
946static void
713netmap_free_rings(struct netmap_adapter *na)
947netmap_memory_deref(void)
714{
948{
715 int i;
716 for (i = 0; i < na->num_tx_rings + 1; i++)
717 netmap_obj_free_va(nm_mem->nm_ring_pool,
718 na->tx_rings[i].ring);
719 for (i = 0; i < na->num_rx_rings + 1; i++)
720 netmap_obj_free_va(nm_mem->nm_ring_pool,
721 na->rx_rings[i].ring);
949 nm_mem.refcount--;
950 D("refcount = %d", nm_mem.refcount);
722}
951}