netmap_mem2.c revision 241719
1234228Sluigi/* 2241719Sluigi * Copyright (C) 2012 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. 3234228Sluigi * 4234228Sluigi * Redistribution and use in source and binary forms, with or without 5234228Sluigi * modification, are permitted provided that the following conditions 6234228Sluigi * are met: 7234228Sluigi * 1. Redistributions of source code must retain the above copyright 8234228Sluigi * notice, this list of conditions and the following disclaimer. 9234228Sluigi * 2. Redistributions in binary form must reproduce the above copyright 10234228Sluigi * notice, this list of conditions and the following disclaimer in the 11234228Sluigi * documentation and/or other materials provided with the distribution. 12234228Sluigi * 13234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16234228Sluigi * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23234228Sluigi * SUCH DAMAGE. 24234228Sluigi */ 25234228Sluigi 26234228Sluigi/* 27234228Sluigi * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 241719 2012-10-19 04:13:12Z luigi $ 28241719Sluigi * $Id: netmap_mem2.c 11881 2012-10-18 23:24:15Z luigi $ 29234228Sluigi * 30241719Sluigi * (New) memory allocator for netmap 31234228Sluigi */ 32234228Sluigi 33234228Sluigi/* 34241719Sluigi * This allocator creates three memory regions: 35241719Sluigi * nm_if_pool for the struct netmap_if 36241719Sluigi * nm_ring_pool for the struct netmap_ring 37241719Sluigi * nm_buf_pool for the packet buffers. 38234228Sluigi * 39241719Sluigi * All regions need to be multiple of a page size as we export them to 40241719Sluigi * userspace through mmap. Only the latter needs to be dma-able, 41234228Sluigi * but for convenience use the same type of allocator for all. 42234228Sluigi * 43234228Sluigi * Once mapped, the three regions are exported to userspace 44234228Sluigi * as a contiguous block, starting from nm_if_pool. Each 45234228Sluigi * cluster (and pool) is an integral number of pages. 46234228Sluigi * [ . . . ][ . . . . . .][ . . . . . . . . . .] 47234228Sluigi * nm_if nm_ring nm_buf 48234228Sluigi * 49234228Sluigi * The userspace areas contain offsets of the objects in userspace. 50234228Sluigi * When (at init time) we write these offsets, we find out the index 51234228Sluigi * of the object, and from there locate the offset from the beginning 52234228Sluigi * of the region. 53234228Sluigi * 54241719Sluigi * The invididual allocators manage a pool of memory for objects of 55241719Sluigi * the same size. 56234228Sluigi * The pool is split into smaller clusters, whose size is a 57234228Sluigi * multiple of the page size. The cluster size is chosen 58234228Sluigi * to minimize the waste for a given max cluster size 59234228Sluigi * (we do it by brute force, as we have relatively few object 60234228Sluigi * per cluster). 61234228Sluigi * 62241719Sluigi * Objects are aligned to the cache line (64 bytes) rounding up object 63241719Sluigi * sizes when needed. A bitmap contains the state of each object. 64241719Sluigi * Allocation scans the bitmap; this is done only on attach, so we are not 65234228Sluigi * too worried about performance 66234228Sluigi * 67241719Sluigi * For each allocator we can define (thorugh sysctl) the size and 68241719Sluigi * number of each object. Memory is allocated at the first use of a 69241719Sluigi * netmap file descriptor, and can be freed when all such descriptors 70241719Sluigi * have been released (including unmapping the memory). 71241719Sluigi * If memory is scarce, the system tries to get as much as possible 72241719Sluigi * and the sysctl values reflect the actual allocation. 73241719Sluigi * Together with desired values, the sysctl export also absolute 74241719Sluigi * min and maximum values that cannot be overridden. 75234228Sluigi * 76241719Sluigi * struct netmap_if: 77241719Sluigi * variable size, max 16 bytes per ring pair plus some fixed amount. 78241719Sluigi * 1024 bytes should be large enough in practice. 79241719Sluigi * 80241719Sluigi * In the worst case we have one netmap_if per ring in the system. 81241719Sluigi * 82241719Sluigi * struct netmap_ring 83241719Sluigi * variable too, 8 byte per slot plus some fixed amount. 84241719Sluigi * Rings can be large (e.g. 4k slots, or >32Kbytes). 85241719Sluigi * We default to 36 KB (9 pages), and a few hundred rings. 86241719Sluigi * 87241719Sluigi * struct netmap_buffer 88241719Sluigi * The more the better, both because fast interfaces tend to have 89241719Sluigi * many slots, and because we may want to use buffers to store 90241719Sluigi * packets in userspace avoiding copies. 91241719Sluigi * Must contain a full frame (eg 1518, or more for vlans, jumbo 92241719Sluigi * frames etc.) plus be nicely aligned, plus some NICs restrict 93241719Sluigi * the size to multiple of 1K or so. Default to 2K 94234228Sluigi */ 95234228Sluigi 96234228Sluigi#ifndef CONSERVATIVE 97241719Sluigi#define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ 98234228Sluigi#else /* CONSERVATIVE */ 99234228Sluigi#define NETMAP_BUF_MAX_NUM 20000 /* 40MB */ 100234228Sluigi#endif 101234228Sluigi 102241719Sluigi#ifdef linux 103241719Sluigi#define NMA_LOCK_T struct semaphore 104241719Sluigi#define NMA_LOCK_INIT() sema_init(&nm_mem.nm_mtx, 1) 105241719Sluigi#define NMA_LOCK_DESTROY() 106241719Sluigi#define NMA_LOCK() down(&nm_mem.nm_mtx) 107241719Sluigi#define NMA_UNLOCK() up(&nm_mem.nm_mtx) 108241719Sluigi#else /* !linux */ 109241719Sluigi#define NMA_LOCK_T struct mtx 110241719Sluigi#define NMA_LOCK_INIT() mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF) 111241719Sluigi#define NMA_LOCK_DESTROY() mtx_destroy(&nm_mem.nm_mtx) 112241719Sluigi#define NMA_LOCK() mtx_lock(&nm_mem.nm_mtx) 113241719Sluigi#define NMA_UNLOCK() mtx_unlock(&nm_mem.nm_mtx) 114241719Sluigi#endif /* linux */ 115234228Sluigi 116241719Sluigienum { 117241719Sluigi NETMAP_IF_POOL = 0, 118241719Sluigi NETMAP_RING_POOL, 119241719Sluigi NETMAP_BUF_POOL, 120241719Sluigi NETMAP_POOLS_NR 121241719Sluigi}; 122241719Sluigi 123241719Sluigi 124241719Sluigistruct netmap_obj_params { 125241719Sluigi u_int size; 126241719Sluigi u_int num; 127241719Sluigi}; 128241719Sluigi 129241719Sluigi 130241719Sluigistruct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { 131241719Sluigi [NETMAP_IF_POOL] = { 132241719Sluigi .size = 1024, 133241719Sluigi .num = 100, 134241719Sluigi }, 135241719Sluigi [NETMAP_RING_POOL] = { 136241719Sluigi .size = 9*PAGE_SIZE, 137241719Sluigi .num = 200, 138241719Sluigi }, 139241719Sluigi [NETMAP_BUF_POOL] = { 140241719Sluigi .size = 2048, 141241719Sluigi .num = NETMAP_BUF_MAX_NUM, 142241719Sluigi }, 143241719Sluigi}; 144241719Sluigi 145241719Sluigi 146234228Sluigistruct netmap_obj_pool { 147234228Sluigi char name[16]; /* name of the allocator */ 148234228Sluigi u_int objtotal; /* actual total number of objects. */ 149234228Sluigi u_int objfree; /* number of free objects. */ 150234228Sluigi u_int clustentries; /* actual objects per cluster */ 151234228Sluigi 152241719Sluigi /* limits */ 153241719Sluigi u_int objminsize; /* minimum object size */ 154241719Sluigi u_int objmaxsize; /* maximum object size */ 155241719Sluigi u_int nummin; /* minimum number of objects */ 156241719Sluigi u_int nummax; /* maximum number of objects */ 157241719Sluigi 158234228Sluigi /* the total memory space is _numclusters*_clustsize */ 159234228Sluigi u_int _numclusters; /* how many clusters */ 160234228Sluigi u_int _clustsize; /* cluster size */ 161234228Sluigi u_int _objsize; /* actual object size */ 162234228Sluigi 163234228Sluigi u_int _memtotal; /* _numclusters*_clustsize */ 164234228Sluigi struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ 165234228Sluigi uint32_t *bitmap; /* one bit per buffer, 1 means free */ 166241719Sluigi uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ 167234228Sluigi}; 168234228Sluigi 169241719Sluigi 170234228Sluigistruct netmap_mem_d { 171241719Sluigi NMA_LOCK_T nm_mtx; /* protect the allocator */ 172234228Sluigi u_int nm_totalsize; /* shorthand */ 173234228Sluigi 174241719Sluigi int finalized; /* !=0 iff preallocation done */ 175241719Sluigi int lasterr; /* last error for curr config */ 176241719Sluigi int refcount; /* existing priv structures */ 177241719Sluigi /* the three allocators */ 178241719Sluigi struct netmap_obj_pool pools[NETMAP_POOLS_NR]; 179234228Sluigi}; 180234228Sluigi 181241719Sluigi 182241719Sluigistatic struct netmap_mem_d nm_mem = { /* Our memory allocator. */ 183241719Sluigi .pools = { 184241719Sluigi [NETMAP_IF_POOL] = { 185241719Sluigi .name = "netmap_if", 186241719Sluigi .objminsize = sizeof(struct netmap_if), 187241719Sluigi .objmaxsize = 4096, 188241719Sluigi .nummin = 10, /* don't be stingy */ 189241719Sluigi .nummax = 10000, /* XXX very large */ 190241719Sluigi }, 191241719Sluigi [NETMAP_RING_POOL] = { 192241719Sluigi .name = "netmap_ring", 193241719Sluigi .objminsize = sizeof(struct netmap_ring), 194241719Sluigi .objmaxsize = 32*PAGE_SIZE, 195241719Sluigi .nummin = 2, 196241719Sluigi .nummax = 1024, 197241719Sluigi }, 198241719Sluigi [NETMAP_BUF_POOL] = { 199241719Sluigi .name = "netmap_buf", 200241719Sluigi .objminsize = 64, 201241719Sluigi .objmaxsize = 65536, 202241719Sluigi .nummin = 4, 203241719Sluigi .nummax = 1000000, /* one million! */ 204241719Sluigi }, 205241719Sluigi }, 206241719Sluigi}; 207241719Sluigi 208234228Sluigistruct lut_entry *netmap_buffer_lut; /* exported */ 209234228Sluigi 210241719Sluigi/* memory allocator related sysctls */ 211234228Sluigi 212241719Sluigi#define STRINGIFY(x) #x 213241719Sluigi 214241719Sluigi#define DECLARE_SYSCTLS(id, name) \ 215241719Sluigi /* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_size", &netmap_params[id].size); */ \ 216241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 217241719Sluigi CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 218241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 219241719Sluigi CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 220241719Sluigi /* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_num", &netmap_params[id].num); */ \ 221241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 222241719Sluigi CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 223241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 224241719Sluigi CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s") 225241719Sluigi 226241719SluigiDECLARE_SYSCTLS(NETMAP_IF_POOL, if); 227241719SluigiDECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 228241719SluigiDECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 229241719Sluigi 230234228Sluigi/* 231234228Sluigi * Convert a userspace offset to a phisical address. 232234228Sluigi * XXX re-do in a simpler way. 233234228Sluigi * 234234228Sluigi * The idea here is to hide userspace applications the fact that pre-allocated 235234228Sluigi * memory is not contiguous, but fragmented across different clusters and 236234228Sluigi * smaller memory allocators. Consequently, first of all we need to find which 237234228Sluigi * allocator is owning provided offset, then we need to find out the physical 238234228Sluigi * address associated to target page (this is done using the look-up table. 239234228Sluigi */ 240234228Sluigistatic inline vm_paddr_t 241234228Sluiginetmap_ofstophys(vm_offset_t offset) 242234228Sluigi{ 243234228Sluigi int i; 244234228Sluigi vm_offset_t o = offset; 245241719Sluigi struct netmap_obj_pool *p = nm_mem.pools; 246234228Sluigi 247241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) { 248241719Sluigi if (offset >= p[i]._memtotal) 249234228Sluigi continue; 250234228Sluigi // XXX now scan the clusters 251241719Sluigi return p[i].lut[offset / p[i]._objsize].paddr + 252241719Sluigi offset % p[i]._objsize; 253234228Sluigi } 254241719Sluigi /* this is only in case of errors */ 255234290Sluigi D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 256241719Sluigi p[NETMAP_IF_POOL]._memtotal, 257241719Sluigi p[NETMAP_IF_POOL]._memtotal 258241719Sluigi + p[NETMAP_RING_POOL]._memtotal, 259241719Sluigi p[NETMAP_IF_POOL]._memtotal 260241719Sluigi + p[NETMAP_RING_POOL]._memtotal 261241719Sluigi + p[NETMAP_BUF_POOL]._memtotal); 262234228Sluigi return 0; // XXX bad address 263234228Sluigi} 264234228Sluigi 265234228Sluigi/* 266234228Sluigi * we store objects by kernel address, need to find the offset 267234228Sluigi * within the pool to export the value to userspace. 268234228Sluigi * Algorithm: scan until we find the cluster, then add the 269234228Sluigi * actual offset in the cluster 270234228Sluigi */ 271234242Sluigistatic ssize_t 272234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 273234228Sluigi{ 274234228Sluigi int i, k = p->clustentries, n = p->objtotal; 275234228Sluigi ssize_t ofs = 0; 276234228Sluigi 277234228Sluigi for (i = 0; i < n; i += k, ofs += p->_clustsize) { 278234228Sluigi const char *base = p->lut[i].vaddr; 279234228Sluigi ssize_t relofs = (const char *) vaddr - base; 280234228Sluigi 281234228Sluigi if (relofs < 0 || relofs > p->_clustsize) 282234228Sluigi continue; 283234228Sluigi 284234228Sluigi ofs = ofs + relofs; 285234228Sluigi ND("%s: return offset %d (cluster %d) for pointer %p", 286234228Sluigi p->name, ofs, i, vaddr); 287234228Sluigi return ofs; 288234228Sluigi } 289234228Sluigi D("address %p is not contained inside any cluster (%s)", 290234228Sluigi vaddr, p->name); 291234228Sluigi return 0; /* An error occurred */ 292234228Sluigi} 293234228Sluigi 294234228Sluigi/* Helper functions which convert virtual addresses to offsets */ 295234228Sluigi#define netmap_if_offset(v) \ 296241719Sluigi netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v)) 297234228Sluigi 298234228Sluigi#define netmap_ring_offset(v) \ 299241719Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \ 300241719Sluigi netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v))) 301234228Sluigi 302234228Sluigi#define netmap_buf_offset(v) \ 303241719Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \ 304241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal + \ 305241719Sluigi netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v))) 306234228Sluigi 307234228Sluigi 308241719Sluigi/* 309241719Sluigi * report the index, and use start position as a hint, 310241719Sluigi * otherwise buffer allocation becomes terribly expensive. 311241719Sluigi */ 312234228Sluigistatic void * 313241719Sluiginetmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index) 314234228Sluigi{ 315234228Sluigi uint32_t i = 0; /* index in the bitmap */ 316234228Sluigi uint32_t mask, j; /* slot counter */ 317234228Sluigi void *vaddr = NULL; 318234228Sluigi 319234228Sluigi if (len > p->_objsize) { 320234228Sluigi D("%s request size %d too large", p->name, len); 321234228Sluigi // XXX cannot reduce the size 322234228Sluigi return NULL; 323234228Sluigi } 324234228Sluigi 325234228Sluigi if (p->objfree == 0) { 326234228Sluigi D("%s allocator: run out of memory", p->name); 327234228Sluigi return NULL; 328234228Sluigi } 329241719Sluigi if (start) 330241719Sluigi i = *start; 331234228Sluigi 332241719Sluigi /* termination is guaranteed by p->free, but better check bounds on i */ 333241719Sluigi while (vaddr == NULL && i < p->bitmap_slots) { 334234228Sluigi uint32_t cur = p->bitmap[i]; 335234228Sluigi if (cur == 0) { /* bitmask is fully used */ 336234228Sluigi i++; 337234228Sluigi continue; 338234228Sluigi } 339234228Sluigi /* locate a slot */ 340234228Sluigi for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 341234228Sluigi ; 342234228Sluigi 343234228Sluigi p->bitmap[i] &= ~mask; /* mark object as in use */ 344234228Sluigi p->objfree--; 345234228Sluigi 346234228Sluigi vaddr = p->lut[i * 32 + j].vaddr; 347241719Sluigi if (index) 348241719Sluigi *index = i * 32 + j; 349234228Sluigi } 350234228Sluigi ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); 351234228Sluigi 352241719Sluigi if (start) 353241719Sluigi *start = i; 354234228Sluigi return vaddr; 355234228Sluigi} 356234228Sluigi 357234228Sluigi 358234228Sluigi/* 359234228Sluigi * free by index, not by address 360234228Sluigi */ 361234228Sluigistatic void 362234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 363234228Sluigi{ 364234228Sluigi if (j >= p->objtotal) { 365234228Sluigi D("invalid index %u, max %u", j, p->objtotal); 366234228Sluigi return; 367234228Sluigi } 368234228Sluigi p->bitmap[j / 32] |= (1 << (j % 32)); 369234228Sluigi p->objfree++; 370234228Sluigi return; 371234228Sluigi} 372234228Sluigi 373234228Sluigistatic void 374234228Sluiginetmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 375234228Sluigi{ 376234228Sluigi int i, j, n = p->_memtotal / p->_clustsize; 377234228Sluigi 378234228Sluigi for (i = 0, j = 0; i < n; i++, j += p->clustentries) { 379234228Sluigi void *base = p->lut[i * p->clustentries].vaddr; 380234228Sluigi ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 381234228Sluigi 382234228Sluigi /* Given address, is out of the scope of the current cluster.*/ 383234228Sluigi if (vaddr < base || relofs > p->_clustsize) 384234228Sluigi continue; 385234228Sluigi 386234228Sluigi j = j + relofs / p->_objsize; 387234228Sluigi KASSERT(j != 0, ("Cannot free object 0")); 388234228Sluigi netmap_obj_free(p, j); 389234228Sluigi return; 390234228Sluigi } 391234228Sluigi ND("address %p is not contained inside any cluster (%s)", 392234228Sluigi vaddr, p->name); 393234228Sluigi} 394234228Sluigi 395241719Sluigi#define netmap_if_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL) 396241719Sluigi#define netmap_if_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v)) 397241719Sluigi#define netmap_ring_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL) 398241719Sluigi#define netmap_ring_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v)) 399241719Sluigi#define netmap_buf_malloc(_pos, _index) \ 400241719Sluigi netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index) 401234228Sluigi 402234228Sluigi 403234228Sluigi/* Return the index associated to the given packet buffer */ 404234228Sluigi#define netmap_buf_index(v) \ 405241719Sluigi (netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize) 406234228Sluigi 407234228Sluigi 408241719Sluigi/* Return nonzero on error */ 409241719Sluigistatic int 410238912Sluiginetmap_new_bufs(struct netmap_if *nifp, 411234228Sluigi struct netmap_slot *slot, u_int n) 412234228Sluigi{ 413241719Sluigi struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL]; 414241719Sluigi int i = 0; /* slot counter */ 415241719Sluigi uint32_t pos = 0; /* slot in p->bitmap */ 416241719Sluigi uint32_t index = 0; /* buffer index */ 417234228Sluigi 418238912Sluigi (void)nifp; /* UNUSED */ 419234228Sluigi for (i = 0; i < n; i++) { 420241719Sluigi void *vaddr = netmap_buf_malloc(&pos, &index); 421234228Sluigi if (vaddr == NULL) { 422234228Sluigi D("unable to locate empty packet buffer"); 423234228Sluigi goto cleanup; 424234228Sluigi } 425241719Sluigi slot[i].buf_idx = index; 426234228Sluigi slot[i].len = p->_objsize; 427241719Sluigi /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload 428241719Sluigi * in the NIC ring. This is a hack that hides missing 429241719Sluigi * initializations in the drivers, and should go away. 430241719Sluigi */ 431241719Sluigi slot[i].flags = NS_BUF_CHANGED; 432234228Sluigi } 433234228Sluigi 434241719Sluigi ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); 435241719Sluigi return (0); 436234228Sluigi 437234228Sluigicleanup: 438241643Semaste while (i > 0) { 439241643Semaste i--; 440241719Sluigi netmap_obj_free(p, slot[i].buf_idx); 441234228Sluigi } 442241719Sluigi bzero(slot, n * sizeof(slot[0])); 443241719Sluigi return (ENOMEM); 444234228Sluigi} 445234228Sluigi 446234228Sluigi 447234228Sluigistatic void 448234228Sluiginetmap_free_buf(struct netmap_if *nifp, uint32_t i) 449234228Sluigi{ 450241719Sluigi struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL]; 451241719Sluigi 452234228Sluigi if (i < 2 || i >= p->objtotal) { 453234228Sluigi D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 454234228Sluigi return; 455234228Sluigi } 456241719Sluigi netmap_obj_free(p, i); 457234228Sluigi} 458234228Sluigi 459234228Sluigistatic void 460241719Sluiginetmap_reset_obj_allocator(struct netmap_obj_pool *p) 461234228Sluigi{ 462234228Sluigi if (p == NULL) 463234228Sluigi return; 464234228Sluigi if (p->bitmap) 465234228Sluigi free(p->bitmap, M_NETMAP); 466241719Sluigi p->bitmap = NULL; 467234228Sluigi if (p->lut) { 468234228Sluigi int i; 469234228Sluigi for (i = 0; i < p->objtotal; i += p->clustentries) { 470234228Sluigi if (p->lut[i].vaddr) 471234228Sluigi contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); 472234228Sluigi } 473234228Sluigi bzero(p->lut, sizeof(struct lut_entry) * p->objtotal); 474241719Sluigi#ifdef linux 475241719Sluigi vfree(p->lut); 476241719Sluigi#else 477234228Sluigi free(p->lut, M_NETMAP); 478241719Sluigi#endif 479234228Sluigi } 480241719Sluigi p->lut = NULL; 481234228Sluigi} 482234228Sluigi 483234228Sluigi/* 484241719Sluigi * Free all resources related to an allocator. 485241719Sluigi */ 486241719Sluigistatic void 487241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p) 488241719Sluigi{ 489241719Sluigi if (p == NULL) 490241719Sluigi return; 491241719Sluigi netmap_reset_obj_allocator(p); 492241719Sluigi} 493241719Sluigi 494241719Sluigi/* 495234228Sluigi * We receive a request for objtotal objects, of size objsize each. 496234228Sluigi * Internally we may round up both numbers, as we allocate objects 497234228Sluigi * in small clusters multiple of the page size. 498234228Sluigi * In the allocator we don't need to store the objsize, 499234228Sluigi * but we do need to keep track of objtotal' and clustentries, 500234228Sluigi * as they are needed when freeing memory. 501234228Sluigi * 502234228Sluigi * XXX note -- userspace needs the buffers to be contiguous, 503234228Sluigi * so we cannot afford gaps at the end of a cluster. 504234228Sluigi */ 505241719Sluigi 506241719Sluigi 507241719Sluigi/* call with NMA_LOCK held */ 508241719Sluigistatic int 509241719Sluiginetmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 510234228Sluigi{ 511234228Sluigi int i, n; 512234228Sluigi u_int clustsize; /* the cluster size, multiple of page size */ 513234228Sluigi u_int clustentries; /* how many objects per entry */ 514234228Sluigi 515234228Sluigi#define MAX_CLUSTSIZE (1<<17) 516234228Sluigi#define LINE_ROUND 64 517234228Sluigi if (objsize >= MAX_CLUSTSIZE) { 518234228Sluigi /* we could do it but there is no point */ 519234228Sluigi D("unsupported allocation for %d bytes", objsize); 520241719Sluigi goto error; 521234228Sluigi } 522234228Sluigi /* make sure objsize is a multiple of LINE_ROUND */ 523234228Sluigi i = (objsize & (LINE_ROUND - 1)); 524234228Sluigi if (i) { 525234228Sluigi D("XXX aligning object by %d bytes", LINE_ROUND - i); 526234228Sluigi objsize += LINE_ROUND - i; 527234228Sluigi } 528241719Sluigi if (objsize < p->objminsize || objsize > p->objmaxsize) { 529241719Sluigi D("requested objsize %d out of range [%d, %d]", 530241719Sluigi objsize, p->objminsize, p->objmaxsize); 531241719Sluigi goto error; 532241719Sluigi } 533241719Sluigi if (objtotal < p->nummin || objtotal > p->nummax) { 534241719Sluigi D("requested objtotal %d out of range [%d, %d]", 535241719Sluigi objtotal, p->nummin, p->nummax); 536241719Sluigi goto error; 537241719Sluigi } 538234228Sluigi /* 539234228Sluigi * Compute number of objects using a brute-force approach: 540234228Sluigi * given a max cluster size, 541234228Sluigi * we try to fill it with objects keeping track of the 542234228Sluigi * wasted space to the next page boundary. 543234228Sluigi */ 544234228Sluigi for (clustentries = 0, i = 1;; i++) { 545234228Sluigi u_int delta, used = i * objsize; 546234228Sluigi if (used > MAX_CLUSTSIZE) 547234228Sluigi break; 548234228Sluigi delta = used % PAGE_SIZE; 549234228Sluigi if (delta == 0) { // exact solution 550234228Sluigi clustentries = i; 551234228Sluigi break; 552234228Sluigi } 553234228Sluigi if (delta > ( (clustentries*objsize) % PAGE_SIZE) ) 554234228Sluigi clustentries = i; 555234228Sluigi } 556234228Sluigi // D("XXX --- ouch, delta %d (bad for buffers)", delta); 557234228Sluigi /* compute clustsize and round to the next page */ 558234228Sluigi clustsize = clustentries * objsize; 559234228Sluigi i = (clustsize & (PAGE_SIZE - 1)); 560234228Sluigi if (i) 561234228Sluigi clustsize += PAGE_SIZE - i; 562234228Sluigi D("objsize %d clustsize %d objects %d", 563234228Sluigi objsize, clustsize, clustentries); 564234228Sluigi 565234228Sluigi /* 566234228Sluigi * The number of clusters is n = ceil(objtotal/clustentries) 567234228Sluigi * objtotal' = n * clustentries 568234228Sluigi */ 569234228Sluigi p->clustentries = clustentries; 570234228Sluigi p->_clustsize = clustsize; 571234228Sluigi n = (objtotal + clustentries - 1) / clustentries; 572234228Sluigi p->_numclusters = n; 573234228Sluigi p->objtotal = n * clustentries; 574234228Sluigi p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */ 575241719Sluigi p->_memtotal = p->_numclusters * p->_clustsize; 576234228Sluigi p->_objsize = objsize; 577234228Sluigi 578241719Sluigi return 0; 579241719Sluigi 580241719Sluigierror: 581241719Sluigi p->_objsize = objsize; 582241719Sluigi p->objtotal = objtotal; 583241719Sluigi 584241719Sluigi return EINVAL; 585241719Sluigi} 586241719Sluigi 587241719Sluigi 588241719Sluigi/* call with NMA_LOCK held */ 589241719Sluigistatic int 590241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p) 591241719Sluigi{ 592241719Sluigi int i, n; 593241719Sluigi 594241719Sluigi n = sizeof(struct lut_entry) * p->objtotal; 595241719Sluigi#ifdef linux 596241719Sluigi p->lut = vmalloc(n); 597241719Sluigi#else 598241719Sluigi p->lut = malloc(n, M_NETMAP, M_WAITOK | M_ZERO); 599241719Sluigi#endif 600234228Sluigi if (p->lut == NULL) { 601241719Sluigi D("Unable to create lookup table (%d bytes) for '%s'", n, p->name); 602234228Sluigi goto clean; 603234228Sluigi } 604234228Sluigi 605234228Sluigi /* Allocate the bitmap */ 606234228Sluigi n = (p->objtotal + 31) / 32; 607234228Sluigi p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO); 608234228Sluigi if (p->bitmap == NULL) { 609234228Sluigi D("Unable to create bitmap (%d entries) for allocator '%s'", n, 610241719Sluigi p->name); 611234228Sluigi goto clean; 612234228Sluigi } 613241719Sluigi p->bitmap_slots = n; 614234228Sluigi 615234228Sluigi /* 616234228Sluigi * Allocate clusters, init pointers and bitmap 617234228Sluigi */ 618234228Sluigi for (i = 0; i < p->objtotal;) { 619241719Sluigi int lim = i + p->clustentries; 620234228Sluigi char *clust; 621234228Sluigi 622241719Sluigi clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO, 623234228Sluigi 0, -1UL, PAGE_SIZE, 0); 624234228Sluigi if (clust == NULL) { 625234228Sluigi /* 626234228Sluigi * If we get here, there is a severe memory shortage, 627234228Sluigi * so halve the allocated memory to reclaim some. 628241719Sluigi * XXX check boundaries 629234228Sluigi */ 630234228Sluigi D("Unable to create cluster at %d for '%s' allocator", 631241719Sluigi i, p->name); 632234228Sluigi lim = i / 2; 633241719Sluigi for (i--; i >= lim; i--) { 634234228Sluigi p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); 635241719Sluigi if (i % p->clustentries == 0 && p->lut[i].vaddr) 636234228Sluigi contigfree(p->lut[i].vaddr, 637234228Sluigi p->_clustsize, M_NETMAP); 638234228Sluigi } 639234228Sluigi p->objtotal = i; 640234228Sluigi p->objfree = p->objtotal - 2; 641241719Sluigi p->_numclusters = i / p->clustentries; 642234228Sluigi p->_memtotal = p->_numclusters * p->_clustsize; 643234228Sluigi break; 644234228Sluigi } 645241719Sluigi for (; i < lim; i++, clust += p->_objsize) { 646234228Sluigi p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); 647234228Sluigi p->lut[i].vaddr = clust; 648234228Sluigi p->lut[i].paddr = vtophys(clust); 649234228Sluigi } 650234228Sluigi } 651234228Sluigi p->bitmap[0] = ~3; /* objs 0 and 1 is always busy */ 652234228Sluigi D("Pre-allocated %d clusters (%d/%dKB) for '%s'", 653234228Sluigi p->_numclusters, p->_clustsize >> 10, 654241719Sluigi p->_memtotal >> 10, p->name); 655234228Sluigi 656241719Sluigi return 0; 657234228Sluigi 658234228Sluigiclean: 659241719Sluigi netmap_reset_obj_allocator(p); 660241719Sluigi return ENOMEM; 661234228Sluigi} 662234228Sluigi 663241719Sluigi/* call with lock held */ 664234228Sluigistatic int 665241719Sluiginetmap_memory_config_changed(void) 666234228Sluigi{ 667241719Sluigi int i; 668234228Sluigi 669241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 670241719Sluigi if (nm_mem.pools[i]._objsize != netmap_params[i].size || 671241719Sluigi nm_mem.pools[i].objtotal != netmap_params[i].num) 672241719Sluigi return 1; 673241719Sluigi } 674241719Sluigi return 0; 675241719Sluigi} 676234228Sluigi 677234228Sluigi 678241719Sluigi/* call with lock held */ 679241719Sluigistatic int 680241719Sluiginetmap_memory_config(void) 681241719Sluigi{ 682241719Sluigi int i; 683234228Sluigi 684234228Sluigi 685241719Sluigi if (!netmap_memory_config_changed()) 686241719Sluigi goto out; 687234228Sluigi 688241719Sluigi D("reconfiguring"); 689241719Sluigi 690241719Sluigi if (nm_mem.finalized) { 691241719Sluigi /* reset previous allocation */ 692241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 693241719Sluigi netmap_reset_obj_allocator(&nm_mem.pools[i]); 694241719Sluigi } 695241719Sluigi nm_mem.finalized = 0; 696241719Sluigi } 697241719Sluigi 698241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 699241719Sluigi nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i], 700241719Sluigi netmap_params[i].num, netmap_params[i].size); 701241719Sluigi if (nm_mem.lasterr) 702241719Sluigi goto out; 703241719Sluigi } 704241719Sluigi 705234228Sluigi D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers", 706241719Sluigi nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10, 707241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10, 708241719Sluigi nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20); 709234228Sluigi 710241719Sluigiout: 711241719Sluigi 712241719Sluigi return nm_mem.lasterr; 713241719Sluigi} 714241719Sluigi 715241719Sluigi/* call with lock held */ 716241719Sluigistatic int 717241719Sluiginetmap_memory_finalize(void) 718241719Sluigi{ 719241719Sluigi int i; 720241719Sluigi u_int totalsize = 0; 721241719Sluigi 722241719Sluigi nm_mem.refcount++; 723241719Sluigi if (nm_mem.refcount > 1) { 724241719Sluigi D("busy (refcount %d)", nm_mem.refcount); 725241719Sluigi goto out; 726234228Sluigi } 727241719Sluigi 728241719Sluigi /* update configuration if changed */ 729241719Sluigi if (netmap_memory_config()) 730241719Sluigi goto out; 731241719Sluigi 732241719Sluigi if (nm_mem.finalized) { 733241719Sluigi /* may happen if config is not changed */ 734241719Sluigi ND("nothing to do"); 735241719Sluigi goto out; 736241719Sluigi } 737241719Sluigi 738241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 739241719Sluigi nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]); 740241719Sluigi if (nm_mem.lasterr) 741241719Sluigi goto cleanup; 742241719Sluigi totalsize += nm_mem.pools[i]._memtotal; 743241719Sluigi } 744241719Sluigi nm_mem.nm_totalsize = totalsize; 745241719Sluigi 746241719Sluigi /* backward compatibility */ 747241719Sluigi netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize; 748241719Sluigi netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal; 749241719Sluigi 750241719Sluigi netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut; 751241719Sluigi netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr; 752241719Sluigi 753241719Sluigi nm_mem.finalized = 1; 754241719Sluigi nm_mem.lasterr = 0; 755241719Sluigi 756241719Sluigi /* make sysctl values match actual values in the pools */ 757241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 758241719Sluigi netmap_params[i].size = nm_mem.pools[i]._objsize; 759241719Sluigi netmap_params[i].num = nm_mem.pools[i].objtotal; 760241719Sluigi } 761241719Sluigi 762241719Sluigiout: 763241719Sluigi if (nm_mem.lasterr) 764241719Sluigi nm_mem.refcount--; 765241719Sluigi 766241719Sluigi return nm_mem.lasterr; 767241719Sluigi 768241719Sluigicleanup: 769241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 770241719Sluigi netmap_reset_obj_allocator(&nm_mem.pools[i]); 771241719Sluigi } 772241719Sluigi nm_mem.refcount--; 773241719Sluigi 774241719Sluigi return nm_mem.lasterr; 775234228Sluigi} 776234228Sluigi 777241719Sluigistatic int 778241719Sluiginetmap_memory_init(void) 779241719Sluigi{ 780241719Sluigi NMA_LOCK_INIT(); 781241719Sluigi return (0); 782241719Sluigi} 783234228Sluigi 784234228Sluigistatic void 785234228Sluiginetmap_memory_fini(void) 786234228Sluigi{ 787241719Sluigi int i; 788241719Sluigi 789241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 790241719Sluigi netmap_destroy_obj_allocator(&nm_mem.pools[i]); 791241719Sluigi } 792241719Sluigi NMA_LOCK_DESTROY(); 793234228Sluigi} 794234228Sluigi 795241719Sluigistatic void 796241719Sluiginetmap_free_rings(struct netmap_adapter *na) 797241719Sluigi{ 798241719Sluigi int i; 799241719Sluigi for (i = 0; i < na->num_tx_rings + 1; i++) { 800241719Sluigi netmap_ring_free(na->tx_rings[i].ring); 801241719Sluigi na->tx_rings[i].ring = NULL; 802241719Sluigi } 803241719Sluigi for (i = 0; i < na->num_rx_rings + 1; i++) { 804241719Sluigi netmap_ring_free(na->rx_rings[i].ring); 805241719Sluigi na->rx_rings[i].ring = NULL; 806241719Sluigi } 807241719Sluigi} 808234228Sluigi 809234228Sluigi 810241719Sluigi 811241719Sluigi/* call with NMA_LOCK held */ 812234228Sluigistatic void * 813234228Sluiginetmap_if_new(const char *ifname, struct netmap_adapter *na) 814234228Sluigi{ 815234228Sluigi struct netmap_if *nifp; 816234228Sluigi struct netmap_ring *ring; 817234228Sluigi ssize_t base; /* handy for relative offsets between rings and nifp */ 818234228Sluigi u_int i, len, ndesc; 819234228Sluigi u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */ 820234228Sluigi u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */ 821234228Sluigi struct netmap_kring *kring; 822234228Sluigi 823234228Sluigi /* 824234228Sluigi * the descriptor is followed inline by an array of offsets 825234228Sluigi * to the tx and rx rings in the shared memory region. 826234228Sluigi */ 827234228Sluigi len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t); 828234228Sluigi nifp = netmap_if_malloc(len); 829234228Sluigi if (nifp == NULL) { 830234228Sluigi return NULL; 831234228Sluigi } 832234228Sluigi 833234228Sluigi /* initialize base fields -- override const */ 834234228Sluigi *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; 835234228Sluigi *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; 836234228Sluigi strncpy(nifp->ni_name, ifname, IFNAMSIZ); 837234228Sluigi 838234228Sluigi (na->refcount)++; /* XXX atomic ? we are under lock */ 839234228Sluigi if (na->refcount > 1) { /* already setup, we are done */ 840234228Sluigi goto final; 841234228Sluigi } 842234228Sluigi 843234228Sluigi /* 844234228Sluigi * First instance, allocate netmap rings and buffers for this card 845234228Sluigi * The rings are contiguous, but have variable size. 846234228Sluigi */ 847234228Sluigi for (i = 0; i < ntx; i++) { /* Transmit rings */ 848234228Sluigi kring = &na->tx_rings[i]; 849234228Sluigi ndesc = na->num_tx_desc; 850234228Sluigi bzero(kring, sizeof(*kring)); 851234228Sluigi len = sizeof(struct netmap_ring) + 852234228Sluigi ndesc * sizeof(struct netmap_slot); 853234228Sluigi ring = netmap_ring_malloc(len); 854234228Sluigi if (ring == NULL) { 855234228Sluigi D("Cannot allocate tx_ring[%d] for %s", i, ifname); 856234228Sluigi goto cleanup; 857234228Sluigi } 858234228Sluigi ND("txring[%d] at %p ofs %d", i, ring); 859234228Sluigi kring->na = na; 860234228Sluigi kring->ring = ring; 861234228Sluigi *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc; 862234228Sluigi *(ssize_t *)(uintptr_t)&ring->buf_ofs = 863241719Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + 864241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal) - 865234228Sluigi netmap_ring_offset(ring); 866234228Sluigi 867234228Sluigi /* 868234228Sluigi * IMPORTANT: 869234228Sluigi * Always keep one slot empty, so we can detect new 870234228Sluigi * transmissions comparing cur and nr_hwcur (they are 871234228Sluigi * the same only if there are no new transmissions). 872234228Sluigi */ 873234228Sluigi ring->avail = kring->nr_hwavail = ndesc - 1; 874234228Sluigi ring->cur = kring->nr_hwcur = 0; 875234228Sluigi *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE; 876234228Sluigi ND("initializing slots for txring[%d]", i); 877241719Sluigi if (netmap_new_bufs(nifp, ring->slot, ndesc)) { 878241719Sluigi D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname); 879241719Sluigi goto cleanup; 880241719Sluigi } 881234228Sluigi } 882234228Sluigi 883234228Sluigi for (i = 0; i < nrx; i++) { /* Receive rings */ 884234228Sluigi kring = &na->rx_rings[i]; 885234228Sluigi ndesc = na->num_rx_desc; 886234228Sluigi bzero(kring, sizeof(*kring)); 887234228Sluigi len = sizeof(struct netmap_ring) + 888234228Sluigi ndesc * sizeof(struct netmap_slot); 889234228Sluigi ring = netmap_ring_malloc(len); 890234228Sluigi if (ring == NULL) { 891234228Sluigi D("Cannot allocate rx_ring[%d] for %s", i, ifname); 892234228Sluigi goto cleanup; 893234228Sluigi } 894234228Sluigi ND("rxring[%d] at %p ofs %d", i, ring); 895234228Sluigi 896234228Sluigi kring->na = na; 897234228Sluigi kring->ring = ring; 898234228Sluigi *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc; 899234228Sluigi *(ssize_t *)(uintptr_t)&ring->buf_ofs = 900241719Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + 901241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal) - 902234228Sluigi netmap_ring_offset(ring); 903234228Sluigi 904234228Sluigi ring->cur = kring->nr_hwcur = 0; 905234228Sluigi ring->avail = kring->nr_hwavail = 0; /* empty */ 906234228Sluigi *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE; 907234228Sluigi ND("initializing slots for rxring[%d]", i); 908241719Sluigi if (netmap_new_bufs(nifp, ring->slot, ndesc)) { 909241719Sluigi D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname); 910241719Sluigi goto cleanup; 911241719Sluigi } 912234228Sluigi } 913234228Sluigi#ifdef linux 914234228Sluigi // XXX initialize the selrecord structs. 915234228Sluigi for (i = 0; i < ntx; i++) 916238812Sluigi init_waitqueue_head(&na->tx_rings[i].si); 917238812Sluigi for (i = 0; i < nrx; i++) 918234228Sluigi init_waitqueue_head(&na->rx_rings[i].si); 919238812Sluigi init_waitqueue_head(&na->tx_si); 920234228Sluigi init_waitqueue_head(&na->rx_si); 921234228Sluigi#endif 922234228Sluigifinal: 923234228Sluigi /* 924234228Sluigi * fill the slots for the rx and tx rings. They contain the offset 925234228Sluigi * between the ring and nifp, so the information is usable in 926234228Sluigi * userspace to reach the ring from the nifp. 927234228Sluigi */ 928234228Sluigi base = netmap_if_offset(nifp); 929234228Sluigi for (i = 0; i < ntx; i++) { 930234228Sluigi *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 931234228Sluigi netmap_ring_offset(na->tx_rings[i].ring) - base; 932234228Sluigi } 933234228Sluigi for (i = 0; i < nrx; i++) { 934234228Sluigi *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] = 935234228Sluigi netmap_ring_offset(na->rx_rings[i].ring) - base; 936234228Sluigi } 937234228Sluigi return (nifp); 938234228Sluigicleanup: 939241719Sluigi netmap_free_rings(na); 940241719Sluigi netmap_if_free(nifp); 941241719Sluigi (na->refcount)--; 942234228Sluigi return NULL; 943234228Sluigi} 944234228Sluigi 945241719Sluigi/* call with NMA_LOCK held */ 946234228Sluigistatic void 947241719Sluiginetmap_memory_deref(void) 948234228Sluigi{ 949241719Sluigi nm_mem.refcount--; 950241719Sluigi D("refcount = %d", nm_mem.refcount); 951234228Sluigi} 952