/* netmap_mem2.c — FreeBSD revision 250184 */
/*
 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 250184 2013-05-02 16:01:04Z luigi $
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory pools:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * that contain netmap objects. Each pool is made of a number of clusters,
 * multiple of a page size, each containing an integer number of objects.
 * The clusters are contiguous in user space but not in the kernel.
 * Only nm_buf_pool needs to be dma-able,
 * but for convenience use the same type of allocator for all.
 *
 * Once mapped, the three pools are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * Objects are aligned to the cache line (64 bytes) rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with desired values, the sysctls export also absolute
 * min and maximum values that cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size, 8 byte per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (eg 1518, or more for vlans, jumbo
 *	frames etc.) plus be nicely aligned, plus some NICs restrict
 *	the size to multiple of 1K or so. Default to 2K.
 */
Default to 2K 95234228Sluigi */ 96234228Sluigi 97241719Sluigi#define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ 98234228Sluigi 99241719Sluigi#ifdef linux 100250054Sluigi// XXX a mtx would suffice here 20130415 lr 101250054Sluigi// #define NMA_LOCK_T safe_spinlock_t 102241719Sluigi#define NMA_LOCK_T struct semaphore 103241719Sluigi#define NMA_LOCK_INIT() sema_init(&nm_mem.nm_mtx, 1) 104250184Sluigi#define NMA_LOCK_DESTROY() 105241719Sluigi#define NMA_LOCK() down(&nm_mem.nm_mtx) 106241719Sluigi#define NMA_UNLOCK() up(&nm_mem.nm_mtx) 107241719Sluigi#else /* !linux */ 108241719Sluigi#define NMA_LOCK_T struct mtx 109241719Sluigi#define NMA_LOCK_INIT() mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF) 110241719Sluigi#define NMA_LOCK_DESTROY() mtx_destroy(&nm_mem.nm_mtx) 111241719Sluigi#define NMA_LOCK() mtx_lock(&nm_mem.nm_mtx) 112241719Sluigi#define NMA_UNLOCK() mtx_unlock(&nm_mem.nm_mtx) 113241719Sluigi#endif /* linux */ 114234228Sluigi 115241719Sluigienum { 116241719Sluigi NETMAP_IF_POOL = 0, 117241719Sluigi NETMAP_RING_POOL, 118241719Sluigi NETMAP_BUF_POOL, 119241719Sluigi NETMAP_POOLS_NR 120241719Sluigi}; 121241719Sluigi 122241719Sluigi 123241719Sluigistruct netmap_obj_params { 124241719Sluigi u_int size; 125241719Sluigi u_int num; 126241719Sluigi}; 127241719Sluigi 128241719Sluigi 129241719Sluigistruct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { 130241719Sluigi [NETMAP_IF_POOL] = { 131241719Sluigi .size = 1024, 132241719Sluigi .num = 100, 133241719Sluigi }, 134241719Sluigi [NETMAP_RING_POOL] = { 135241719Sluigi .size = 9*PAGE_SIZE, 136241719Sluigi .num = 200, 137241719Sluigi }, 138241719Sluigi [NETMAP_BUF_POOL] = { 139241719Sluigi .size = 2048, 140241719Sluigi .num = NETMAP_BUF_MAX_NUM, 141241719Sluigi }, 142241719Sluigi}; 143241719Sluigi 144241719Sluigi 145234228Sluigistruct netmap_obj_pool { 146234228Sluigi char name[16]; /* name of the allocator */ 147234228Sluigi u_int objtotal; /* actual total number of objects. 
*/ 148234228Sluigi u_int objfree; /* number of free objects. */ 149234228Sluigi u_int clustentries; /* actual objects per cluster */ 150234228Sluigi 151241719Sluigi /* limits */ 152241719Sluigi u_int objminsize; /* minimum object size */ 153241719Sluigi u_int objmaxsize; /* maximum object size */ 154241719Sluigi u_int nummin; /* minimum number of objects */ 155241719Sluigi u_int nummax; /* maximum number of objects */ 156241719Sluigi 157234228Sluigi /* the total memory space is _numclusters*_clustsize */ 158234228Sluigi u_int _numclusters; /* how many clusters */ 159234228Sluigi u_int _clustsize; /* cluster size */ 160234228Sluigi u_int _objsize; /* actual object size */ 161234228Sluigi 162234228Sluigi u_int _memtotal; /* _numclusters*_clustsize */ 163234228Sluigi struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ 164234228Sluigi uint32_t *bitmap; /* one bit per buffer, 1 means free */ 165241719Sluigi uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ 166234228Sluigi}; 167234228Sluigi 168241719Sluigi 169234228Sluigistruct netmap_mem_d { 170241719Sluigi NMA_LOCK_T nm_mtx; /* protect the allocator */ 171234228Sluigi u_int nm_totalsize; /* shorthand */ 172234228Sluigi 173241719Sluigi int finalized; /* !=0 iff preallocation done */ 174241719Sluigi int lasterr; /* last error for curr config */ 175241719Sluigi int refcount; /* existing priv structures */ 176241719Sluigi /* the three allocators */ 177241719Sluigi struct netmap_obj_pool pools[NETMAP_POOLS_NR]; 178234228Sluigi}; 179234228Sluigi 180249659Sluigi/* 181249659Sluigi * nm_mem is the memory allocator used for all physical interfaces 182249659Sluigi * running in netmap mode. 183249659Sluigi * Virtual (VALE) ports will have each its own allocator. 184249659Sluigi */ 185241719Sluigistatic struct netmap_mem_d nm_mem = { /* Our memory allocator. 
*/ 186241719Sluigi .pools = { 187241719Sluigi [NETMAP_IF_POOL] = { 188241719Sluigi .name = "netmap_if", 189241719Sluigi .objminsize = sizeof(struct netmap_if), 190241719Sluigi .objmaxsize = 4096, 191241719Sluigi .nummin = 10, /* don't be stingy */ 192241719Sluigi .nummax = 10000, /* XXX very large */ 193241719Sluigi }, 194241719Sluigi [NETMAP_RING_POOL] = { 195241719Sluigi .name = "netmap_ring", 196241719Sluigi .objminsize = sizeof(struct netmap_ring), 197241719Sluigi .objmaxsize = 32*PAGE_SIZE, 198241719Sluigi .nummin = 2, 199241719Sluigi .nummax = 1024, 200241719Sluigi }, 201241719Sluigi [NETMAP_BUF_POOL] = { 202241719Sluigi .name = "netmap_buf", 203241719Sluigi .objminsize = 64, 204241719Sluigi .objmaxsize = 65536, 205241719Sluigi .nummin = 4, 206241719Sluigi .nummax = 1000000, /* one million! */ 207241719Sluigi }, 208241719Sluigi }, 209241719Sluigi}; 210241719Sluigi 211249659Sluigi// XXX logically belongs to nm_mem 212234228Sluigistruct lut_entry *netmap_buffer_lut; /* exported */ 213234228Sluigi 214241719Sluigi/* memory allocator related sysctls */ 215234228Sluigi 216241719Sluigi#define STRINGIFY(x) #x 217241719Sluigi 218241719Sluigi#define DECLARE_SYSCTLS(id, name) \ 219241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 220241719Sluigi CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 221241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 222241719Sluigi CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 223241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 224241719Sluigi CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 225241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 226241719Sluigi CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s") 227241719Sluigi 228241719SluigiDECLARE_SYSCTLS(NETMAP_IF_POOL, if); 
229241719SluigiDECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 230241719SluigiDECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 231241719Sluigi 232234228Sluigi/* 233249659Sluigi * Convert a userspace offset to a physical address. 234249659Sluigi * XXX only called in the FreeBSD's netmap_mmap() 235249659Sluigi * because in linux we map everything at once. 236234228Sluigi * 237249659Sluigi * First, find the allocator that contains the requested offset, 238249659Sluigi * then locate the cluster through a lookup table. 239234228Sluigi */ 240234228Sluigistatic inline vm_paddr_t 241234228Sluiginetmap_ofstophys(vm_offset_t offset) 242234228Sluigi{ 243234228Sluigi int i; 244234228Sluigi vm_offset_t o = offset; 245241719Sluigi struct netmap_obj_pool *p = nm_mem.pools; 246234228Sluigi 247241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) { 248241719Sluigi if (offset >= p[i]._memtotal) 249234228Sluigi continue; 250249659Sluigi // now lookup the cluster's address 251241719Sluigi return p[i].lut[offset / p[i]._objsize].paddr + 252241719Sluigi offset % p[i]._objsize; 253234228Sluigi } 254241719Sluigi /* this is only in case of errors */ 255234290Sluigi D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 256241719Sluigi p[NETMAP_IF_POOL]._memtotal, 257241719Sluigi p[NETMAP_IF_POOL]._memtotal 258241719Sluigi + p[NETMAP_RING_POOL]._memtotal, 259241719Sluigi p[NETMAP_IF_POOL]._memtotal 260241719Sluigi + p[NETMAP_RING_POOL]._memtotal 261241719Sluigi + p[NETMAP_BUF_POOL]._memtotal); 262234228Sluigi return 0; // XXX bad address 263234228Sluigi} 264234228Sluigi 265234228Sluigi/* 266234228Sluigi * we store objects by kernel address, need to find the offset 267234228Sluigi * within the pool to export the value to userspace. 
268234228Sluigi * Algorithm: scan until we find the cluster, then add the 269234228Sluigi * actual offset in the cluster 270234228Sluigi */ 271234242Sluigistatic ssize_t 272234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 273234228Sluigi{ 274234228Sluigi int i, k = p->clustentries, n = p->objtotal; 275234228Sluigi ssize_t ofs = 0; 276234228Sluigi 277234228Sluigi for (i = 0; i < n; i += k, ofs += p->_clustsize) { 278234228Sluigi const char *base = p->lut[i].vaddr; 279234228Sluigi ssize_t relofs = (const char *) vaddr - base; 280234228Sluigi 281249504Sluigi if (relofs < 0 || relofs >= p->_clustsize) 282234228Sluigi continue; 283234228Sluigi 284234228Sluigi ofs = ofs + relofs; 285234228Sluigi ND("%s: return offset %d (cluster %d) for pointer %p", 286234228Sluigi p->name, ofs, i, vaddr); 287234228Sluigi return ofs; 288234228Sluigi } 289234228Sluigi D("address %p is not contained inside any cluster (%s)", 290234228Sluigi vaddr, p->name); 291234228Sluigi return 0; /* An error occurred */ 292234228Sluigi} 293234228Sluigi 294234228Sluigi/* Helper functions which convert virtual addresses to offsets */ 295234228Sluigi#define netmap_if_offset(v) \ 296241719Sluigi netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v)) 297234228Sluigi 298234228Sluigi#define netmap_ring_offset(v) \ 299249659Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \ 300241719Sluigi netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v))) 301234228Sluigi 302234228Sluigi#define netmap_buf_offset(v) \ 303249659Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \ 304249659Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal + \ 305241719Sluigi netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v))) 306234228Sluigi 307234228Sluigi 308241719Sluigi/* 309241719Sluigi * report the index, and use start position as a hint, 310241719Sluigi * otherwise buffer allocation becomes terribly expensive. 
311241719Sluigi */ 312234228Sluigistatic void * 313241719Sluiginetmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index) 314234228Sluigi{ 315234228Sluigi uint32_t i = 0; /* index in the bitmap */ 316234228Sluigi uint32_t mask, j; /* slot counter */ 317234228Sluigi void *vaddr = NULL; 318234228Sluigi 319234228Sluigi if (len > p->_objsize) { 320234228Sluigi D("%s request size %d too large", p->name, len); 321234228Sluigi // XXX cannot reduce the size 322234228Sluigi return NULL; 323234228Sluigi } 324234228Sluigi 325234228Sluigi if (p->objfree == 0) { 326234228Sluigi D("%s allocator: run out of memory", p->name); 327234228Sluigi return NULL; 328234228Sluigi } 329241719Sluigi if (start) 330241719Sluigi i = *start; 331234228Sluigi 332241719Sluigi /* termination is guaranteed by p->free, but better check bounds on i */ 333241719Sluigi while (vaddr == NULL && i < p->bitmap_slots) { 334234228Sluigi uint32_t cur = p->bitmap[i]; 335234228Sluigi if (cur == 0) { /* bitmask is fully used */ 336234228Sluigi i++; 337234228Sluigi continue; 338234228Sluigi } 339234228Sluigi /* locate a slot */ 340234228Sluigi for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 341234228Sluigi ; 342234228Sluigi 343234228Sluigi p->bitmap[i] &= ~mask; /* mark object as in use */ 344234228Sluigi p->objfree--; 345234228Sluigi 346234228Sluigi vaddr = p->lut[i * 32 + j].vaddr; 347241719Sluigi if (index) 348241719Sluigi *index = i * 32 + j; 349234228Sluigi } 350234228Sluigi ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); 351234228Sluigi 352241719Sluigi if (start) 353241719Sluigi *start = i; 354234228Sluigi return vaddr; 355234228Sluigi} 356234228Sluigi 357234228Sluigi 358234228Sluigi/* 359249659Sluigi * free by index, not by address. 
This is slow, but is only used 360249659Sluigi * for a small number of objects (rings, nifp) 361234228Sluigi */ 362234228Sluigistatic void 363234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 364234228Sluigi{ 365234228Sluigi if (j >= p->objtotal) { 366234228Sluigi D("invalid index %u, max %u", j, p->objtotal); 367234228Sluigi return; 368234228Sluigi } 369234228Sluigi p->bitmap[j / 32] |= (1 << (j % 32)); 370234228Sluigi p->objfree++; 371234228Sluigi return; 372234228Sluigi} 373234228Sluigi 374234228Sluigistatic void 375234228Sluiginetmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 376234228Sluigi{ 377234228Sluigi int i, j, n = p->_memtotal / p->_clustsize; 378234228Sluigi 379234228Sluigi for (i = 0, j = 0; i < n; i++, j += p->clustentries) { 380234228Sluigi void *base = p->lut[i * p->clustentries].vaddr; 381234228Sluigi ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 382234228Sluigi 383234228Sluigi /* Given address, is out of the scope of the current cluster.*/ 384234228Sluigi if (vaddr < base || relofs > p->_clustsize) 385234228Sluigi continue; 386234228Sluigi 387234228Sluigi j = j + relofs / p->_objsize; 388234228Sluigi KASSERT(j != 0, ("Cannot free object 0")); 389234228Sluigi netmap_obj_free(p, j); 390234228Sluigi return; 391234228Sluigi } 392245835Sluigi D("address %p is not contained inside any cluster (%s)", 393234228Sluigi vaddr, p->name); 394234228Sluigi} 395234228Sluigi 396241719Sluigi#define netmap_if_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL) 397241719Sluigi#define netmap_if_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v)) 398241719Sluigi#define netmap_ring_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL) 399241719Sluigi#define netmap_ring_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v)) 400241719Sluigi#define netmap_buf_malloc(_pos, _index) \ 401241719Sluigi netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, 
_index) 402234228Sluigi 403234228Sluigi 404234228Sluigi/* Return the index associated to the given packet buffer */ 405234228Sluigi#define netmap_buf_index(v) \ 406241719Sluigi (netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize) 407234228Sluigi 408234228Sluigi 409241719Sluigi/* Return nonzero on error */ 410241719Sluigistatic int 411238912Sluiginetmap_new_bufs(struct netmap_if *nifp, 412234228Sluigi struct netmap_slot *slot, u_int n) 413234228Sluigi{ 414241719Sluigi struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL]; 415241719Sluigi int i = 0; /* slot counter */ 416241719Sluigi uint32_t pos = 0; /* slot in p->bitmap */ 417241719Sluigi uint32_t index = 0; /* buffer index */ 418234228Sluigi 419238912Sluigi (void)nifp; /* UNUSED */ 420234228Sluigi for (i = 0; i < n; i++) { 421241719Sluigi void *vaddr = netmap_buf_malloc(&pos, &index); 422234228Sluigi if (vaddr == NULL) { 423234228Sluigi D("unable to locate empty packet buffer"); 424234228Sluigi goto cleanup; 425234228Sluigi } 426241719Sluigi slot[i].buf_idx = index; 427234228Sluigi slot[i].len = p->_objsize; 428241719Sluigi /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload 429241719Sluigi * in the NIC ring. This is a hack that hides missing 430241719Sluigi * initializations in the drivers, and should go away. 
431241719Sluigi */ 432249659Sluigi // slot[i].flags = NS_BUF_CHANGED; 433234228Sluigi } 434234228Sluigi 435241719Sluigi ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); 436241719Sluigi return (0); 437234228Sluigi 438234228Sluigicleanup: 439241643Semaste while (i > 0) { 440241643Semaste i--; 441241719Sluigi netmap_obj_free(p, slot[i].buf_idx); 442234228Sluigi } 443241719Sluigi bzero(slot, n * sizeof(slot[0])); 444241719Sluigi return (ENOMEM); 445234228Sluigi} 446234228Sluigi 447234228Sluigi 448234228Sluigistatic void 449234228Sluiginetmap_free_buf(struct netmap_if *nifp, uint32_t i) 450234228Sluigi{ 451241719Sluigi struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL]; 452241719Sluigi 453234228Sluigi if (i < 2 || i >= p->objtotal) { 454234228Sluigi D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 455234228Sluigi return; 456234228Sluigi } 457241719Sluigi netmap_obj_free(p, i); 458234228Sluigi} 459234228Sluigi 460234228Sluigistatic void 461241719Sluiginetmap_reset_obj_allocator(struct netmap_obj_pool *p) 462234228Sluigi{ 463234228Sluigi if (p == NULL) 464234228Sluigi return; 465234228Sluigi if (p->bitmap) 466234228Sluigi free(p->bitmap, M_NETMAP); 467241719Sluigi p->bitmap = NULL; 468234228Sluigi if (p->lut) { 469234228Sluigi int i; 470234228Sluigi for (i = 0; i < p->objtotal; i += p->clustentries) { 471234228Sluigi if (p->lut[i].vaddr) 472234228Sluigi contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); 473234228Sluigi } 474234228Sluigi bzero(p->lut, sizeof(struct lut_entry) * p->objtotal); 475241719Sluigi#ifdef linux 476241719Sluigi vfree(p->lut); 477241719Sluigi#else 478234228Sluigi free(p->lut, M_NETMAP); 479241719Sluigi#endif 480234228Sluigi } 481241719Sluigi p->lut = NULL; 482234228Sluigi} 483234228Sluigi 484234228Sluigi/* 485241719Sluigi * Free all resources related to an allocator. 
486241719Sluigi */ 487241719Sluigistatic void 488241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p) 489241719Sluigi{ 490241719Sluigi if (p == NULL) 491241719Sluigi return; 492241719Sluigi netmap_reset_obj_allocator(p); 493241719Sluigi} 494241719Sluigi 495241719Sluigi/* 496234228Sluigi * We receive a request for objtotal objects, of size objsize each. 497234228Sluigi * Internally we may round up both numbers, as we allocate objects 498234228Sluigi * in small clusters multiple of the page size. 499234228Sluigi * In the allocator we don't need to store the objsize, 500234228Sluigi * but we do need to keep track of objtotal' and clustentries, 501234228Sluigi * as they are needed when freeing memory. 502234228Sluigi * 503234228Sluigi * XXX note -- userspace needs the buffers to be contiguous, 504234228Sluigi * so we cannot afford gaps at the end of a cluster. 505234228Sluigi */ 506241719Sluigi 507241719Sluigi 508241719Sluigi/* call with NMA_LOCK held */ 509241719Sluigistatic int 510241719Sluiginetmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 511234228Sluigi{ 512234228Sluigi int i, n; 513234228Sluigi u_int clustsize; /* the cluster size, multiple of page size */ 514234228Sluigi u_int clustentries; /* how many objects per entry */ 515234228Sluigi 516234228Sluigi#define MAX_CLUSTSIZE (1<<17) 517234228Sluigi#define LINE_ROUND 64 518234228Sluigi if (objsize >= MAX_CLUSTSIZE) { 519234228Sluigi /* we could do it but there is no point */ 520234228Sluigi D("unsupported allocation for %d bytes", objsize); 521241719Sluigi goto error; 522234228Sluigi } 523234228Sluigi /* make sure objsize is a multiple of LINE_ROUND */ 524234228Sluigi i = (objsize & (LINE_ROUND - 1)); 525234228Sluigi if (i) { 526234228Sluigi D("XXX aligning object by %d bytes", LINE_ROUND - i); 527234228Sluigi objsize += LINE_ROUND - i; 528234228Sluigi } 529241719Sluigi if (objsize < p->objminsize || objsize > p->objmaxsize) { 530250184Sluigi D("requested 
objsize %d out of range [%d, %d]", 531241719Sluigi objsize, p->objminsize, p->objmaxsize); 532241719Sluigi goto error; 533241719Sluigi } 534241719Sluigi if (objtotal < p->nummin || objtotal > p->nummax) { 535250184Sluigi D("requested objtotal %d out of range [%d, %d]", 536241719Sluigi objtotal, p->nummin, p->nummax); 537241719Sluigi goto error; 538241719Sluigi } 539234228Sluigi /* 540234228Sluigi * Compute number of objects using a brute-force approach: 541234228Sluigi * given a max cluster size, 542234228Sluigi * we try to fill it with objects keeping track of the 543234228Sluigi * wasted space to the next page boundary. 544234228Sluigi */ 545234228Sluigi for (clustentries = 0, i = 1;; i++) { 546234228Sluigi u_int delta, used = i * objsize; 547234228Sluigi if (used > MAX_CLUSTSIZE) 548234228Sluigi break; 549234228Sluigi delta = used % PAGE_SIZE; 550234228Sluigi if (delta == 0) { // exact solution 551234228Sluigi clustentries = i; 552234228Sluigi break; 553234228Sluigi } 554234228Sluigi if (delta > ( (clustentries*objsize) % PAGE_SIZE) ) 555234228Sluigi clustentries = i; 556234228Sluigi } 557234228Sluigi // D("XXX --- ouch, delta %d (bad for buffers)", delta); 558234228Sluigi /* compute clustsize and round to the next page */ 559234228Sluigi clustsize = clustentries * objsize; 560234228Sluigi i = (clustsize & (PAGE_SIZE - 1)); 561234228Sluigi if (i) 562234228Sluigi clustsize += PAGE_SIZE - i; 563245835Sluigi if (netmap_verbose) 564245835Sluigi D("objsize %d clustsize %d objects %d", 565245835Sluigi objsize, clustsize, clustentries); 566234228Sluigi 567234228Sluigi /* 568234228Sluigi * The number of clusters is n = ceil(objtotal/clustentries) 569234228Sluigi * objtotal' = n * clustentries 570234228Sluigi */ 571234228Sluigi p->clustentries = clustentries; 572234228Sluigi p->_clustsize = clustsize; 573234228Sluigi n = (objtotal + clustentries - 1) / clustentries; 574234228Sluigi p->_numclusters = n; 575234228Sluigi p->objtotal = n * clustentries; 576234228Sluigi 
p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */ 577241719Sluigi p->_memtotal = p->_numclusters * p->_clustsize; 578234228Sluigi p->_objsize = objsize; 579234228Sluigi 580241719Sluigi return 0; 581241719Sluigi 582241719Sluigierror: 583241719Sluigi p->_objsize = objsize; 584241719Sluigi p->objtotal = objtotal; 585241719Sluigi 586241719Sluigi return EINVAL; 587241719Sluigi} 588241719Sluigi 589241719Sluigi 590241719Sluigi/* call with NMA_LOCK held */ 591241719Sluigistatic int 592241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p) 593241719Sluigi{ 594241719Sluigi int i, n; 595241719Sluigi 596241719Sluigi n = sizeof(struct lut_entry) * p->objtotal; 597241719Sluigi#ifdef linux 598241719Sluigi p->lut = vmalloc(n); 599241719Sluigi#else 600241750Semaste p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO); 601241719Sluigi#endif 602234228Sluigi if (p->lut == NULL) { 603241719Sluigi D("Unable to create lookup table (%d bytes) for '%s'", n, p->name); 604234228Sluigi goto clean; 605234228Sluigi } 606234228Sluigi 607234228Sluigi /* Allocate the bitmap */ 608234228Sluigi n = (p->objtotal + 31) / 32; 609241750Semaste p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO); 610234228Sluigi if (p->bitmap == NULL) { 611234228Sluigi D("Unable to create bitmap (%d entries) for allocator '%s'", n, 612241719Sluigi p->name); 613234228Sluigi goto clean; 614234228Sluigi } 615241719Sluigi p->bitmap_slots = n; 616234228Sluigi 617234228Sluigi /* 618234228Sluigi * Allocate clusters, init pointers and bitmap 619234228Sluigi */ 620234228Sluigi for (i = 0; i < p->objtotal;) { 621241719Sluigi int lim = i + p->clustentries; 622234228Sluigi char *clust; 623234228Sluigi 624241719Sluigi clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO, 625234228Sluigi 0, -1UL, PAGE_SIZE, 0); 626234228Sluigi if (clust == NULL) { 627234228Sluigi /* 628234228Sluigi * If we get here, there is a severe memory shortage, 629234228Sluigi * so halve the allocated memory to 
reclaim some. 630241719Sluigi * XXX check boundaries 631234228Sluigi */ 632234228Sluigi D("Unable to create cluster at %d for '%s' allocator", 633241719Sluigi i, p->name); 634234228Sluigi lim = i / 2; 635241719Sluigi for (i--; i >= lim; i--) { 636234228Sluigi p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); 637241719Sluigi if (i % p->clustentries == 0 && p->lut[i].vaddr) 638234228Sluigi contigfree(p->lut[i].vaddr, 639234228Sluigi p->_clustsize, M_NETMAP); 640234228Sluigi } 641234228Sluigi p->objtotal = i; 642234228Sluigi p->objfree = p->objtotal - 2; 643241719Sluigi p->_numclusters = i / p->clustentries; 644234228Sluigi p->_memtotal = p->_numclusters * p->_clustsize; 645234228Sluigi break; 646234228Sluigi } 647241719Sluigi for (; i < lim; i++, clust += p->_objsize) { 648234228Sluigi p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); 649234228Sluigi p->lut[i].vaddr = clust; 650234228Sluigi p->lut[i].paddr = vtophys(clust); 651234228Sluigi } 652234228Sluigi } 653234228Sluigi p->bitmap[0] = ~3; /* objs 0 and 1 is always busy */ 654245835Sluigi if (netmap_verbose) 655245835Sluigi D("Pre-allocated %d clusters (%d/%dKB) for '%s'", 656245835Sluigi p->_numclusters, p->_clustsize >> 10, 657245835Sluigi p->_memtotal >> 10, p->name); 658234228Sluigi 659241719Sluigi return 0; 660234228Sluigi 661234228Sluigiclean: 662241719Sluigi netmap_reset_obj_allocator(p); 663241719Sluigi return ENOMEM; 664234228Sluigi} 665234228Sluigi 666241719Sluigi/* call with lock held */ 667234228Sluigistatic int 668241719Sluiginetmap_memory_config_changed(void) 669234228Sluigi{ 670241719Sluigi int i; 671234228Sluigi 672241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 673241719Sluigi if (nm_mem.pools[i]._objsize != netmap_params[i].size || 674241719Sluigi nm_mem.pools[i].objtotal != netmap_params[i].num) 675241719Sluigi return 1; 676241719Sluigi } 677241719Sluigi return 0; 678241719Sluigi} 679234228Sluigi 680234228Sluigi 681241719Sluigi/* call with lock held */ 682241719Sluigistatic int 
683241719Sluiginetmap_memory_config(void) 684241719Sluigi{ 685241719Sluigi int i; 686234228Sluigi 687241719Sluigi if (!netmap_memory_config_changed()) 688241719Sluigi goto out; 689234228Sluigi 690241719Sluigi D("reconfiguring"); 691241719Sluigi 692241719Sluigi if (nm_mem.finalized) { 693241719Sluigi /* reset previous allocation */ 694241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 695241719Sluigi netmap_reset_obj_allocator(&nm_mem.pools[i]); 696250184Sluigi } 697241719Sluigi nm_mem.finalized = 0; 698241719Sluigi } 699241719Sluigi 700241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 701241719Sluigi nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i], 702241719Sluigi netmap_params[i].num, netmap_params[i].size); 703241719Sluigi if (nm_mem.lasterr) 704241719Sluigi goto out; 705241719Sluigi } 706241719Sluigi 707234228Sluigi D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers", 708241719Sluigi nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10, 709241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10, 710241719Sluigi nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20); 711234228Sluigi 712241719Sluigiout: 713241719Sluigi 714241719Sluigi return nm_mem.lasterr; 715241719Sluigi} 716241719Sluigi 717241719Sluigi/* call with lock held */ 718241719Sluigistatic int 719241719Sluiginetmap_memory_finalize(void) 720241719Sluigi{ 721241719Sluigi int i; 722241719Sluigi u_int totalsize = 0; 723241719Sluigi 724241719Sluigi nm_mem.refcount++; 725241719Sluigi if (nm_mem.refcount > 1) { 726245835Sluigi ND("busy (refcount %d)", nm_mem.refcount); 727241719Sluigi goto out; 728234228Sluigi } 729241719Sluigi 730241719Sluigi /* update configuration if changed */ 731241719Sluigi if (netmap_memory_config()) 732241719Sluigi goto out; 733241719Sluigi 734241719Sluigi if (nm_mem.finalized) { 735241719Sluigi /* may happen if config is not changed */ 736241719Sluigi ND("nothing to do"); 737241719Sluigi goto out; 738241719Sluigi } 739241719Sluigi 740241719Sluigi for (i = 0; i 
< NETMAP_POOLS_NR; i++) { 741241719Sluigi nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]); 742241719Sluigi if (nm_mem.lasterr) 743241719Sluigi goto cleanup; 744241719Sluigi totalsize += nm_mem.pools[i]._memtotal; 745241719Sluigi } 746241719Sluigi nm_mem.nm_totalsize = totalsize; 747241719Sluigi 748241719Sluigi /* backward compatibility */ 749241719Sluigi netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize; 750241719Sluigi netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal; 751241719Sluigi 752241719Sluigi netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut; 753241719Sluigi netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr; 754241719Sluigi 755241719Sluigi nm_mem.finalized = 1; 756241719Sluigi nm_mem.lasterr = 0; 757241719Sluigi 758241719Sluigi /* make sysctl values match actual values in the pools */ 759241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 760241719Sluigi netmap_params[i].size = nm_mem.pools[i]._objsize; 761241719Sluigi netmap_params[i].num = nm_mem.pools[i].objtotal; 762241719Sluigi } 763241719Sluigi 764241719Sluigiout: 765241719Sluigi if (nm_mem.lasterr) 766241719Sluigi nm_mem.refcount--; 767241719Sluigi 768241719Sluigi return nm_mem.lasterr; 769241719Sluigi 770241719Sluigicleanup: 771241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 772241719Sluigi netmap_reset_obj_allocator(&nm_mem.pools[i]); 773241719Sluigi } 774241719Sluigi nm_mem.refcount--; 775241719Sluigi 776241719Sluigi return nm_mem.lasterr; 777234228Sluigi} 778234228Sluigi 779241719Sluigistatic int 780241719Sluiginetmap_memory_init(void) 781241719Sluigi{ 782241719Sluigi NMA_LOCK_INIT(); 783241719Sluigi return (0); 784241719Sluigi} 785234228Sluigi 786234228Sluigistatic void 787234228Sluiginetmap_memory_fini(void) 788234228Sluigi{ 789241719Sluigi int i; 790241719Sluigi 791241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 792241719Sluigi netmap_destroy_obj_allocator(&nm_mem.pools[i]); 793241719Sluigi } 794241719Sluigi NMA_LOCK_DESTROY(); 
795234228Sluigi} 796234228Sluigi 797241719Sluigistatic void 798241719Sluiginetmap_free_rings(struct netmap_adapter *na) 799241719Sluigi{ 800241719Sluigi int i; 801245835Sluigi if (!na->tx_rings) 802245835Sluigi return; 803241719Sluigi for (i = 0; i < na->num_tx_rings + 1; i++) { 804241719Sluigi netmap_ring_free(na->tx_rings[i].ring); 805241719Sluigi na->tx_rings[i].ring = NULL; 806241719Sluigi } 807241719Sluigi for (i = 0; i < na->num_rx_rings + 1; i++) { 808241719Sluigi netmap_ring_free(na->rx_rings[i].ring); 809241719Sluigi na->rx_rings[i].ring = NULL; 810241719Sluigi } 811245835Sluigi free(na->tx_rings, M_DEVBUF); 812245835Sluigi na->tx_rings = na->rx_rings = NULL; 813241719Sluigi} 814234228Sluigi 815234228Sluigi 816241719Sluigi 817241719Sluigi/* call with NMA_LOCK held */ 818245835Sluigi/* 819245835Sluigi * Allocate the per-fd structure netmap_if. 820245835Sluigi * If this is the first instance, also allocate the krings, rings etc. 821245835Sluigi */ 822234228Sluigistatic void * 823234228Sluiginetmap_if_new(const char *ifname, struct netmap_adapter *na) 824234228Sluigi{ 825234228Sluigi struct netmap_if *nifp; 826234228Sluigi struct netmap_ring *ring; 827234228Sluigi ssize_t base; /* handy for relative offsets between rings and nifp */ 828245835Sluigi u_int i, len, ndesc, ntx, nrx; 829234228Sluigi struct netmap_kring *kring; 830234228Sluigi 831245835Sluigi if (netmap_update_config(na)) { 832245835Sluigi /* configuration mismatch, report and fail */ 833245835Sluigi return NULL; 834245835Sluigi } 835245835Sluigi ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */ 836245835Sluigi nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */ 837234228Sluigi /* 838234228Sluigi * the descriptor is followed inline by an array of offsets 839234228Sluigi * to the tx and rx rings in the shared memory region. 
840234228Sluigi */ 841234228Sluigi len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t); 842234228Sluigi nifp = netmap_if_malloc(len); 843234228Sluigi if (nifp == NULL) { 844234228Sluigi return NULL; 845234228Sluigi } 846234228Sluigi 847234228Sluigi /* initialize base fields -- override const */ 848234228Sluigi *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; 849234228Sluigi *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; 850234228Sluigi strncpy(nifp->ni_name, ifname, IFNAMSIZ); 851234228Sluigi 852234228Sluigi (na->refcount)++; /* XXX atomic ? we are under lock */ 853234228Sluigi if (na->refcount > 1) { /* already setup, we are done */ 854234228Sluigi goto final; 855234228Sluigi } 856234228Sluigi 857245835Sluigi len = (ntx + nrx) * sizeof(struct netmap_kring); 858245835Sluigi na->tx_rings = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 859245835Sluigi if (na->tx_rings == NULL) { 860245835Sluigi D("Cannot allocate krings for %s", ifname); 861245835Sluigi goto cleanup; 862245835Sluigi } 863245835Sluigi na->rx_rings = na->tx_rings + ntx; 864245835Sluigi 865234228Sluigi /* 866234228Sluigi * First instance, allocate netmap rings and buffers for this card 867234228Sluigi * The rings are contiguous, but have variable size. 
868234228Sluigi */ 869234228Sluigi for (i = 0; i < ntx; i++) { /* Transmit rings */ 870234228Sluigi kring = &na->tx_rings[i]; 871234228Sluigi ndesc = na->num_tx_desc; 872234228Sluigi bzero(kring, sizeof(*kring)); 873234228Sluigi len = sizeof(struct netmap_ring) + 874234228Sluigi ndesc * sizeof(struct netmap_slot); 875234228Sluigi ring = netmap_ring_malloc(len); 876234228Sluigi if (ring == NULL) { 877234228Sluigi D("Cannot allocate tx_ring[%d] for %s", i, ifname); 878234228Sluigi goto cleanup; 879234228Sluigi } 880234228Sluigi ND("txring[%d] at %p ofs %d", i, ring); 881234228Sluigi kring->na = na; 882234228Sluigi kring->ring = ring; 883234228Sluigi *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc; 884234228Sluigi *(ssize_t *)(uintptr_t)&ring->buf_ofs = 885241719Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + 886241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal) - 887234228Sluigi netmap_ring_offset(ring); 888234228Sluigi 889234228Sluigi /* 890234228Sluigi * IMPORTANT: 891234228Sluigi * Always keep one slot empty, so we can detect new 892234228Sluigi * transmissions comparing cur and nr_hwcur (they are 893234228Sluigi * the same only if there are no new transmissions). 
894234228Sluigi */ 895234228Sluigi ring->avail = kring->nr_hwavail = ndesc - 1; 896234228Sluigi ring->cur = kring->nr_hwcur = 0; 897234228Sluigi *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE; 898234228Sluigi ND("initializing slots for txring[%d]", i); 899241719Sluigi if (netmap_new_bufs(nifp, ring->slot, ndesc)) { 900241719Sluigi D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname); 901241719Sluigi goto cleanup; 902241719Sluigi } 903234228Sluigi } 904234228Sluigi 905234228Sluigi for (i = 0; i < nrx; i++) { /* Receive rings */ 906234228Sluigi kring = &na->rx_rings[i]; 907234228Sluigi ndesc = na->num_rx_desc; 908234228Sluigi bzero(kring, sizeof(*kring)); 909234228Sluigi len = sizeof(struct netmap_ring) + 910234228Sluigi ndesc * sizeof(struct netmap_slot); 911234228Sluigi ring = netmap_ring_malloc(len); 912234228Sluigi if (ring == NULL) { 913234228Sluigi D("Cannot allocate rx_ring[%d] for %s", i, ifname); 914234228Sluigi goto cleanup; 915234228Sluigi } 916234228Sluigi ND("rxring[%d] at %p ofs %d", i, ring); 917234228Sluigi 918234228Sluigi kring->na = na; 919234228Sluigi kring->ring = ring; 920234228Sluigi *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc; 921234228Sluigi *(ssize_t *)(uintptr_t)&ring->buf_ofs = 922241719Sluigi (nm_mem.pools[NETMAP_IF_POOL]._memtotal + 923241719Sluigi nm_mem.pools[NETMAP_RING_POOL]._memtotal) - 924234228Sluigi netmap_ring_offset(ring); 925234228Sluigi 926234228Sluigi ring->cur = kring->nr_hwcur = 0; 927234228Sluigi ring->avail = kring->nr_hwavail = 0; /* empty */ 928234228Sluigi *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE; 929234228Sluigi ND("initializing slots for rxring[%d]", i); 930241719Sluigi if (netmap_new_bufs(nifp, ring->slot, ndesc)) { 931241719Sluigi D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname); 932241719Sluigi goto cleanup; 933241719Sluigi } 934234228Sluigi } 935234228Sluigi#ifdef linux 936234228Sluigi // XXX initialize the selrecord structs. 
937234228Sluigi for (i = 0; i < ntx; i++) 938238812Sluigi init_waitqueue_head(&na->tx_rings[i].si); 939238812Sluigi for (i = 0; i < nrx; i++) 940234228Sluigi init_waitqueue_head(&na->rx_rings[i].si); 941238812Sluigi init_waitqueue_head(&na->tx_si); 942234228Sluigi init_waitqueue_head(&na->rx_si); 943234228Sluigi#endif 944234228Sluigifinal: 945234228Sluigi /* 946234228Sluigi * fill the slots for the rx and tx rings. They contain the offset 947234228Sluigi * between the ring and nifp, so the information is usable in 948234228Sluigi * userspace to reach the ring from the nifp. 949234228Sluigi */ 950234228Sluigi base = netmap_if_offset(nifp); 951234228Sluigi for (i = 0; i < ntx; i++) { 952234228Sluigi *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 953234228Sluigi netmap_ring_offset(na->tx_rings[i].ring) - base; 954234228Sluigi } 955234228Sluigi for (i = 0; i < nrx; i++) { 956234228Sluigi *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] = 957234228Sluigi netmap_ring_offset(na->rx_rings[i].ring) - base; 958234228Sluigi } 959234228Sluigi return (nifp); 960234228Sluigicleanup: 961241719Sluigi netmap_free_rings(na); 962241719Sluigi netmap_if_free(nifp); 963241719Sluigi (na->refcount)--; 964234228Sluigi return NULL; 965234228Sluigi} 966234228Sluigi 967241719Sluigi/* call with NMA_LOCK held */ 968234228Sluigistatic void 969241719Sluiginetmap_memory_deref(void) 970234228Sluigi{ 971241719Sluigi nm_mem.refcount--; 972245835Sluigi if (netmap_verbose) 973245835Sluigi D("refcount = %d", nm_mem.refcount); 974234228Sluigi} 975