/* netmap_mem2.c -- FreeBSD stable/11, revision 342033 */
1331722Seadler/* 2341477Svmaffione * Copyright (C) 2012-2014 Matteo Landi 3341477Svmaffione * Copyright (C) 2012-2016 Luigi Rizzo 4341477Svmaffione * Copyright (C) 2012-2016 Giuseppe Lettieri 5341477Svmaffione * All rights reserved. 6234228Sluigi * 7234228Sluigi * Redistribution and use in source and binary forms, with or without 8234228Sluigi * modification, are permitted provided that the following conditions 9234228Sluigi * are met: 10234228Sluigi * 1. Redistributions of source code must retain the above copyright 11234228Sluigi * notice, this list of conditions and the following disclaimer. 12234228Sluigi * 2. Redistributions in binary form must reproduce the above copyright 13234228Sluigi * notice, this list of conditions and the following disclaimer in the 14259412Sluigi * documentation and/or other materials provided with the distribution. 15234228Sluigi * 16234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19234228Sluigi * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26234228Sluigi * SUCH DAMAGE. 
27234228Sluigi */ 28234228Sluigi 29257529Sluigi#ifdef linux 30257529Sluigi#include "bsd_glue.h" 31257529Sluigi#endif /* linux */ 32234228Sluigi 33257529Sluigi#ifdef __APPLE__ 34257529Sluigi#include "osx_glue.h" 35257529Sluigi#endif /* __APPLE__ */ 36234228Sluigi 37257529Sluigi#ifdef __FreeBSD__ 38257529Sluigi#include <sys/cdefs.h> /* prerequisite */ 39257529Sluigi__FBSDID("$FreeBSD: stable/11/sys/dev/netmap/netmap_mem2.c 342033 2018-12-13 10:13:29Z vmaffione $"); 40234228Sluigi 41257529Sluigi#include <sys/types.h> 42257529Sluigi#include <sys/malloc.h> 43341477Svmaffione#include <sys/kernel.h> /* MALLOC_DEFINE */ 44257529Sluigi#include <sys/proc.h> 45257529Sluigi#include <vm/vm.h> /* vtophys */ 46257529Sluigi#include <vm/pmap.h> /* vtophys */ 47257529Sluigi#include <sys/socket.h> /* sockaddrs */ 48257529Sluigi#include <sys/selinfo.h> 49257529Sluigi#include <sys/sysctl.h> 50257529Sluigi#include <net/if.h> 51257529Sluigi#include <net/if_var.h> 52257529Sluigi#include <net/vnet.h> 53257529Sluigi#include <machine/bus.h> /* bus_dmamap_* */ 54257529Sluigi 55341477Svmaffione/* M_NETMAP only used in here */ 56341477SvmaffioneMALLOC_DECLARE(M_NETMAP); 57341477SvmaffioneMALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); 58341477Svmaffione 59257529Sluigi#endif /* __FreeBSD__ */ 60257529Sluigi 61341477Svmaffione#ifdef _WIN32 62341477Svmaffione#include <win_glue.h> 63341477Svmaffione#endif 64341477Svmaffione 65257529Sluigi#include <net/netmap.h> 66257529Sluigi#include <dev/netmap/netmap_kern.h> 67341477Svmaffione#include <net/netmap_virt.h> 68257529Sluigi#include "netmap_mem2.h" 69257529Sluigi 70341477Svmaffione#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY 71341477Svmaffione#define NETMAP_BUF_MAX_NUM 8*4096 /* if too big takes too much time to allocate */ 72341477Svmaffione#else 73341477Svmaffione#define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ 74341477Svmaffione#endif 75270063Sluigi 76270063Sluigi#define NETMAP_POOL_MAX_NAMSZ 32 77270063Sluigi 78270063Sluigi 
79270063Sluigienum { 80270063Sluigi NETMAP_IF_POOL = 0, 81270063Sluigi NETMAP_RING_POOL, 82270063Sluigi NETMAP_BUF_POOL, 83270063Sluigi NETMAP_POOLS_NR 84270063Sluigi}; 85270063Sluigi 86270063Sluigi 87270063Sluigistruct netmap_obj_params { 88270063Sluigi u_int size; 89270063Sluigi u_int num; 90341477Svmaffione 91341477Svmaffione u_int last_size; 92341477Svmaffione u_int last_num; 93270063Sluigi}; 94285349Sluigi 95270063Sluigistruct netmap_obj_pool { 96270063Sluigi char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ 97270063Sluigi 98270063Sluigi /* ---------------------------------------------------*/ 99270063Sluigi /* these are only meaningful if the pool is finalized */ 100270063Sluigi /* (see 'finalized' field in netmap_mem_d) */ 101270063Sluigi u_int objtotal; /* actual total number of objects. */ 102270063Sluigi u_int memtotal; /* actual total memory space */ 103270063Sluigi u_int numclusters; /* actual number of clusters */ 104270063Sluigi 105270063Sluigi u_int objfree; /* number of free objects. 
*/ 106270063Sluigi 107270063Sluigi struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ 108270063Sluigi uint32_t *bitmap; /* one bit per buffer, 1 means free */ 109341477Svmaffione uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */ 110270063Sluigi uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ 111341477Svmaffione int alloc_done; /* we have allocated the memory */ 112270063Sluigi /* ---------------------------------------------------*/ 113270063Sluigi 114270063Sluigi /* limits */ 115270063Sluigi u_int objminsize; /* minimum object size */ 116270063Sluigi u_int objmaxsize; /* maximum object size */ 117270063Sluigi u_int nummin; /* minimum number of objects */ 118270063Sluigi u_int nummax; /* maximum number of objects */ 119270063Sluigi 120270063Sluigi /* these are changed only by config */ 121270063Sluigi u_int _objtotal; /* total number of objects */ 122270063Sluigi u_int _objsize; /* object size */ 123270063Sluigi u_int _clustsize; /* cluster size */ 124270063Sluigi u_int _clustentries; /* objects per cluster */ 125270063Sluigi u_int _numclusters; /* number of clusters */ 126270063Sluigi 127270063Sluigi /* requested values */ 128270063Sluigi u_int r_objtotal; 129270063Sluigi u_int r_objsize; 130270063Sluigi}; 131270063Sluigi 132285349Sluigi#define NMA_LOCK_T NM_MTX_T 133341477Svmaffione#define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx) 134341477Svmaffione#define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) 135341477Svmaffione#define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) 136341477Svmaffione#define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx) 137341477Svmaffione#define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) 138270063Sluigi 139285349Sluigistruct netmap_mem_ops { 140341477Svmaffione int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*); 141341477Svmaffione int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size, 142285349Sluigi u_int *memflags, uint16_t *id); 143285349Sluigi 144285349Sluigi vm_paddr_t 
(*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t); 145285349Sluigi int (*nmd_config)(struct netmap_mem_d *); 146285349Sluigi int (*nmd_finalize)(struct netmap_mem_d *); 147285349Sluigi void (*nmd_deref)(struct netmap_mem_d *); 148285349Sluigi ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr); 149285349Sluigi void (*nmd_delete)(struct netmap_mem_d *); 150285349Sluigi 151341477Svmaffione struct netmap_if * (*nmd_if_new)(struct netmap_adapter *, 152341477Svmaffione struct netmap_priv_d *); 153285349Sluigi void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *); 154285349Sluigi int (*nmd_rings_create)(struct netmap_adapter *); 155285349Sluigi void (*nmd_rings_delete)(struct netmap_adapter *); 156285349Sluigi}; 157285349Sluigi 158270063Sluigistruct netmap_mem_d { 159270063Sluigi NMA_LOCK_T nm_mtx; /* protect the allocator */ 160270063Sluigi u_int nm_totalsize; /* shorthand */ 161270063Sluigi 162270063Sluigi u_int flags; 163270063Sluigi#define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */ 164341477Svmaffione#define NETMAP_MEM_HIDDEN 0x8 /* beeing prepared */ 165270063Sluigi int lasterr; /* last error for curr config */ 166285349Sluigi int active; /* active users */ 167285349Sluigi int refcount; 168270063Sluigi /* the three allocators */ 169270063Sluigi struct netmap_obj_pool pools[NETMAP_POOLS_NR]; 170270063Sluigi 171270063Sluigi nm_memid_t nm_id; /* allocator identifier */ 172270063Sluigi int nm_grp; /* iommu groupd id */ 173270063Sluigi 174270063Sluigi /* list of all existing allocators, sorted by nm_id */ 175270063Sluigi struct netmap_mem_d *prev, *next; 176285349Sluigi 177285349Sluigi struct netmap_mem_ops *ops; 178341477Svmaffione 179341477Svmaffione struct netmap_obj_params params[NETMAP_POOLS_NR]; 180341477Svmaffione 181341477Svmaffione#define NM_MEM_NAMESZ 16 182341477Svmaffione char name[NM_MEM_NAMESZ]; 183270063Sluigi}; 184270063Sluigi 185341477Svmaffioneint 186341477Svmaffionenetmap_mem_get_lut(struct netmap_mem_d *nmd, 
struct netmap_lut *lut) 187341477Svmaffione{ 188341477Svmaffione int rv; 189341477Svmaffione 190341477Svmaffione NMA_LOCK(nmd); 191341477Svmaffione rv = nmd->ops->nmd_get_lut(nmd, lut); 192341477Svmaffione NMA_UNLOCK(nmd); 193341477Svmaffione 194341477Svmaffione return rv; 195285349Sluigi} 196285349Sluigi 197341477Svmaffioneint 198341477Svmaffionenetmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size, 199341477Svmaffione u_int *memflags, nm_memid_t *memid) 200341477Svmaffione{ 201341477Svmaffione int rv; 202341477Svmaffione 203341477Svmaffione NMA_LOCK(nmd); 204341477Svmaffione rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid); 205341477Svmaffione NMA_UNLOCK(nmd); 206341477Svmaffione 207341477Svmaffione return rv; 208285349Sluigi} 209285349Sluigi 210341477Svmaffionevm_paddr_t 211341477Svmaffionenetmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) 212341477Svmaffione{ 213341477Svmaffione vm_paddr_t pa; 214341477Svmaffione 215341477Svmaffione#if defined(__FreeBSD__) 216341477Svmaffione /* This function is called by netmap_dev_pager_fault(), which holds a 217341477Svmaffione * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we 218341477Svmaffione * spin on the trylock. */ 219341477Svmaffione NMA_SPINLOCK(nmd); 220341477Svmaffione#else 221341477Svmaffione NMA_LOCK(nmd); 222341477Svmaffione#endif 223341477Svmaffione pa = nmd->ops->nmd_ofstophys(nmd, off); 224341477Svmaffione NMA_UNLOCK(nmd); 225341477Svmaffione 226341477Svmaffione return pa; 227285349Sluigi} 228285349Sluigi 229341477Svmaffionestatic int 230341477Svmaffionenetmap_mem_config(struct netmap_mem_d *nmd) 231341477Svmaffione{ 232341477Svmaffione if (nmd->active) { 233341477Svmaffione /* already in use. 
Not fatal, but we 234341477Svmaffione * cannot change the configuration 235341477Svmaffione */ 236341477Svmaffione return 0; 237341477Svmaffione } 238341477Svmaffione 239341477Svmaffione return nmd->ops->nmd_config(nmd); 240285349Sluigi} 241285349Sluigi 242341477Svmaffionessize_t 243341477Svmaffionenetmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off) 244341477Svmaffione{ 245341477Svmaffione ssize_t rv; 246341477Svmaffione 247341477Svmaffione NMA_LOCK(nmd); 248341477Svmaffione rv = nmd->ops->nmd_if_offset(nmd, off); 249341477Svmaffione NMA_UNLOCK(nmd); 250341477Svmaffione 251341477Svmaffione return rv; 252285349Sluigi} 253285349Sluigi 254341477Svmaffionestatic void 255341477Svmaffionenetmap_mem_delete(struct netmap_mem_d *nmd) 256341477Svmaffione{ 257341477Svmaffione nmd->ops->nmd_delete(nmd); 258341477Svmaffione} 259285349Sluigi 260341477Svmaffionestruct netmap_if * 261341477Svmaffionenetmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) 262341477Svmaffione{ 263341477Svmaffione struct netmap_if *nifp; 264341477Svmaffione struct netmap_mem_d *nmd = na->nm_mem; 265285349Sluigi 266341477Svmaffione NMA_LOCK(nmd); 267341477Svmaffione nifp = nmd->ops->nmd_if_new(na, priv); 268341477Svmaffione NMA_UNLOCK(nmd); 269341477Svmaffione 270341477Svmaffione return nifp; 271341477Svmaffione} 272341477Svmaffione 273341477Svmaffionevoid 274341477Svmaffionenetmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif) 275341477Svmaffione{ 276341477Svmaffione struct netmap_mem_d *nmd = na->nm_mem; 277341477Svmaffione 278341477Svmaffione NMA_LOCK(nmd); 279341477Svmaffione nmd->ops->nmd_if_delete(na, nif); 280341477Svmaffione NMA_UNLOCK(nmd); 281341477Svmaffione} 282341477Svmaffione 283341477Svmaffioneint 284341477Svmaffionenetmap_mem_rings_create(struct netmap_adapter *na) 285341477Svmaffione{ 286341477Svmaffione int rv; 287341477Svmaffione struct netmap_mem_d *nmd = na->nm_mem; 288341477Svmaffione 289341477Svmaffione NMA_LOCK(nmd); 
290341477Svmaffione rv = nmd->ops->nmd_rings_create(na); 291341477Svmaffione NMA_UNLOCK(nmd); 292341477Svmaffione 293341477Svmaffione return rv; 294341477Svmaffione} 295341477Svmaffione 296341477Svmaffionevoid 297341477Svmaffionenetmap_mem_rings_delete(struct netmap_adapter *na) 298341477Svmaffione{ 299341477Svmaffione struct netmap_mem_d *nmd = na->nm_mem; 300341477Svmaffione 301341477Svmaffione NMA_LOCK(nmd); 302341477Svmaffione nmd->ops->nmd_rings_delete(na); 303341477Svmaffione NMA_UNLOCK(nmd); 304341477Svmaffione} 305341477Svmaffione 306285349Sluigistatic int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *); 307285349Sluigistatic int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *); 308285349Sluigistatic int nm_mem_assign_group(struct netmap_mem_d *, struct device *); 309341477Svmaffionestatic void nm_mem_release_id(struct netmap_mem_d *); 310285349Sluigi 311341477Svmaffionenm_memid_t 312341477Svmaffionenetmap_mem_get_id(struct netmap_mem_d *nmd) 313341477Svmaffione{ 314341477Svmaffione return nmd->nm_id; 315341477Svmaffione} 316285349Sluigi 317285349Sluigi#ifdef NM_DEBUG_MEM_PUTGET 318285349Sluigi#define NM_DBG_REFC(nmd, func, line) \ 319342033Svmaffione nm_prinf("%d mem[%d] -> %d", line, (nmd)->nm_id, (nmd)->refcount); 320285349Sluigi#else 321285349Sluigi#define NM_DBG_REFC(nmd, func, line) 322285349Sluigi#endif 323285349Sluigi 324341477Svmaffione/* circular list of all existing allocators */ 325341477Svmaffionestatic struct netmap_mem_d *netmap_last_mem_d = &nm_mem; 326341477SvmaffioneNM_MTX_T nm_mem_list_lock; 327341477Svmaffione 328341477Svmaffionestruct netmap_mem_d * 329341477Svmaffione__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) 330270063Sluigi{ 331341477Svmaffione NM_MTX_LOCK(nm_mem_list_lock); 332285349Sluigi nmd->refcount++; 333285349Sluigi NM_DBG_REFC(nmd, func, line); 334341477Svmaffione NM_MTX_UNLOCK(nm_mem_list_lock); 335341477Svmaffione return nmd; 336270063Sluigi} 337270063Sluigi 
338341477Svmaffionevoid 339341477Svmaffione__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) 340270063Sluigi{ 341285349Sluigi int last; 342341477Svmaffione NM_MTX_LOCK(nm_mem_list_lock); 343285349Sluigi last = (--nmd->refcount == 0); 344341477Svmaffione if (last) 345341477Svmaffione nm_mem_release_id(nmd); 346285349Sluigi NM_DBG_REFC(nmd, func, line); 347341477Svmaffione NM_MTX_UNLOCK(nm_mem_list_lock); 348285349Sluigi if (last) 349285349Sluigi netmap_mem_delete(nmd); 350270063Sluigi} 351270063Sluigi 352285349Sluigiint 353285349Sluiginetmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) 354270063Sluigi{ 355341477Svmaffione int lasterr = 0; 356285349Sluigi if (nm_mem_assign_group(nmd, na->pdev) < 0) { 357285349Sluigi return ENOMEM; 358341477Svmaffione } 359341477Svmaffione 360341477Svmaffione NMA_LOCK(nmd); 361341477Svmaffione 362341477Svmaffione if (netmap_mem_config(nmd)) 363341477Svmaffione goto out; 364341477Svmaffione 365341477Svmaffione nmd->active++; 366341477Svmaffione 367341477Svmaffione nmd->lasterr = nmd->ops->nmd_finalize(nmd); 368341477Svmaffione 369341477Svmaffione if (!nmd->lasterr && na->pdev) { 370341477Svmaffione nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); 371341477Svmaffione } 372341477Svmaffione 373341477Svmaffioneout: 374341477Svmaffione lasterr = nmd->lasterr; 375341477Svmaffione NMA_UNLOCK(nmd); 376341477Svmaffione 377341477Svmaffione if (lasterr) 378341477Svmaffione netmap_mem_deref(nmd, na); 379341477Svmaffione 380341477Svmaffione return lasterr; 381341477Svmaffione} 382341477Svmaffione 383341477Svmaffionestatic int 384341477Svmaffionenm_isset(uint32_t *bitmap, u_int i) 385341477Svmaffione{ 386341477Svmaffione return bitmap[ (i>>5) ] & ( 1U << (i & 31U) ); 387341477Svmaffione} 388341477Svmaffione 389341477Svmaffione 390341477Svmaffionestatic int 391341477Svmaffionenetmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p) 392341477Svmaffione{ 393341477Svmaffione u_int n, j; 
394341477Svmaffione 395341477Svmaffione if (p->bitmap == NULL) { 396341477Svmaffione /* Allocate the bitmap */ 397341477Svmaffione n = (p->objtotal + 31) / 32; 398342033Svmaffione p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n); 399341477Svmaffione if (p->bitmap == NULL) { 400342033Svmaffione nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, 401341477Svmaffione p->name); 402341477Svmaffione return ENOMEM; 403341477Svmaffione } 404341477Svmaffione p->bitmap_slots = n; 405285349Sluigi } else { 406342033Svmaffione memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0])); 407285349Sluigi } 408285349Sluigi 409341477Svmaffione p->objfree = 0; 410341477Svmaffione /* 411341477Svmaffione * Set all the bits in the bitmap that have 412341477Svmaffione * corresponding buffers to 1 to indicate they are 413341477Svmaffione * free. 414341477Svmaffione */ 415341477Svmaffione for (j = 0; j < p->objtotal; j++) { 416341477Svmaffione if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) { 417342033Svmaffione if (netmap_debug & NM_DEBUG_MEM) 418342033Svmaffione nm_prinf("skipping %s %d", p->name, j); 419341477Svmaffione continue; 420341477Svmaffione } 421341477Svmaffione p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) ); 422341477Svmaffione p->objfree++; 423341477Svmaffione } 424285349Sluigi 425342033Svmaffione if (netmap_verbose) 426342033Svmaffione nm_prinf("%s free %u", p->name, p->objfree); 427342033Svmaffione if (p->objfree == 0) { 428342033Svmaffione if (netmap_verbose) 429342033Svmaffione nm_prerr("%s: no objects available", p->name); 430341477Svmaffione return ENOMEM; 431342033Svmaffione } 432341477Svmaffione 433341477Svmaffione return 0; 434270063Sluigi} 435270063Sluigi 436341477Svmaffionestatic int 437341477Svmaffionenetmap_mem_init_bitmaps(struct netmap_mem_d *nmd) 438341477Svmaffione{ 439341477Svmaffione int i, error = 0; 440341477Svmaffione 441341477Svmaffione for (i = 0; i < NETMAP_POOLS_NR; i++) { 442341477Svmaffione struct netmap_obj_pool *p 
= &nmd->pools[i]; 443341477Svmaffione 444341477Svmaffione error = netmap_init_obj_allocator_bitmap(p); 445341477Svmaffione if (error) 446341477Svmaffione return error; 447341477Svmaffione } 448341477Svmaffione 449341477Svmaffione /* 450341477Svmaffione * buffers 0 and 1 are reserved 451341477Svmaffione */ 452341477Svmaffione if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) { 453342033Svmaffione nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name); 454341477Svmaffione return ENOMEM; 455341477Svmaffione } 456341477Svmaffione 457341477Svmaffione nmd->pools[NETMAP_BUF_POOL].objfree -= 2; 458341477Svmaffione if (nmd->pools[NETMAP_BUF_POOL].bitmap) { 459341477Svmaffione /* XXX This check is a workaround that prevents a 460341477Svmaffione * NULL pointer crash which currently happens only 461341477Svmaffione * with ptnetmap guests. 462341477Svmaffione * Removed shared-info --> is the bug still there? */ 463341477Svmaffione nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U; 464341477Svmaffione } 465341477Svmaffione return 0; 466341477Svmaffione} 467341477Svmaffione 468341477Svmaffioneint 469285349Sluiginetmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) 470285349Sluigi{ 471341477Svmaffione int last_user = 0; 472285349Sluigi NMA_LOCK(nmd); 473341477Svmaffione if (na->active_fds <= 0) 474341477Svmaffione netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); 475341477Svmaffione if (nmd->active == 1) { 476341477Svmaffione last_user = 1; 477341477Svmaffione /* 478341477Svmaffione * Reset the allocator when it falls out of use so that any 479341477Svmaffione * pool resources leaked by unclean application exits are 480341477Svmaffione * reclaimed. 
481341477Svmaffione */ 482341477Svmaffione netmap_mem_init_bitmaps(nmd); 483341477Svmaffione } 484341477Svmaffione nmd->ops->nmd_deref(nmd); 485341477Svmaffione 486341477Svmaffione nmd->active--; 487342033Svmaffione if (last_user) { 488341477Svmaffione nmd->nm_grp = -1; 489342033Svmaffione nmd->lasterr = 0; 490342033Svmaffione } 491341477Svmaffione 492285349Sluigi NMA_UNLOCK(nmd); 493341477Svmaffione return last_user; 494285349Sluigi} 495234228Sluigi 496241719Sluigi 497285349Sluigi/* accessor functions */ 498341477Svmaffionestatic int 499285349Sluiginetmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 500285349Sluigi{ 501285349Sluigi lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; 502341477Svmaffione#ifdef __FreeBSD__ 503341477Svmaffione lut->plut = lut->lut; 504341477Svmaffione#endif 505285349Sluigi lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; 506285349Sluigi lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; 507341477Svmaffione 508341477Svmaffione return 0; 509285349Sluigi} 510285349Sluigi 511341477Svmaffionestatic struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { 512241719Sluigi [NETMAP_IF_POOL] = { 513241719Sluigi .size = 1024, 514341477Svmaffione .num = 2, 515241719Sluigi }, 516241719Sluigi [NETMAP_RING_POOL] = { 517261909Sluigi .size = 5*PAGE_SIZE, 518261909Sluigi .num = 4, 519261909Sluigi }, 520261909Sluigi [NETMAP_BUF_POOL] = { 521261909Sluigi .size = 2048, 522261909Sluigi .num = 4098, 523261909Sluigi }, 524261909Sluigi}; 525241719Sluigi 526261909Sluigi 527249659Sluigi/* 528249659Sluigi * nm_mem is the memory allocator used for all physical interfaces 529249659Sluigi * running in netmap mode. 530249659Sluigi * Virtual (VALE) ports will have each its own allocator. 531249659Sluigi */ 532285349Sluigiextern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ 533257529Sluigistruct netmap_mem_d nm_mem = { /* Our memory allocator. 
*/ 534241719Sluigi .pools = { 535241719Sluigi [NETMAP_IF_POOL] = { 536241719Sluigi .name = "netmap_if", 537241719Sluigi .objminsize = sizeof(struct netmap_if), 538241719Sluigi .objmaxsize = 4096, 539241719Sluigi .nummin = 10, /* don't be stingy */ 540241719Sluigi .nummax = 10000, /* XXX very large */ 541241719Sluigi }, 542241719Sluigi [NETMAP_RING_POOL] = { 543241719Sluigi .name = "netmap_ring", 544241719Sluigi .objminsize = sizeof(struct netmap_ring), 545241719Sluigi .objmaxsize = 32*PAGE_SIZE, 546241719Sluigi .nummin = 2, 547241719Sluigi .nummax = 1024, 548241719Sluigi }, 549241719Sluigi [NETMAP_BUF_POOL] = { 550241719Sluigi .name = "netmap_buf", 551241719Sluigi .objminsize = 64, 552241719Sluigi .objmaxsize = 65536, 553241719Sluigi .nummin = 4, 554241719Sluigi .nummax = 1000000, /* one million! */ 555241719Sluigi }, 556241719Sluigi }, 557261909Sluigi 558341477Svmaffione .params = { 559341477Svmaffione [NETMAP_IF_POOL] = { 560341477Svmaffione .size = 1024, 561341477Svmaffione .num = 100, 562341477Svmaffione }, 563341477Svmaffione [NETMAP_RING_POOL] = { 564341477Svmaffione .size = 9*PAGE_SIZE, 565341477Svmaffione .num = 200, 566341477Svmaffione }, 567341477Svmaffione [NETMAP_BUF_POOL] = { 568341477Svmaffione .size = 2048, 569341477Svmaffione .num = NETMAP_BUF_MAX_NUM, 570341477Svmaffione }, 571341477Svmaffione }, 572341477Svmaffione 573261909Sluigi .nm_id = 1, 574270063Sluigi .nm_grp = -1, 575261909Sluigi 576261909Sluigi .prev = &nm_mem, 577261909Sluigi .next = &nm_mem, 578285349Sluigi 579341477Svmaffione .ops = &netmap_mem_global_ops, 580341477Svmaffione 581341477Svmaffione .name = "1" 582241719Sluigi}; 583241719Sluigi 584257529Sluigi 585257529Sluigi/* blueprint for the private memory allocators */ 586341477Svmaffione/* XXX clang is not happy about using name as a print format */ 587341477Svmaffionestatic const struct netmap_mem_d nm_blueprint = { 588257529Sluigi .pools = { 589257529Sluigi [NETMAP_IF_POOL] = { 590257529Sluigi .name = "%s_if", 591257529Sluigi 
.objminsize = sizeof(struct netmap_if), 592257529Sluigi .objmaxsize = 4096, 593257529Sluigi .nummin = 1, 594261909Sluigi .nummax = 100, 595257529Sluigi }, 596257529Sluigi [NETMAP_RING_POOL] = { 597257529Sluigi .name = "%s_ring", 598257529Sluigi .objminsize = sizeof(struct netmap_ring), 599257529Sluigi .objmaxsize = 32*PAGE_SIZE, 600257529Sluigi .nummin = 2, 601257529Sluigi .nummax = 1024, 602257529Sluigi }, 603257529Sluigi [NETMAP_BUF_POOL] = { 604257529Sluigi .name = "%s_buf", 605257529Sluigi .objminsize = 64, 606257529Sluigi .objmaxsize = 65536, 607257529Sluigi .nummin = 4, 608257529Sluigi .nummax = 1000000, /* one million! */ 609257529Sluigi }, 610257529Sluigi }, 611257529Sluigi 612341477Svmaffione .nm_grp = -1, 613341477Svmaffione 614257529Sluigi .flags = NETMAP_MEM_PRIVATE, 615285349Sluigi 616341477Svmaffione .ops = &netmap_mem_global_ops, 617257529Sluigi}; 618257529Sluigi 619241719Sluigi/* memory allocator related sysctls */ 620234228Sluigi 621241719Sluigi#define STRINGIFY(x) #x 622241719Sluigi 623257529Sluigi 624241719Sluigi#define DECLARE_SYSCTLS(id, name) \ 625341477Svmaffione SYSBEGIN(mem2_ ## name); \ 626241719Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 627341477Svmaffione CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 628259412Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 629259412Sluigi CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 630259412Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 631341477Svmaffione CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 632259412Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 633261909Sluigi CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ 634261909Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ 635261909Sluigi CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ 636261909Sluigi 
"Default size of private netmap " STRINGIFY(name) "s"); \ 637261909Sluigi SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ 638261909Sluigi CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ 639341477Svmaffione "Default number of private netmap " STRINGIFY(name) "s"); \ 640341477Svmaffione SYSEND 641241719Sluigi 642257529SluigiSYSCTL_DECL(_dev_netmap); 643241719SluigiDECLARE_SYSCTLS(NETMAP_IF_POOL, if); 644241719SluigiDECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 645241719SluigiDECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 646241719Sluigi 647341477Svmaffione/* call with nm_mem_list_lock held */ 648261909Sluigistatic int 649341477Svmaffionenm_mem_assign_id_locked(struct netmap_mem_d *nmd) 650261909Sluigi{ 651261909Sluigi nm_memid_t id; 652261909Sluigi struct netmap_mem_d *scan = netmap_last_mem_d; 653261909Sluigi int error = ENOMEM; 654261909Sluigi 655261909Sluigi do { 656261909Sluigi /* we rely on unsigned wrap around */ 657261909Sluigi id = scan->nm_id + 1; 658261909Sluigi if (id == 0) /* reserve 0 as error value */ 659261909Sluigi id = 1; 660261909Sluigi scan = scan->next; 661261909Sluigi if (id != scan->nm_id) { 662261909Sluigi nmd->nm_id = id; 663261909Sluigi nmd->prev = scan->prev; 664261909Sluigi nmd->next = scan; 665261909Sluigi scan->prev->next = nmd; 666261909Sluigi scan->prev = nmd; 667261909Sluigi netmap_last_mem_d = nmd; 668341477Svmaffione nmd->refcount = 1; 669341477Svmaffione NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); 670261909Sluigi error = 0; 671261909Sluigi break; 672261909Sluigi } 673261909Sluigi } while (scan != netmap_last_mem_d); 674261909Sluigi 675261909Sluigi return error; 676261909Sluigi} 677261909Sluigi 678341477Svmaffione/* call with nm_mem_list_lock *not* held */ 679341477Svmaffionestatic int 680341477Svmaffionenm_mem_assign_id(struct netmap_mem_d *nmd) 681341477Svmaffione{ 682341477Svmaffione int ret; 683341477Svmaffione 684341477Svmaffione NM_MTX_LOCK(nm_mem_list_lock); 685341477Svmaffione ret = nm_mem_assign_id_locked(nmd); 686341477Svmaffione 
NM_MTX_UNLOCK(nm_mem_list_lock); 687341477Svmaffione 688341477Svmaffione return ret; 689341477Svmaffione} 690341477Svmaffione 691341477Svmaffione/* call with nm_mem_list_lock held */ 692261909Sluigistatic void 693261909Sluiginm_mem_release_id(struct netmap_mem_d *nmd) 694261909Sluigi{ 695261909Sluigi nmd->prev->next = nmd->next; 696261909Sluigi nmd->next->prev = nmd->prev; 697261909Sluigi 698261909Sluigi if (netmap_last_mem_d == nmd) 699261909Sluigi netmap_last_mem_d = nmd->prev; 700261909Sluigi 701261909Sluigi nmd->prev = nmd->next = NULL; 702341477Svmaffione} 703261909Sluigi 704341477Svmaffionestruct netmap_mem_d * 705341477Svmaffionenetmap_mem_find(nm_memid_t id) 706341477Svmaffione{ 707341477Svmaffione struct netmap_mem_d *nmd; 708341477Svmaffione 709341477Svmaffione NM_MTX_LOCK(nm_mem_list_lock); 710341477Svmaffione nmd = netmap_last_mem_d; 711341477Svmaffione do { 712341477Svmaffione if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) { 713341477Svmaffione nmd->refcount++; 714341477Svmaffione NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); 715341477Svmaffione NM_MTX_UNLOCK(nm_mem_list_lock); 716341477Svmaffione return nmd; 717341477Svmaffione } 718341477Svmaffione nmd = nmd->next; 719341477Svmaffione } while (nmd != netmap_last_mem_d); 720341477Svmaffione NM_MTX_UNLOCK(nm_mem_list_lock); 721341477Svmaffione return NULL; 722261909Sluigi} 723261909Sluigi 724270063Sluigistatic int 725270063Sluiginm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) 726270063Sluigi{ 727270063Sluigi int err = 0, id; 728270063Sluigi id = nm_iommu_group_id(dev); 729342033Svmaffione if (netmap_debug & NM_DEBUG_MEM) 730342033Svmaffione nm_prinf("iommu_group %d", id); 731261909Sluigi 732270063Sluigi NMA_LOCK(nmd); 733270063Sluigi 734270063Sluigi if (nmd->nm_grp < 0) 735270063Sluigi nmd->nm_grp = id; 736270063Sluigi 737342033Svmaffione if (nmd->nm_grp != id) { 738342033Svmaffione if (netmap_verbose) 739342033Svmaffione nm_prerr("iommu group mismatch: %u vs %u", 
740342033Svmaffione nmd->nm_grp, id); 741270063Sluigi nmd->lasterr = err = ENOMEM; 742342033Svmaffione } 743270063Sluigi 744270063Sluigi NMA_UNLOCK(nmd); 745270063Sluigi return err; 746270063Sluigi} 747270063Sluigi 748341477Svmaffionestatic struct lut_entry * 749341477Svmaffionenm_alloc_lut(u_int nobj) 750341477Svmaffione{ 751341477Svmaffione size_t n = sizeof(struct lut_entry) * nobj; 752341477Svmaffione struct lut_entry *lut; 753341477Svmaffione#ifdef linux 754341477Svmaffione lut = vmalloc(n); 755341477Svmaffione#else 756341477Svmaffione lut = nm_os_malloc(n); 757341477Svmaffione#endif 758341477Svmaffione return lut; 759341477Svmaffione} 760341477Svmaffione 761341477Svmaffionestatic void 762341477Svmaffionenm_free_lut(struct lut_entry *lut, u_int objtotal) 763341477Svmaffione{ 764341477Svmaffione bzero(lut, sizeof(struct lut_entry) * objtotal); 765341477Svmaffione#ifdef linux 766341477Svmaffione vfree(lut); 767341477Svmaffione#else 768341477Svmaffione nm_os_free(lut); 769341477Svmaffione#endif 770341477Svmaffione} 771341477Svmaffione 772341477Svmaffione#if defined(linux) || defined(_WIN32) 773341477Svmaffionestatic struct plut_entry * 774341477Svmaffionenm_alloc_plut(u_int nobj) 775341477Svmaffione{ 776341477Svmaffione size_t n = sizeof(struct plut_entry) * nobj; 777341477Svmaffione struct plut_entry *lut; 778341477Svmaffione lut = vmalloc(n); 779341477Svmaffione return lut; 780341477Svmaffione} 781341477Svmaffione 782341477Svmaffionestatic void 783341477Svmaffionenm_free_plut(struct plut_entry * lut) 784341477Svmaffione{ 785341477Svmaffione vfree(lut); 786341477Svmaffione} 787341477Svmaffione#endif /* linux or _WIN32 */ 788341477Svmaffione 789341477Svmaffione 790234228Sluigi/* 791249659Sluigi * First, find the allocator that contains the requested offset, 792249659Sluigi * then locate the cluster through a lookup table. 
793234228Sluigi */ 794285349Sluigistatic vm_paddr_t 795285349Sluiginetmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) 796234228Sluigi{ 797234228Sluigi int i; 798257529Sluigi vm_ooffset_t o = offset; 799257529Sluigi vm_paddr_t pa; 800257529Sluigi struct netmap_obj_pool *p; 801234228Sluigi 802257529Sluigi p = nmd->pools; 803257529Sluigi 804257529Sluigi for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { 805257529Sluigi if (offset >= p[i].memtotal) 806234228Sluigi continue; 807249659Sluigi // now lookup the cluster's address 808341477Svmaffione#ifndef _WIN32 809270063Sluigi pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + 810241719Sluigi offset % p[i]._objsize; 811341477Svmaffione#else 812341477Svmaffione pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr); 813341477Svmaffione pa.QuadPart += offset % p[i]._objsize; 814341477Svmaffione#endif 815257529Sluigi return pa; 816234228Sluigi } 817241719Sluigi /* this is only in case of errors */ 818342033Svmaffione nm_prerr("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 819257529Sluigi p[NETMAP_IF_POOL].memtotal, 820257529Sluigi p[NETMAP_IF_POOL].memtotal 821257529Sluigi + p[NETMAP_RING_POOL].memtotal, 822257529Sluigi p[NETMAP_IF_POOL].memtotal 823257529Sluigi + p[NETMAP_RING_POOL].memtotal 824257529Sluigi + p[NETMAP_BUF_POOL].memtotal); 825341477Svmaffione#ifndef _WIN32 826341477Svmaffione return 0; /* bad address */ 827341477Svmaffione#else 828341477Svmaffione vm_paddr_t res; 829341477Svmaffione res.QuadPart = 0; 830341477Svmaffione return res; 831341477Svmaffione#endif 832341477Svmaffione} 833341477Svmaffione 834341477Svmaffione#ifdef _WIN32 835341477Svmaffione 836341477Svmaffione/* 837341477Svmaffione * win32_build_virtual_memory_for_userspace 838341477Svmaffione * 839341477Svmaffione * This function get all the object making part of the pools and maps 840341477Svmaffione * a contiguous virtual memory space for the userspace 841341477Svmaffione * It works this way 
842341477Svmaffione * 1 - allocate a Memory Descriptor List wide as the sum 843341477Svmaffione * of the memory needed for the pools 844341477Svmaffione * 2 - cycle all the objects in every pool and for every object do 845341477Svmaffione * 846341477Svmaffione * 2a - cycle all the objects in every pool, get the list 847341477Svmaffione * of the physical address descriptors 848341477Svmaffione * 2b - calculate the offset in the array of pages desciptor in the 849341477Svmaffione * main MDL 850341477Svmaffione * 2c - copy the descriptors of the object in the main MDL 851341477Svmaffione * 852341477Svmaffione * 3 - return the resulting MDL that needs to be mapped in userland 853341477Svmaffione * 854341477Svmaffione * In this way we will have an MDL that describes all the memory for the 855341477Svmaffione * objects in a single object 856341477Svmaffione*/ 857341477Svmaffione 858341477SvmaffionePMDL 859341477Svmaffionewin32_build_user_vm_map(struct netmap_mem_d* nmd) 860341477Svmaffione{ 861341477Svmaffione u_int memflags, ofs = 0; 862341477Svmaffione PMDL mainMdl, tempMdl; 863341477Svmaffione uint64_t memsize; 864341477Svmaffione int i, j; 865341477Svmaffione 866341477Svmaffione if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) { 867342033Svmaffione nm_prerr("memory not finalised yet"); 868341477Svmaffione return NULL; 869341477Svmaffione } 870341477Svmaffione 871341477Svmaffione mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL); 872341477Svmaffione if (mainMdl == NULL) { 873342033Svmaffione nm_prerr("failed to allocate mdl"); 874341477Svmaffione return NULL; 875341477Svmaffione } 876341477Svmaffione 877341477Svmaffione NMA_LOCK(nmd); 878341477Svmaffione for (i = 0; i < NETMAP_POOLS_NR; i++) { 879341477Svmaffione struct netmap_obj_pool *p = &nmd->pools[i]; 880341477Svmaffione int clsz = p->_clustsize; 881341477Svmaffione int clobjs = p->_clustentries; /* objects per cluster */ 882341477Svmaffione int mdl_len = sizeof(PFN_NUMBER) * 
BYTES_TO_PAGES(clsz); 883341477Svmaffione PPFN_NUMBER pSrc, pDst; 884341477Svmaffione 885341477Svmaffione /* each pool has a different cluster size so we need to reallocate */ 886341477Svmaffione tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL); 887341477Svmaffione if (tempMdl == NULL) { 888341477Svmaffione NMA_UNLOCK(nmd); 889342033Svmaffione nm_prerr("fail to allocate tempMdl"); 890341477Svmaffione IoFreeMdl(mainMdl); 891341477Svmaffione return NULL; 892341477Svmaffione } 893341477Svmaffione pSrc = MmGetMdlPfnArray(tempMdl); 894341477Svmaffione /* create one entry per cluster, the lut[] has one entry per object */ 895341477Svmaffione for (j = 0; j < p->numclusters; j++, ofs += clsz) { 896341477Svmaffione pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)]; 897341477Svmaffione MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz); 898341477Svmaffione MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */ 899341477Svmaffione RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */ 900341477Svmaffione mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */ 901341477Svmaffione } 902341477Svmaffione IoFreeMdl(tempMdl); 903341477Svmaffione } 904257529Sluigi NMA_UNLOCK(nmd); 905341477Svmaffione return mainMdl; 906234228Sluigi} 907234228Sluigi 908341477Svmaffione#endif /* _WIN32 */ 909341477Svmaffione 910341477Svmaffione/* 911341477Svmaffione * helper function for OS-specific mmap routines (currently only windows). 912341477Svmaffione * Given an nmd and a pool index, returns the cluster size and number of clusters. 913341477Svmaffione * Returns 0 if memory is finalised and the pool is valid, otherwise 1. 914341477Svmaffione * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change. 
915341477Svmaffione */ 916341477Svmaffione 917341477Svmaffioneint 918341477Svmaffionenetmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters) 919341477Svmaffione{ 920341477Svmaffione if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR) 921341477Svmaffione return 1; /* invalid arguments */ 922341477Svmaffione // NMA_LOCK_ASSERT(nmd); 923341477Svmaffione if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { 924341477Svmaffione *clustsize = *numclusters = 0; 925341477Svmaffione return 1; /* not ready yet */ 926341477Svmaffione } 927341477Svmaffione *clustsize = nmd->pools[pool]._clustsize; 928341477Svmaffione *numclusters = nmd->pools[pool].numclusters; 929341477Svmaffione return 0; /* success */ 930341477Svmaffione} 931341477Svmaffione 932285349Sluigistatic int 933341477Svmaffionenetmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size, 934341477Svmaffione u_int *memflags, nm_memid_t *id) 935257529Sluigi{ 936257529Sluigi int error = 0; 937285349Sluigi error = netmap_mem_config(nmd); 938257529Sluigi if (error) 939257529Sluigi goto out; 940270063Sluigi if (size) { 941270063Sluigi if (nmd->flags & NETMAP_MEM_FINALIZED) { 942270063Sluigi *size = nmd->nm_totalsize; 943270063Sluigi } else { 944270063Sluigi int i; 945270063Sluigi *size = 0; 946270063Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 947270063Sluigi struct netmap_obj_pool *p = nmd->pools + i; 948270063Sluigi *size += (p->_numclusters * p->_clustsize); 949270063Sluigi } 950257529Sluigi } 951257529Sluigi } 952270063Sluigi if (memflags) 953270063Sluigi *memflags = nmd->flags; 954270063Sluigi if (id) 955270063Sluigi *id = nmd->nm_id; 956257529Sluigiout: 957257529Sluigi return error; 958257529Sluigi} 959257529Sluigi 960234228Sluigi/* 961234228Sluigi * we store objects by kernel address, need to find the offset 962234228Sluigi * within the pool to export the value to userspace. 
963234228Sluigi * Algorithm: scan until we find the cluster, then add the 964234228Sluigi * actual offset in the cluster 965234228Sluigi */ 966234242Sluigistatic ssize_t 967234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 968234228Sluigi{ 969257529Sluigi int i, k = p->_clustentries, n = p->objtotal; 970234228Sluigi ssize_t ofs = 0; 971234228Sluigi 972234228Sluigi for (i = 0; i < n; i += k, ofs += p->_clustsize) { 973234228Sluigi const char *base = p->lut[i].vaddr; 974234228Sluigi ssize_t relofs = (const char *) vaddr - base; 975234228Sluigi 976249504Sluigi if (relofs < 0 || relofs >= p->_clustsize) 977234228Sluigi continue; 978234228Sluigi 979234228Sluigi ofs = ofs + relofs; 980234228Sluigi ND("%s: return offset %d (cluster %d) for pointer %p", 981234228Sluigi p->name, ofs, i, vaddr); 982234228Sluigi return ofs; 983234228Sluigi } 984342033Svmaffione nm_prerr("address %p is not contained inside any cluster (%s)", 985234228Sluigi vaddr, p->name); 986234228Sluigi return 0; /* An error occurred */ 987234228Sluigi} 988234228Sluigi 989234228Sluigi/* Helper functions which convert virtual addresses to offsets */ 990257529Sluigi#define netmap_if_offset(n, v) \ 991257529Sluigi netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) 992234228Sluigi 993257529Sluigi#define netmap_ring_offset(n, v) \ 994257529Sluigi ((n)->pools[NETMAP_IF_POOL].memtotal + \ 995257529Sluigi netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) 996234228Sluigi 997285349Sluigistatic ssize_t 998285349Sluiginetmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) 999257529Sluigi{ 1000341477Svmaffione return netmap_if_offset(nmd, addr); 1001257529Sluigi} 1002257529Sluigi 1003241719Sluigi/* 1004241719Sluigi * report the index, and use start position as a hint, 1005241719Sluigi * otherwise buffer allocation becomes terribly expensive. 
1006241719Sluigi */ 1007234228Sluigistatic void * 1008257529Sluiginetmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) 1009234228Sluigi{ 1010234228Sluigi uint32_t i = 0; /* index in the bitmap */ 1011341477Svmaffione uint32_t mask, j = 0; /* slot counter */ 1012234228Sluigi void *vaddr = NULL; 1013234228Sluigi 1014234228Sluigi if (len > p->_objsize) { 1015342033Svmaffione nm_prerr("%s request size %d too large", p->name, len); 1016234228Sluigi return NULL; 1017234228Sluigi } 1018234228Sluigi 1019234228Sluigi if (p->objfree == 0) { 1020342033Svmaffione nm_prerr("no more %s objects", p->name); 1021234228Sluigi return NULL; 1022234228Sluigi } 1023241719Sluigi if (start) 1024241719Sluigi i = *start; 1025234228Sluigi 1026241719Sluigi /* termination is guaranteed by p->free, but better check bounds on i */ 1027241719Sluigi while (vaddr == NULL && i < p->bitmap_slots) { 1028234228Sluigi uint32_t cur = p->bitmap[i]; 1029234228Sluigi if (cur == 0) { /* bitmask is fully used */ 1030234228Sluigi i++; 1031234228Sluigi continue; 1032234228Sluigi } 1033234228Sluigi /* locate a slot */ 1034234228Sluigi for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 1035234228Sluigi ; 1036234228Sluigi 1037234228Sluigi p->bitmap[i] &= ~mask; /* mark object as in use */ 1038234228Sluigi p->objfree--; 1039234228Sluigi 1040234228Sluigi vaddr = p->lut[i * 32 + j].vaddr; 1041241719Sluigi if (index) 1042241719Sluigi *index = i * 32 + j; 1043234228Sluigi } 1044341477Svmaffione ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); 1045234228Sluigi 1046241719Sluigi if (start) 1047241719Sluigi *start = i; 1048234228Sluigi return vaddr; 1049234228Sluigi} 1050234228Sluigi 1051234228Sluigi 1052234228Sluigi/* 1053261909Sluigi * free by index, not by address. 1054261909Sluigi * XXX should we also cleanup the content ? 
1055234228Sluigi */ 1056261909Sluigistatic int 1057234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 1058234228Sluigi{ 1059261909Sluigi uint32_t *ptr, mask; 1060261909Sluigi 1061234228Sluigi if (j >= p->objtotal) { 1062342033Svmaffione nm_prerr("invalid index %u, max %u", j, p->objtotal); 1063261909Sluigi return 1; 1064234228Sluigi } 1065261909Sluigi ptr = &p->bitmap[j / 32]; 1066261909Sluigi mask = (1 << (j % 32)); 1067261909Sluigi if (*ptr & mask) { 1068342033Svmaffione nm_prerr("ouch, double free on buffer %d", j); 1069261909Sluigi return 1; 1070261909Sluigi } else { 1071261909Sluigi *ptr |= mask; 1072261909Sluigi p->objfree++; 1073261909Sluigi return 0; 1074261909Sluigi } 1075234228Sluigi} 1076234228Sluigi 1077261909Sluigi/* 1078261909Sluigi * free by address. This is slow but is only used for a few 1079261909Sluigi * objects (rings, nifp) 1080261909Sluigi */ 1081234228Sluigistatic void 1082234228Sluiginetmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 1083234228Sluigi{ 1084257529Sluigi u_int i, j, n = p->numclusters; 1085234228Sluigi 1086257529Sluigi for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { 1087257529Sluigi void *base = p->lut[i * p->_clustentries].vaddr; 1088234228Sluigi ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 1089234228Sluigi 1090234228Sluigi /* Given address, is out of the scope of the current cluster.*/ 1091341477Svmaffione if (base == NULL || vaddr < base || relofs >= p->_clustsize) 1092234228Sluigi continue; 1093234228Sluigi 1094234228Sluigi j = j + relofs / p->_objsize; 1095257529Sluigi /* KASSERT(j != 0, ("Cannot free object 0")); */ 1096234228Sluigi netmap_obj_free(p, j); 1097234228Sluigi return; 1098234228Sluigi } 1099342033Svmaffione nm_prerr("address %p is not contained inside any cluster (%s)", 1100234228Sluigi vaddr, p->name); 1101234228Sluigi} 1102234228Sluigi 1103341477Svmaffioneunsigned 1104341477Svmaffionenetmap_mem_bufsize(struct netmap_mem_d *nmd) 1105341477Svmaffione{ 1106341477Svmaffione 
return nmd->pools[NETMAP_BUF_POOL]._objsize; 1107341477Svmaffione} 1108270063Sluigi 1109257529Sluigi#define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) 1110257529Sluigi#define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) 1111257529Sluigi#define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) 1112257529Sluigi#define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) 1113257529Sluigi#define netmap_buf_malloc(n, _pos, _index) \ 1114270063Sluigi netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) 1115234228Sluigi 1116234228Sluigi 1117341477Svmaffione#if 0 /* currently unused */ 1118234228Sluigi/* Return the index associated to the given packet buffer */ 1119257529Sluigi#define netmap_buf_index(n, v) \ 1120257529Sluigi (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) 1121261909Sluigi#endif 1122234228Sluigi 1123261909Sluigi/* 1124261909Sluigi * allocate extra buffers in a linked list. 1125261909Sluigi * returns the actual number. 
1126261909Sluigi */ 1127261909Sluigiuint32_t 1128261909Sluiginetmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) 1129261909Sluigi{ 1130261909Sluigi struct netmap_mem_d *nmd = na->nm_mem; 1131261909Sluigi uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ 1132234228Sluigi 1133261909Sluigi NMA_LOCK(nmd); 1134261909Sluigi 1135261909Sluigi *head = 0; /* default, 'null' index ie empty list */ 1136261909Sluigi for (i = 0 ; i < n; i++) { 1137261909Sluigi uint32_t cur = *head; /* save current head */ 1138261909Sluigi uint32_t *p = netmap_buf_malloc(nmd, &pos, head); 1139261909Sluigi if (p == NULL) { 1140342033Svmaffione nm_prerr("no more buffers after %d of %d", i, n); 1141261909Sluigi *head = cur; /* restore */ 1142261909Sluigi break; 1143261909Sluigi } 1144341477Svmaffione ND(5, "allocate buffer %d -> %d", *head, cur); 1145261909Sluigi *p = cur; /* link to previous head */ 1146261909Sluigi } 1147261909Sluigi 1148261909Sluigi NMA_UNLOCK(nmd); 1149261909Sluigi 1150261909Sluigi return i; 1151261909Sluigi} 1152261909Sluigi 1153261909Sluigistatic void 1154261909Sluiginetmap_extra_free(struct netmap_adapter *na, uint32_t head) 1155261909Sluigi{ 1156341477Svmaffione struct lut_entry *lut = na->na_lut.lut; 1157261909Sluigi struct netmap_mem_d *nmd = na->nm_mem; 1158261909Sluigi struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1159261909Sluigi uint32_t i, cur, *buf; 1160261909Sluigi 1161341477Svmaffione ND("freeing the extra list"); 1162261909Sluigi for (i = 0; head >=2 && head < p->objtotal; i++) { 1163261909Sluigi cur = head; 1164261909Sluigi buf = lut[head].vaddr; 1165261909Sluigi head = *buf; 1166261909Sluigi *buf = 0; 1167261909Sluigi if (netmap_obj_free(p, cur)) 1168261909Sluigi break; 1169261909Sluigi } 1170261909Sluigi if (head != 0) 1171342033Svmaffione nm_prerr("breaking with head %d", head); 1172342033Svmaffione if (netmap_debug & NM_DEBUG_MEM) 1173342033Svmaffione nm_prinf("freed %d buffers", i); 1174261909Sluigi} 
1175261909Sluigi 1176261909Sluigi 1177241719Sluigi/* Return nonzero on error */ 1178241719Sluigistatic int 1179259412Sluiginetmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 1180234228Sluigi{ 1181257529Sluigi struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1182257529Sluigi u_int i = 0; /* slot counter */ 1183241719Sluigi uint32_t pos = 0; /* slot in p->bitmap */ 1184241719Sluigi uint32_t index = 0; /* buffer index */ 1185234228Sluigi 1186234228Sluigi for (i = 0; i < n; i++) { 1187257529Sluigi void *vaddr = netmap_buf_malloc(nmd, &pos, &index); 1188234228Sluigi if (vaddr == NULL) { 1189342033Svmaffione nm_prerr("no more buffers after %d of %d", i, n); 1190234228Sluigi goto cleanup; 1191234228Sluigi } 1192241719Sluigi slot[i].buf_idx = index; 1193234228Sluigi slot[i].len = p->_objsize; 1194259412Sluigi slot[i].flags = 0; 1195341477Svmaffione slot[i].ptr = 0; 1196234228Sluigi } 1197234228Sluigi 1198341477Svmaffione ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos); 1199241719Sluigi return (0); 1200234228Sluigi 1201234228Sluigicleanup: 1202241643Semaste while (i > 0) { 1203241643Semaste i--; 1204241719Sluigi netmap_obj_free(p, slot[i].buf_idx); 1205234228Sluigi } 1206241719Sluigi bzero(slot, n * sizeof(slot[0])); 1207241719Sluigi return (ENOMEM); 1208234228Sluigi} 1209234228Sluigi 1210261909Sluigistatic void 1211261909Sluiginetmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) 1212261909Sluigi{ 1213261909Sluigi struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1214261909Sluigi u_int i; 1215234228Sluigi 1216261909Sluigi for (i = 0; i < n; i++) { 1217261909Sluigi slot[i].buf_idx = index; 1218261909Sluigi slot[i].len = p->_objsize; 1219261909Sluigi slot[i].flags = 0; 1220261909Sluigi } 1221261909Sluigi} 1222261909Sluigi 1223261909Sluigi 1224234228Sluigistatic void 1225259412Sluiginetmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) 1226234228Sluigi{ 
1227257529Sluigi struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1228241719Sluigi 1229234228Sluigi if (i < 2 || i >= p->objtotal) { 1230342033Svmaffione nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 1231234228Sluigi return; 1232234228Sluigi } 1233241719Sluigi netmap_obj_free(p, i); 1234234228Sluigi} 1235234228Sluigi 1236261909Sluigi 1237234228Sluigistatic void 1238261909Sluiginetmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 1239261909Sluigi{ 1240261909Sluigi u_int i; 1241261909Sluigi 1242261909Sluigi for (i = 0; i < n; i++) { 1243341477Svmaffione if (slot[i].buf_idx > 1) 1244261909Sluigi netmap_free_buf(nmd, slot[i].buf_idx); 1245261909Sluigi } 1246341477Svmaffione ND("%s: released some buffers, available: %u", 1247341477Svmaffione p->name, p->objfree); 1248261909Sluigi} 1249261909Sluigi 1250261909Sluigistatic void 1251241719Sluiginetmap_reset_obj_allocator(struct netmap_obj_pool *p) 1252234228Sluigi{ 1253257529Sluigi 1254234228Sluigi if (p == NULL) 1255234228Sluigi return; 1256234228Sluigi if (p->bitmap) 1257341477Svmaffione nm_os_free(p->bitmap); 1258241719Sluigi p->bitmap = NULL; 1259341477Svmaffione if (p->invalid_bitmap) 1260341477Svmaffione nm_os_free(p->invalid_bitmap); 1261341477Svmaffione p->invalid_bitmap = NULL; 1262341477Svmaffione if (!p->alloc_done) { 1263341477Svmaffione /* allocation was done by somebody else. 1264341477Svmaffione * Let them clean up after themselves. 1265341477Svmaffione */ 1266341477Svmaffione return; 1267341477Svmaffione } 1268234228Sluigi if (p->lut) { 1269257529Sluigi u_int i; 1270257529Sluigi 1271282978Spkelsey /* 1272282978Spkelsey * Free each cluster allocated in 1273282978Spkelsey * netmap_finalize_obj_allocator(). The cluster start 1274282978Spkelsey * addresses are stored at multiples of p->_clusterentries 1275282978Spkelsey * in the lut. 
1276282978Spkelsey */ 1277257529Sluigi for (i = 0; i < p->objtotal; i += p->_clustentries) { 1278341477Svmaffione contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); 1279234228Sluigi } 1280341477Svmaffione nm_free_lut(p->lut, p->objtotal); 1281234228Sluigi } 1282241719Sluigi p->lut = NULL; 1283257529Sluigi p->objtotal = 0; 1284257529Sluigi p->memtotal = 0; 1285257529Sluigi p->numclusters = 0; 1286257529Sluigi p->objfree = 0; 1287341477Svmaffione p->alloc_done = 0; 1288234228Sluigi} 1289234228Sluigi 1290234228Sluigi/* 1291241719Sluigi * Free all resources related to an allocator. 1292241719Sluigi */ 1293241719Sluigistatic void 1294241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p) 1295241719Sluigi{ 1296241719Sluigi if (p == NULL) 1297241719Sluigi return; 1298241719Sluigi netmap_reset_obj_allocator(p); 1299241719Sluigi} 1300241719Sluigi 1301241719Sluigi/* 1302234228Sluigi * We receive a request for objtotal objects, of size objsize each. 1303234228Sluigi * Internally we may round up both numbers, as we allocate objects 1304234228Sluigi * in small clusters multiple of the page size. 1305257529Sluigi * We need to keep track of objtotal and clustentries, 1306234228Sluigi * as they are needed when freeing memory. 1307234228Sluigi * 1308234228Sluigi * XXX note -- userspace needs the buffers to be contiguous, 1309234228Sluigi * so we cannot afford gaps at the end of a cluster. 
1310234228Sluigi */ 1311241719Sluigi 1312241719Sluigi 1313241719Sluigi/* call with NMA_LOCK held */ 1314241719Sluigistatic int 1315241719Sluiginetmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 1316234228Sluigi{ 1317257529Sluigi int i; 1318234228Sluigi u_int clustsize; /* the cluster size, multiple of page size */ 1319234228Sluigi u_int clustentries; /* how many objects per entry */ 1320234228Sluigi 1321257529Sluigi /* we store the current request, so we can 1322257529Sluigi * detect configuration changes later */ 1323257529Sluigi p->r_objtotal = objtotal; 1324257529Sluigi p->r_objsize = objsize; 1325257529Sluigi 1326270063Sluigi#define MAX_CLUSTSIZE (1<<22) // 4 MB 1327260368Sluigi#define LINE_ROUND NM_CACHE_ALIGN // 64 1328234228Sluigi if (objsize >= MAX_CLUSTSIZE) { 1329234228Sluigi /* we could do it but there is no point */ 1330342033Svmaffione nm_prerr("unsupported allocation for %d bytes", objsize); 1331257529Sluigi return EINVAL; 1332234228Sluigi } 1333234228Sluigi /* make sure objsize is a multiple of LINE_ROUND */ 1334234228Sluigi i = (objsize & (LINE_ROUND - 1)); 1335234228Sluigi if (i) { 1336342033Svmaffione nm_prinf("aligning object by %d bytes", LINE_ROUND - i); 1337234228Sluigi objsize += LINE_ROUND - i; 1338234228Sluigi } 1339241719Sluigi if (objsize < p->objminsize || objsize > p->objmaxsize) { 1340342033Svmaffione nm_prerr("requested objsize %d out of range [%d, %d]", 1341241719Sluigi objsize, p->objminsize, p->objmaxsize); 1342257529Sluigi return EINVAL; 1343241719Sluigi } 1344241719Sluigi if (objtotal < p->nummin || objtotal > p->nummax) { 1345342033Svmaffione nm_prerr("requested objtotal %d out of range [%d, %d]", 1346241719Sluigi objtotal, p->nummin, p->nummax); 1347257529Sluigi return EINVAL; 1348241719Sluigi } 1349234228Sluigi /* 1350234228Sluigi * Compute number of objects using a brute-force approach: 1351234228Sluigi * given a max cluster size, 1352234228Sluigi * we try to fill it with objects keeping 
track of the 1353234228Sluigi * wasted space to the next page boundary. 1354234228Sluigi */ 1355234228Sluigi for (clustentries = 0, i = 1;; i++) { 1356234228Sluigi u_int delta, used = i * objsize; 1357234228Sluigi if (used > MAX_CLUSTSIZE) 1358234228Sluigi break; 1359234228Sluigi delta = used % PAGE_SIZE; 1360234228Sluigi if (delta == 0) { // exact solution 1361234228Sluigi clustentries = i; 1362234228Sluigi break; 1363234228Sluigi } 1364234228Sluigi } 1365270063Sluigi /* exact solution not found */ 1366270063Sluigi if (clustentries == 0) { 1367342033Svmaffione nm_prerr("unsupported allocation for %d bytes", objsize); 1368270063Sluigi return EINVAL; 1369270063Sluigi } 1370270063Sluigi /* compute clustsize */ 1371234228Sluigi clustsize = clustentries * objsize; 1372342033Svmaffione if (netmap_debug & NM_DEBUG_MEM) 1373342033Svmaffione nm_prinf("objsize %d clustsize %d objects %d", 1374245835Sluigi objsize, clustsize, clustentries); 1375234228Sluigi 1376234228Sluigi /* 1377234228Sluigi * The number of clusters is n = ceil(objtotal/clustentries) 1378234228Sluigi * objtotal' = n * clustentries 1379234228Sluigi */ 1380257529Sluigi p->_clustentries = clustentries; 1381234228Sluigi p->_clustsize = clustsize; 1382257529Sluigi p->_numclusters = (objtotal + clustentries - 1) / clustentries; 1383257529Sluigi 1384257529Sluigi /* actual values (may be larger than requested) */ 1385234228Sluigi p->_objsize = objsize; 1386257529Sluigi p->_objtotal = p->_numclusters * clustentries; 1387234228Sluigi 1388241719Sluigi return 0; 1389241719Sluigi} 1390241719Sluigi 1391241719Sluigi/* call with NMA_LOCK held */ 1392241719Sluigistatic int 1393241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p) 1394241719Sluigi{ 1395257529Sluigi int i; /* must be signed */ 1396257529Sluigi size_t n; 1397241719Sluigi 1398341477Svmaffione if (p->lut) { 1399341477Svmaffione /* if the lut is already there we assume that also all the 1400341477Svmaffione * clusters have already been allocated, 
possibily by somebody 1401341477Svmaffione * else (e.g., extmem). In the latter case, the alloc_done flag 1402341477Svmaffione * will remain at zero, so that we will not attempt to 1403341477Svmaffione * deallocate the clusters by ourselves in 1404341477Svmaffione * netmap_reset_obj_allocator. 1405341477Svmaffione */ 1406341477Svmaffione return 0; 1407341477Svmaffione } 1408341477Svmaffione 1409257529Sluigi /* optimistically assume we have enough memory */ 1410257529Sluigi p->numclusters = p->_numclusters; 1411257529Sluigi p->objtotal = p->_objtotal; 1412341477Svmaffione p->alloc_done = 1; 1413257529Sluigi 1414341477Svmaffione p->lut = nm_alloc_lut(p->objtotal); 1415234228Sluigi if (p->lut == NULL) { 1416342033Svmaffione nm_prerr("Unable to create lookup table for '%s'", p->name); 1417234228Sluigi goto clean; 1418234228Sluigi } 1419234228Sluigi 1420234228Sluigi /* 1421341477Svmaffione * Allocate clusters, init pointers 1422234228Sluigi */ 1423257529Sluigi 1424257529Sluigi n = p->_clustsize; 1425257529Sluigi for (i = 0; i < (int)p->objtotal;) { 1426257529Sluigi int lim = i + p->_clustentries; 1427234228Sluigi char *clust; 1428234228Sluigi 1429341477Svmaffione /* 1430341477Svmaffione * XXX Note, we only need contigmalloc() for buffers attached 1431341477Svmaffione * to native interfaces. In all other cases (nifp, netmap rings 1432341477Svmaffione * and even buffers for VALE ports or emulated interfaces) we 1433341477Svmaffione * can live with standard malloc, because the hardware will not 1434341477Svmaffione * access the pages directly. 1435341477Svmaffione */ 1436257529Sluigi clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, 1437257529Sluigi (size_t)0, -1UL, PAGE_SIZE, 0); 1438234228Sluigi if (clust == NULL) { 1439234228Sluigi /* 1440234228Sluigi * If we get here, there is a severe memory shortage, 1441234228Sluigi * so halve the allocated memory to reclaim some. 
1442234228Sluigi */ 1443342033Svmaffione nm_prerr("Unable to create cluster at %d for '%s' allocator", 1444241719Sluigi i, p->name); 1445257529Sluigi if (i < 2) /* nothing to halve */ 1446257529Sluigi goto out; 1447234228Sluigi lim = i / 2; 1448241719Sluigi for (i--; i >= lim; i--) { 1449257529Sluigi if (i % p->_clustentries == 0 && p->lut[i].vaddr) 1450234228Sluigi contigfree(p->lut[i].vaddr, 1451257529Sluigi n, M_NETMAP); 1452282978Spkelsey p->lut[i].vaddr = NULL; 1453234228Sluigi } 1454257529Sluigi out: 1455234228Sluigi p->objtotal = i; 1456257529Sluigi /* we may have stopped in the middle of a cluster */ 1457257529Sluigi p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; 1458234228Sluigi break; 1459234228Sluigi } 1460282978Spkelsey /* 1461341477Svmaffione * Set lut state for all buffers in the current cluster. 1462282978Spkelsey * 1463282978Spkelsey * [i, lim) is the set of buffer indexes that cover the 1464282978Spkelsey * current cluster. 1465282978Spkelsey * 1466282978Spkelsey * 'clust' is really the address of the current buffer in 1467282978Spkelsey * the current cluster as we index through it with a stride 1468282978Spkelsey * of p->_objsize. 
1469282978Spkelsey */ 1470241719Sluigi for (; i < lim; i++, clust += p->_objsize) { 1471234228Sluigi p->lut[i].vaddr = clust; 1472341477Svmaffione#if !defined(linux) && !defined(_WIN32) 1473234228Sluigi p->lut[i].paddr = vtophys(clust); 1474341477Svmaffione#endif 1475234228Sluigi } 1476234228Sluigi } 1477257529Sluigi p->memtotal = p->numclusters * p->_clustsize; 1478245835Sluigi if (netmap_verbose) 1479342033Svmaffione nm_prinf("Pre-allocated %d clusters (%d/%dKB) for '%s'", 1480257529Sluigi p->numclusters, p->_clustsize >> 10, 1481257529Sluigi p->memtotal >> 10, p->name); 1482234228Sluigi 1483241719Sluigi return 0; 1484234228Sluigi 1485234228Sluigiclean: 1486241719Sluigi netmap_reset_obj_allocator(p); 1487241719Sluigi return ENOMEM; 1488234228Sluigi} 1489234228Sluigi 1490241719Sluigi/* call with lock held */ 1491234228Sluigistatic int 1492341477Svmaffionenetmap_mem_params_changed(struct netmap_obj_params* p) 1493234228Sluigi{ 1494341477Svmaffione int i, rv = 0; 1495234228Sluigi 1496241719Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 1497341477Svmaffione if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) { 1498341477Svmaffione p[i].last_size = p[i].size; 1499341477Svmaffione p[i].last_num = p[i].num; 1500341477Svmaffione rv = 1; 1501341477Svmaffione } 1502241719Sluigi } 1503341477Svmaffione return rv; 1504241719Sluigi} 1505234228Sluigi 1506257529Sluigistatic void 1507257529Sluiginetmap_mem_reset_all(struct netmap_mem_d *nmd) 1508257529Sluigi{ 1509257529Sluigi int i; 1510261909Sluigi 1511342033Svmaffione if (netmap_debug & NM_DEBUG_MEM) 1512342033Svmaffione nm_prinf("resetting %p", nmd); 1513257529Sluigi for (i = 0; i < NETMAP_POOLS_NR; i++) { 1514257529Sluigi netmap_reset_obj_allocator(&nmd->pools[i]); 1515257529Sluigi } 1516257529Sluigi nmd->flags &= ~NETMAP_MEM_FINALIZED; 1517257529Sluigi} 1518234228Sluigi 1519257529Sluigistatic int 1520270063Sluiginetmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) 1521270063Sluigi{ 
	/* (continuation of netmap_mem_unmap, whose header precedes this chunk:
	 * undo the per-cluster DMA mappings of pool p for adapter na) */
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na == NULL || na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */
	ND("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL)
		return 0;
	/* only the first entry of each cluster carries a real DMA mapping;
	 * the others were derived by offsetting from it (see netmap_mem_map) */
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

/*
 * DMA-map every cluster of pool p for adapter na, filling the adapter's
 * physical lookup table (na->na_lut.plut). On FreeBSD and Windows this is
 * a no-op here (FreeBSD maps per-packet in txsync/rxsync; Windows is
 * unsupported). Returns 0 or ENOMEM/driver error; on failure all partial
 * mappings are undone via netmap_mem_unmap().
 */
static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		ND("plut already allocated for %s", na->name);
		return 0;
	}

	ND("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	/* zero first so a partial failure can be unwound safely */
	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		/* map the cluster head ... */
		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
				p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		/* ... and derive the other entries in the (physically
		 * contiguous) cluster by adding the object size */
		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}

/*
 * Finalize (actually allocate) all pools of allocator nmd and initialize
 * the allocation bitmaps. Sets NETMAP_MEM_FINALIZED and nm_totalsize on
 * success; on any failure resets all pools and returns nmd->lasterr.
 */
static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %d KB, rings %d KB, buffers %d MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */

/*
 * Common constructor for private allocators: allocate `size` bytes
 * (callers pass sizeof their containing struct, e.g. netmap_mem_ext),
 * copy the blueprint, assign an allocator id, install pool params `p`
 * and the ops vector, and run the initial config pass.
 * On failure stores the error in *perr (if non-NULL) and returns NULL.
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
		struct netmap_mem_ops *ops, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto
		error; /* target of the "goto" split across the previous chunk */
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d);
	if (err)
		goto error_free;
	/* the allocator name is simply its numeric id */
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		/* the blueprint pool names are printf templates taking the
		 * allocator name (non-literal format is intentional here) */
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				d->name);
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_rel_id;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

error_rel_id:
	NMA_LOCK_DESTROY(d);
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

/*
 * Create a private allocator sized for the requested number of tx/rx
 * rings and slots, plus `npipes` pipes and `extra_bufs` spare buffers.
 * Pool parameters start from netmap_min_priv_params and are only grown
 * to fit the request. Returns NULL on failure, with the error in *perr.
 */
struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
	u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other end, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice the
	 * space actually needed if the pipe rings were the same size as the parent rings
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);

	return d;
}


/* call with lock held */
/*
 * nmd_config method for "mem2" allocators: if the pool parameters
 * changed, reset any finalized pools and reconfigure each object
 * allocator from nmd->params. Result is stored in nmd->lasterr.
 */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	ND("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
			nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}

/* nmd_finalize method: allocate all pools (idempotent if already
 * finalized). Returns nmd->lasterr. */
static int
netmap_mem2_finalize(struct netmap_mem_d *nmd)
{
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	return nmd->lasterr;
}

/* nmd_delete method: destroy all pools and the allocator itself.
 * The static global allocator nm_mem is never freed. */
static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
	    netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linekd list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

/* Module-load initialization: set up the allocator-list locks and take
 * the initial reference on the global allocator. Always returns 0. */
int
netmap_mem_init(void)
{
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

/* Module-unload teardown: drop the reference taken in netmap_mem_init(). */
void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

/*
 * Release the netmap rings (and, for real rings, their buffers) of
 * adapter na. Rings still in use (users > 0 or NKR_NEEDRING set) are
 * skipped; fake rings give their buffers back without freeing them.
 */
static void
netmap_free_rings(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT deleting ring %s (ring %p, users %d neekring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("deleting ring %s", kring->name);
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				ND("freeing bufs for %s", kring->name);
				netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
			} else {
				ND("NOT freeing bufs for %s", kring->name);
			}
			netmap_ring_free(na->nm_mem, ring);
			kring->ring = NULL;
		}
	}
}

/* call with NMA_LOCK held *
 *
 * Allocate netmap rings and buffers for this card
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
				/* uneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(na->nm_mem, len);
			if (ring == NULL) {
				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			ND("txring at %p", ring);
			kring->ring = ring;
			/* the ring fields are const in the userspace-visible
			 * layout; cast through uintptr_t to initialize them */
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(na->nm_mem, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(na->nm_mem);
			ND("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			ND("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	return 0;

cleanup:
	/* we cannot actually cleanup here, since we don't own kring->users
	 * and kring->nr_klags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
	 * to do the cleanup
	 */

	return ENOMEM;
}

/* nmd_rings_delete method: release the adapter's rings and buffers. */
static void
netmap_mem2_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	netmap_free_rings(na);
}


/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
static struct netmap_if *
netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
	enum txrx t;

	ntot = 0;
	for_rx_tx(t) {
		/* account for the (eventually fake) host rings */
		n[t] = netmap_all_rings(na, t);
		ntot += n[t];
	}
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
				&& i < priv->np_qlast[NR_TX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->tx_rings[i]->ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
	}
	for (i = 0; i < n[NR_RX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
				&& i < priv->np_qlast[NR_RX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->rx_rings[i]->ring) - base;
		}
		/* rx offsets are stored after all tx offsets */
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
	}

	return (nifp);
}

/* nmd_if_delete method: release a netmap_if and any extra buffers
 * still attached to it. */
static void
netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(na->nm_mem, nifp);
}

/* nmd_deref method: only logs the remaining active count; actual
 * refcounting happens in the generic netmap_mem_put path. */
static void
netmap_mem2_deref(struct netmap_mem_d *nmd)
{

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("active = %d", nmd->active);

}

/* ops vector for the standard ("mem2") allocators */
struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

/*
 * Fill a nmreq_pools_info request with the size, id and per-pool
 * offsets/object counts/object sizes of allocator nmd. The pools are
 * laid out contiguously: IF pool at offset 0, then RING, then BUF.
 */
int
netmap_mem_pools_info_get(struct nmreq_pools_info *req,
				struct netmap_mem_d *nmd)
{
	int ret;

	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
					&req->nr_mem_id);
	if (ret) {
		return ret;
	}

	NMA_LOCK(nmd);
	req->nr_if_pool_offset = 0;
	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;

	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;

	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
			     nmd->pools[NETMAP_RING_POOL].memtotal;
	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
	NMA_UNLOCK(nmd);

	return 0;
}

#ifdef WITH_EXTMEM
/* allocator backed by user-supplied external memory */
struct netmap_mem_ext {
	struct netmap_mem_d up;	/* base allocator, must be first */

	struct nm_os_extmem *os;		/* OS handle for the user pages */
	struct netmap_mem_ext *next, *prev;	/* links in netmap_mem_ext_list */
};

/* call with nm_mem_list_lock held */
/* prepend e to the global list of external allocators */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* call with nm_mem_list_lock held */
/* unlink e from the global list of external allocators */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

/* Look for an existing external allocator backed by the same user
 * memory as `os`; on a hit take a reference and return it, else NULL. */
static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
	return e;
}


/* nmd_delete method for external allocators: unregister, release the
 * luts (the pages themselves belong to the user) and the OS handle,
 * then fall through to the common mem2 destructor. */
static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->os)
		nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
}

/* nmd_config method: external memory has a fixed layout, nothing to do */
static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	return 0;
}

/* ops vector for external allocators: same as the global ops except
 * for config (no-op) and delete (also releases the user pages handle) */
struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

/*
 * Create (or find and reference) an external allocator over the user
 * memory at `usrptr`, laid out according to `pi` (zero fields default
 * to netmap_min_priv_params). Objects that would straddle
 * non-contiguous pages are marked in invalid_bitmap and dropped.
 * Returns NULL on failure with the error code in *perror (if non-NULL).
 */
struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
{
	int error = 0;
	int i, j;
	struct netmap_mem_ext *nme;
	char *clust;
	size_t off;
	struct nm_os_extmem *os = NULL;
	int nr_pages;

	// XXX sanity checks
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("if %d %d ring %d %d buf %d %d",
			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);

	os = nm_os_extmem_create(usrptr, pi, &error);
	if (os == NULL) {
		nm_prerr("os extmem creation failed");
		goto out;
	}

	/* reuse an existing allocator over the same user memory, if any */
	nme = netmap_mem_ext_search(os);
	if (nme) {
		nm_os_extmem_delete(os);
		return &nme->up;
	}
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("not found, creating new");

	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			&netmap_mem_ext_ops,
			&error);
	if (nme == NULL)
		goto out_unmap;

	nr_pages = nm_os_extmem_nr_pages(os);

	/* from now on pages will be released by nme destructor;
	 * we let res = 0 to prevent release in out_unmap below
	 */
	nme->os = os;
	os = NULL; /* pass ownership */

	clust = nm_os_extmem_nextpage(nme->os);
	off = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];

		/* external memory: one object per cluster */
		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;

		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		/* NOTE(review): the bitmap stores 32 valid-bits per uint32_t
		 * (see the j>>5 indexing below), but slots are computed by
		 * dividing by sizeof(uint32_t)==4 — this over-allocates by 8x;
		 * harmless but worth confirming upstream. */
		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		if (nr_pages == 0) {
			p->objtotal = 0;
			p->memtotal = 0;
			p->objfree = 0;
			continue;
		}

		for (j = 0; j < o->num && nr_pages > 0; j++) {
			size_t noff;

			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
			ND("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
				off = noff;
				continue;
			}
			ND("too big, recomputing offset...");
			/* object crosses one or more page boundaries: walk to
			 * the page it ends in, checking contiguity on the way */
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				noff -= PAGE_SIZE;
				clust = nm_os_extmem_nextpage(nme->os);
				nr_pages--;
				ND("noff %zu page %p nr_pages %d", noff,
						page_to_virt(*pages), nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
					(nr_pages == 0 ||
					 old_clust + PAGE_SIZE != clust))
				{
					/* out of space or non contiguous,
					 * drop this object
					 * */
					p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
					ND("non contiguous at off %zu, drop", noff);
				}
				if (nr_pages == 0)
					break;
			}
			off = noff;
		}
		p->objtotal = j;
		p->numclusters = p->objtotal;
		p->memtotal = j * p->_objsize;
		ND("%d memtotal %u", j, p->memtotal);
	}

	netmap_mem_ext_register(nme);

	return &nme->up;

out_delete:
	netmap_mem_put(&nme->up);
out_unmap:
	if (os)
		nm_os_extmem_delete(os);
out:
	if (perror)
		*perror = error;
	return NULL;

}
#endif /* WITH_EXTMEM */


#ifdef WITH_PTNETMAP
/* one passthrough interface attached to a guest allocator */
struct mem_pt_if {
	struct mem_pt_if *next;		/* next in ptnmd->pt_ifs */
	struct ifnet *ifp;		/* the passthrough interface */
	unsigned int nifp_offset;	/* offset of its netmap_if in host memory */
};

/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;

	vm_paddr_t nm_paddr;            /* physical address in the guest */
	void *nm_addr;                  /* virtual address in the guest */
	struct netmap_lut buf_lut;      /* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;         /* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

/* Link a passthrough interface to a passthrough netmap allocator.
*/ 2380341477Svmaffionestatic int 2381341477Svmaffionenetmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp, 2382341477Svmaffione unsigned int nifp_offset) 2383341477Svmaffione{ 2384341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2385341477Svmaffione struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif)); 2386341477Svmaffione 2387341477Svmaffione if (!ptif) { 2388341477Svmaffione return ENOMEM; 2389341477Svmaffione } 2390341477Svmaffione 2391341477Svmaffione NMA_LOCK(nmd); 2392341477Svmaffione 2393341477Svmaffione ptif->ifp = ifp; 2394341477Svmaffione ptif->nifp_offset = nifp_offset; 2395341477Svmaffione 2396341477Svmaffione if (ptnmd->pt_ifs) { 2397341477Svmaffione ptif->next = ptnmd->pt_ifs; 2398341477Svmaffione } 2399341477Svmaffione ptnmd->pt_ifs = ptif; 2400341477Svmaffione 2401341477Svmaffione NMA_UNLOCK(nmd); 2402341477Svmaffione 2403342033Svmaffione nm_prinf("ifp=%s,nifp_offset=%u", 2404342033Svmaffione ptif->ifp->if_xname, ptif->nifp_offset); 2405341477Svmaffione 2406341477Svmaffione return 0; 2407341477Svmaffione} 2408341477Svmaffione 2409341477Svmaffione/* Called with NMA_LOCK(nmd) held. */ 2410341477Svmaffionestatic struct mem_pt_if * 2411341477Svmaffionenetmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp) 2412341477Svmaffione{ 2413341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2414341477Svmaffione struct mem_pt_if *curr; 2415341477Svmaffione 2416341477Svmaffione for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { 2417341477Svmaffione if (curr->ifp == ifp) { 2418341477Svmaffione return curr; 2419341477Svmaffione } 2420341477Svmaffione } 2421341477Svmaffione 2422341477Svmaffione return NULL; 2423341477Svmaffione} 2424341477Svmaffione 2425341477Svmaffione/* Unlink a passthrough interface from a passthrough netmap allocator. 
*/ 2426341477Svmaffioneint 2427341477Svmaffionenetmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp) 2428341477Svmaffione{ 2429341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2430341477Svmaffione struct mem_pt_if *prev = NULL; 2431341477Svmaffione struct mem_pt_if *curr; 2432341477Svmaffione int ret = -1; 2433341477Svmaffione 2434341477Svmaffione NMA_LOCK(nmd); 2435341477Svmaffione 2436341477Svmaffione for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { 2437341477Svmaffione if (curr->ifp == ifp) { 2438341477Svmaffione if (prev) { 2439341477Svmaffione prev->next = curr->next; 2440341477Svmaffione } else { 2441341477Svmaffione ptnmd->pt_ifs = curr->next; 2442341477Svmaffione } 2443341477Svmaffione D("removed (ifp=%p,nifp_offset=%u)", 2444341477Svmaffione curr->ifp, curr->nifp_offset); 2445341477Svmaffione nm_os_free(curr); 2446341477Svmaffione ret = 0; 2447341477Svmaffione break; 2448341477Svmaffione } 2449341477Svmaffione prev = curr; 2450341477Svmaffione } 2451341477Svmaffione 2452341477Svmaffione NMA_UNLOCK(nmd); 2453341477Svmaffione 2454341477Svmaffione return ret; 2455341477Svmaffione} 2456341477Svmaffione 2457341477Svmaffionestatic int 2458341477Svmaffionenetmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 2459341477Svmaffione{ 2460341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2461341477Svmaffione 2462341477Svmaffione if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { 2463341477Svmaffione return EINVAL; 2464341477Svmaffione } 2465341477Svmaffione 2466341477Svmaffione *lut = ptnmd->buf_lut; 2467341477Svmaffione return 0; 2468341477Svmaffione} 2469341477Svmaffione 2470341477Svmaffionestatic int 2471341477Svmaffionenetmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size, 2472341477Svmaffione u_int *memflags, uint16_t *id) 2473341477Svmaffione{ 2474341477Svmaffione int error = 0; 2475341477Svmaffione 2476341477Svmaffione error = 
nmd->ops->nmd_config(nmd); 2477341477Svmaffione if (error) 2478341477Svmaffione goto out; 2479341477Svmaffione 2480341477Svmaffione if (size) 2481341477Svmaffione *size = nmd->nm_totalsize; 2482341477Svmaffione if (memflags) 2483341477Svmaffione *memflags = nmd->flags; 2484341477Svmaffione if (id) 2485341477Svmaffione *id = nmd->nm_id; 2486341477Svmaffione 2487341477Svmaffioneout: 2488341477Svmaffione 2489341477Svmaffione return error; 2490341477Svmaffione} 2491341477Svmaffione 2492341477Svmaffionestatic vm_paddr_t 2493341477Svmaffionenetmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) 2494341477Svmaffione{ 2495341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2496341477Svmaffione vm_paddr_t paddr; 2497341477Svmaffione /* if the offset is valid, just return csb->base_addr + off */ 2498341477Svmaffione paddr = (vm_paddr_t)(ptnmd->nm_paddr + off); 2499341477Svmaffione ND("off %lx padr %lx", off, (unsigned long)paddr); 2500341477Svmaffione return paddr; 2501341477Svmaffione} 2502341477Svmaffione 2503341477Svmaffionestatic int 2504341477Svmaffionenetmap_mem_pt_guest_config(struct netmap_mem_d *nmd) 2505341477Svmaffione{ 2506341477Svmaffione /* nothing to do, we are configured on creation 2507341477Svmaffione * and configuration never changes thereafter 2508341477Svmaffione */ 2509341477Svmaffione return 0; 2510341477Svmaffione} 2511341477Svmaffione 2512341477Svmaffionestatic int 2513341477Svmaffionenetmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd) 2514341477Svmaffione{ 2515341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2516341477Svmaffione uint64_t mem_size; 2517341477Svmaffione uint32_t bufsize; 2518341477Svmaffione uint32_t nbuffers; 2519341477Svmaffione uint32_t poolofs; 2520341477Svmaffione vm_paddr_t paddr; 2521341477Svmaffione char *vaddr; 2522341477Svmaffione int i; 2523341477Svmaffione int error = 0; 2524341477Svmaffione 2525341477Svmaffione if (nmd->flags & 
NETMAP_MEM_FINALIZED) 2526341477Svmaffione goto out; 2527341477Svmaffione 2528341477Svmaffione if (ptnmd->ptn_dev == NULL) { 2529341477Svmaffione D("ptnetmap memdev not attached"); 2530341477Svmaffione error = ENOMEM; 2531341477Svmaffione goto out; 2532341477Svmaffione } 2533341477Svmaffione /* Map memory through ptnetmap-memdev BAR. */ 2534341477Svmaffione error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr, 2535341477Svmaffione &ptnmd->nm_addr, &mem_size); 2536341477Svmaffione if (error) 2537341477Svmaffione goto out; 2538341477Svmaffione 2539341477Svmaffione /* Initialize the lut using the information contained in the 2540341477Svmaffione * ptnetmap memory device. */ 2541341477Svmaffione bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, 2542341477Svmaffione PTNET_MDEV_IO_BUF_POOL_OBJSZ); 2543341477Svmaffione nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, 2544341477Svmaffione PTNET_MDEV_IO_BUF_POOL_OBJNUM); 2545341477Svmaffione 2546341477Svmaffione /* allocate the lut */ 2547341477Svmaffione if (ptnmd->buf_lut.lut == NULL) { 2548341477Svmaffione D("allocating lut"); 2549341477Svmaffione ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers); 2550341477Svmaffione if (ptnmd->buf_lut.lut == NULL) { 2551341477Svmaffione D("lut allocation failed"); 2552341477Svmaffione return ENOMEM; 2553341477Svmaffione } 2554341477Svmaffione } 2555341477Svmaffione 2556341477Svmaffione /* we have physically contiguous memory mapped through PCI BAR */ 2557341477Svmaffione poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, 2558341477Svmaffione PTNET_MDEV_IO_BUF_POOL_OFS); 2559341477Svmaffione vaddr = (char *)(ptnmd->nm_addr) + poolofs; 2560341477Svmaffione paddr = ptnmd->nm_paddr + poolofs; 2561341477Svmaffione 2562341477Svmaffione for (i = 0; i < nbuffers; i++) { 2563341477Svmaffione ptnmd->buf_lut.lut[i].vaddr = vaddr; 2564341477Svmaffione vaddr += bufsize; 2565341477Svmaffione paddr += bufsize; 2566341477Svmaffione } 2567341477Svmaffione 2568341477Svmaffione ptnmd->buf_lut.objtotal 
= nbuffers; 2569341477Svmaffione ptnmd->buf_lut.objsize = bufsize; 2570341477Svmaffione nmd->nm_totalsize = (unsigned int)mem_size; 2571341477Svmaffione 2572341477Svmaffione /* Initialize these fields as are needed by 2573341477Svmaffione * netmap_mem_bufsize(). 2574341477Svmaffione * XXX please improve this, why do we need this 2575341477Svmaffione * replication? maybe we nmd->pools[] should no be 2576341477Svmaffione * there for the guest allocator? */ 2577341477Svmaffione nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize; 2578341477Svmaffione nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers; 2579341477Svmaffione 2580341477Svmaffione nmd->flags |= NETMAP_MEM_FINALIZED; 2581341477Svmaffioneout: 2582341477Svmaffione return error; 2583341477Svmaffione} 2584341477Svmaffione 2585341477Svmaffionestatic void 2586341477Svmaffionenetmap_mem_pt_guest_deref(struct netmap_mem_d *nmd) 2587341477Svmaffione{ 2588341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2589341477Svmaffione 2590341477Svmaffione if (nmd->active == 1 && 2591341477Svmaffione (nmd->flags & NETMAP_MEM_FINALIZED)) { 2592341477Svmaffione nmd->flags &= ~NETMAP_MEM_FINALIZED; 2593341477Svmaffione /* unmap ptnetmap-memdev memory */ 2594341477Svmaffione if (ptnmd->ptn_dev) { 2595341477Svmaffione nm_os_pt_memdev_iounmap(ptnmd->ptn_dev); 2596341477Svmaffione } 2597341477Svmaffione ptnmd->nm_addr = NULL; 2598341477Svmaffione ptnmd->nm_paddr = 0; 2599341477Svmaffione } 2600341477Svmaffione} 2601341477Svmaffione 2602341477Svmaffionestatic ssize_t 2603341477Svmaffionenetmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr) 2604341477Svmaffione{ 2605341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2606341477Svmaffione 2607341477Svmaffione return (const char *)(vaddr) - (char *)(ptnmd->nm_addr); 2608341477Svmaffione} 2609341477Svmaffione 2610341477Svmaffionestatic void 2611341477Svmaffionenetmap_mem_pt_guest_delete(struct netmap_mem_d *nmd) 
2612341477Svmaffione{ 2613341477Svmaffione if (nmd == NULL) 2614341477Svmaffione return; 2615341477Svmaffione if (netmap_verbose) 2616341477Svmaffione D("deleting %p", nmd); 2617341477Svmaffione if (nmd->active > 0) 2618341477Svmaffione D("bug: deleting mem allocator with active=%d!", nmd->active); 2619341477Svmaffione if (netmap_verbose) 2620341477Svmaffione D("done deleting %p", nmd); 2621341477Svmaffione NMA_LOCK_DESTROY(nmd); 2622341477Svmaffione nm_os_free(nmd); 2623341477Svmaffione} 2624341477Svmaffione 2625341477Svmaffionestatic struct netmap_if * 2626341477Svmaffionenetmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) 2627341477Svmaffione{ 2628341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; 2629341477Svmaffione struct mem_pt_if *ptif; 2630341477Svmaffione struct netmap_if *nifp = NULL; 2631341477Svmaffione 2632341477Svmaffione ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); 2633341477Svmaffione if (ptif == NULL) { 2634341477Svmaffione D("Error: interface %p is not in passthrough", na->ifp); 2635341477Svmaffione goto out; 2636341477Svmaffione } 2637341477Svmaffione 2638341477Svmaffione nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) + 2639341477Svmaffione ptif->nifp_offset); 2640341477Svmaffioneout: 2641341477Svmaffione return nifp; 2642341477Svmaffione} 2643341477Svmaffione 2644341477Svmaffionestatic void 2645341477Svmaffionenetmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) 2646341477Svmaffione{ 2647341477Svmaffione struct mem_pt_if *ptif; 2648341477Svmaffione 2649341477Svmaffione ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); 2650341477Svmaffione if (ptif == NULL) { 2651341477Svmaffione D("Error: interface %p is not in passthrough", na->ifp); 2652341477Svmaffione } 2653341477Svmaffione} 2654341477Svmaffione 2655341477Svmaffionestatic int 2656341477Svmaffionenetmap_mem_pt_guest_rings_create(struct netmap_adapter *na) 
2657341477Svmaffione{ 2658341477Svmaffione struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; 2659341477Svmaffione struct mem_pt_if *ptif; 2660341477Svmaffione struct netmap_if *nifp; 2661341477Svmaffione int i, error = -1; 2662341477Svmaffione 2663341477Svmaffione ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); 2664341477Svmaffione if (ptif == NULL) { 2665341477Svmaffione D("Error: interface %p is not in passthrough", na->ifp); 2666341477Svmaffione goto out; 2667341477Svmaffione } 2668341477Svmaffione 2669341477Svmaffione 2670341477Svmaffione /* point each kring to the corresponding backend ring */ 2671341477Svmaffione nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset); 2672341477Svmaffione for (i = 0; i < netmap_all_rings(na, NR_TX); i++) { 2673341477Svmaffione struct netmap_kring *kring = na->tx_rings[i]; 2674341477Svmaffione if (kring->ring) 2675341477Svmaffione continue; 2676341477Svmaffione kring->ring = (struct netmap_ring *) 2677341477Svmaffione ((char *)nifp + nifp->ring_ofs[i]); 2678341477Svmaffione } 2679341477Svmaffione for (i = 0; i < netmap_all_rings(na, NR_RX); i++) { 2680341477Svmaffione struct netmap_kring *kring = na->rx_rings[i]; 2681341477Svmaffione if (kring->ring) 2682341477Svmaffione continue; 2683341477Svmaffione kring->ring = (struct netmap_ring *) 2684341477Svmaffione ((char *)nifp + 2685342033Svmaffione nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]); 2686341477Svmaffione } 2687341477Svmaffione 2688341477Svmaffione error = 0; 2689341477Svmaffioneout: 2690341477Svmaffione return error; 2691341477Svmaffione} 2692341477Svmaffione 2693341477Svmaffionestatic void 2694341477Svmaffionenetmap_mem_pt_guest_rings_delete(struct netmap_adapter *na) 2695341477Svmaffione{ 2696341477Svmaffione#if 0 2697341477Svmaffione enum txrx t; 2698341477Svmaffione 2699341477Svmaffione for_rx_tx(t) { 2700341477Svmaffione u_int i; 2701341477Svmaffione for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { 
2702341477Svmaffione struct netmap_kring *kring = &NMR(na, t)[i]; 2703341477Svmaffione 2704341477Svmaffione kring->ring = NULL; 2705341477Svmaffione } 2706341477Svmaffione } 2707341477Svmaffione#endif 2708341477Svmaffione} 2709341477Svmaffione 2710341477Svmaffionestatic struct netmap_mem_ops netmap_mem_pt_guest_ops = { 2711341477Svmaffione .nmd_get_lut = netmap_mem_pt_guest_get_lut, 2712341477Svmaffione .nmd_get_info = netmap_mem_pt_guest_get_info, 2713341477Svmaffione .nmd_ofstophys = netmap_mem_pt_guest_ofstophys, 2714341477Svmaffione .nmd_config = netmap_mem_pt_guest_config, 2715341477Svmaffione .nmd_finalize = netmap_mem_pt_guest_finalize, 2716341477Svmaffione .nmd_deref = netmap_mem_pt_guest_deref, 2717341477Svmaffione .nmd_if_offset = netmap_mem_pt_guest_if_offset, 2718341477Svmaffione .nmd_delete = netmap_mem_pt_guest_delete, 2719341477Svmaffione .nmd_if_new = netmap_mem_pt_guest_if_new, 2720341477Svmaffione .nmd_if_delete = netmap_mem_pt_guest_if_delete, 2721341477Svmaffione .nmd_rings_create = netmap_mem_pt_guest_rings_create, 2722341477Svmaffione .nmd_rings_delete = netmap_mem_pt_guest_rings_delete 2723341477Svmaffione}; 2724341477Svmaffione 2725341477Svmaffione/* Called with nm_mem_list_lock held. 
*/ 2726341477Svmaffionestatic struct netmap_mem_d * 2727341477Svmaffionenetmap_mem_pt_guest_find_memid(nm_memid_t mem_id) 2728341477Svmaffione{ 2729341477Svmaffione struct netmap_mem_d *mem = NULL; 2730341477Svmaffione struct netmap_mem_d *scan = netmap_last_mem_d; 2731341477Svmaffione 2732341477Svmaffione do { 2733341477Svmaffione /* find ptnetmap allocator through host ID */ 2734341477Svmaffione if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref && 2735341477Svmaffione ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) { 2736341477Svmaffione mem = scan; 2737341477Svmaffione mem->refcount++; 2738341477Svmaffione NM_DBG_REFC(mem, __FUNCTION__, __LINE__); 2739341477Svmaffione break; 2740341477Svmaffione } 2741341477Svmaffione scan = scan->next; 2742341477Svmaffione } while (scan != netmap_last_mem_d); 2743341477Svmaffione 2744341477Svmaffione return mem; 2745341477Svmaffione} 2746341477Svmaffione 2747341477Svmaffione/* Called with nm_mem_list_lock held. */ 2748341477Svmaffionestatic struct netmap_mem_d * 2749341477Svmaffionenetmap_mem_pt_guest_create(nm_memid_t mem_id) 2750341477Svmaffione{ 2751341477Svmaffione struct netmap_mem_ptg *ptnmd; 2752341477Svmaffione int err = 0; 2753341477Svmaffione 2754341477Svmaffione ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg)); 2755341477Svmaffione if (ptnmd == NULL) { 2756341477Svmaffione err = ENOMEM; 2757341477Svmaffione goto error; 2758341477Svmaffione } 2759341477Svmaffione 2760341477Svmaffione ptnmd->up.ops = &netmap_mem_pt_guest_ops; 2761341477Svmaffione ptnmd->host_mem_id = mem_id; 2762341477Svmaffione ptnmd->pt_ifs = NULL; 2763341477Svmaffione 2764341477Svmaffione /* Assign new id in the guest (We have the lock) */ 2765341477Svmaffione err = nm_mem_assign_id_locked(&ptnmd->up); 2766341477Svmaffione if (err) 2767341477Svmaffione goto error; 2768341477Svmaffione 2769341477Svmaffione ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED; 2770341477Svmaffione ptnmd->up.flags |= NETMAP_MEM_IO; 2771341477Svmaffione 
2772341477Svmaffione NMA_LOCK_INIT(&ptnmd->up); 2773341477Svmaffione 2774341477Svmaffione snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id); 2775341477Svmaffione 2776341477Svmaffione 2777341477Svmaffione return &ptnmd->up; 2778341477Svmaffioneerror: 2779341477Svmaffione netmap_mem_pt_guest_delete(&ptnmd->up); 2780341477Svmaffione return NULL; 2781341477Svmaffione} 2782341477Svmaffione 2783341477Svmaffione/* 2784341477Svmaffione * find host id in guest allocators and create guest allocator 2785341477Svmaffione * if it is not there 2786341477Svmaffione */ 2787341477Svmaffionestatic struct netmap_mem_d * 2788341477Svmaffionenetmap_mem_pt_guest_get(nm_memid_t mem_id) 2789341477Svmaffione{ 2790341477Svmaffione struct netmap_mem_d *nmd; 2791341477Svmaffione 2792341477Svmaffione NM_MTX_LOCK(nm_mem_list_lock); 2793341477Svmaffione nmd = netmap_mem_pt_guest_find_memid(mem_id); 2794341477Svmaffione if (nmd == NULL) { 2795341477Svmaffione nmd = netmap_mem_pt_guest_create(mem_id); 2796341477Svmaffione } 2797341477Svmaffione NM_MTX_UNLOCK(nm_mem_list_lock); 2798341477Svmaffione 2799341477Svmaffione return nmd; 2800341477Svmaffione} 2801341477Svmaffione 2802341477Svmaffione/* 2803341477Svmaffione * The guest allocator can be created by ptnetmap_memdev (during the device 2804341477Svmaffione * attach) or by ptnetmap device (ptnet), during the netmap_attach. 2805341477Svmaffione * 2806341477Svmaffione * The order is not important (we have different order in LINUX and FreeBSD). 2807341477Svmaffione * The first one, creates the device, and the second one simply attaches it. 
2808341477Svmaffione */ 2809341477Svmaffione 2810341477Svmaffione/* Called when ptnetmap_memdev is attaching, to attach a new allocator in 2811341477Svmaffione * the guest */ 2812341477Svmaffionestruct netmap_mem_d * 2813341477Svmaffionenetmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id) 2814341477Svmaffione{ 2815341477Svmaffione struct netmap_mem_d *nmd; 2816341477Svmaffione struct netmap_mem_ptg *ptnmd; 2817341477Svmaffione 2818341477Svmaffione nmd = netmap_mem_pt_guest_get(mem_id); 2819341477Svmaffione 2820341477Svmaffione /* assign this device to the guest allocator */ 2821341477Svmaffione if (nmd) { 2822341477Svmaffione ptnmd = (struct netmap_mem_ptg *)nmd; 2823341477Svmaffione ptnmd->ptn_dev = ptn_dev; 2824341477Svmaffione } 2825341477Svmaffione 2826341477Svmaffione return nmd; 2827341477Svmaffione} 2828341477Svmaffione 2829341477Svmaffione/* Called when ptnet device is attaching */ 2830341477Svmaffionestruct netmap_mem_d * 2831341477Svmaffionenetmap_mem_pt_guest_new(struct ifnet *ifp, 2832341477Svmaffione unsigned int nifp_offset, 2833341477Svmaffione unsigned int memid) 2834341477Svmaffione{ 2835341477Svmaffione struct netmap_mem_d *nmd; 2836341477Svmaffione 2837341477Svmaffione if (ifp == NULL) { 2838341477Svmaffione return NULL; 2839341477Svmaffione } 2840341477Svmaffione 2841341477Svmaffione nmd = netmap_mem_pt_guest_get((nm_memid_t)memid); 2842341477Svmaffione 2843341477Svmaffione if (nmd) { 2844341477Svmaffione netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset); 2845341477Svmaffione } 2846341477Svmaffione 2847341477Svmaffione return nmd; 2848341477Svmaffione} 2849341477Svmaffione 2850342033Svmaffione#endif /* WITH_PTNETMAP */ 2851