netmap_mem2.c revision 344047
/*
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: stable/11/sys/dev/netmap/netmap_mem2.c 344047 2019-02-12 09:26:05Z vmaffione $");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>		/* MALLOC_DEFINE */
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

/* M_NETMAP only used in here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM  8*4096      /* if too big, it takes too much time to allocate */
#else
#define NETMAP_BUF_MAX_NUM 20*4096*2	/* large machine */
#endif

#define NETMAP_POOL_MAX_NAMSZ	32


enum {
	NETMAP_IF_POOL   = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;

	u_int last_size;
	u_int last_num;
};

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	u_int objtotal;         /* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;          /* number of free objects. */

	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap; /* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	int	alloc_done;	/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;       /* cluster size */
	u_int _clustentries;    /* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)         NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int  (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t  (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *,
					 struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int  (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};
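/*
 * A memory backend is selected by filling a struct netmap_mem_ops and
 * pointing nmd->ops at it. As an illustrative sketch only (the real
 * table lives elsewhere in this file), the default allocator is wired
 * up roughly like this:
 *
 *	struct netmap_mem_ops netmap_mem_global_ops = {
 *		.nmd_get_lut	= netmap_mem2_get_lut,
 *		.nmd_get_info	= netmap_mem2_get_info,
 *		.nmd_ofstophys	= netmap_mem2_ofstophys,
 *		...
 *	};
 *
 * The netmap_mem_*() wrappers below just take NMA_LOCK and dispatch
 * through this table.
 */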

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;  /* protect the allocator */
	u_int nm_totalsize; /* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;	/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

int
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_lut(nmd, lut);
	NMA_UNLOCK(nmd);

	return rv;
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
		u_int *memflags, nm_memid_t *memid)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
	NMA_UNLOCK(nmd);

	return rv;
}

vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}

static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	if (nmd->active) {
		/* already in use. Not fatal, but we
		 * cannot change the configuration
		 */
		return 0;
	}

	return nmd->ops->nmd_config(nmd);
}

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
{
	ssize_t rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_if_offset(nmd, off);
	NMA_UNLOCK(nmd);

	return rv;
}

static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}

struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nifp = nmd->ops->nmd_if_new(na, priv);
	NMA_UNLOCK(nmd);

	return nifp;
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_if_delete(na, nif);
	NMA_UNLOCK(nmd);
}

int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	int rv;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_rings_create(na);
	NMA_UNLOCK(nmd);

	return rv;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_rings_delete(na);
	NMA_UNLOCK(nmd);
}

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
static void nm_mem_release_id(struct netmap_mem_d *);

nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%d mem[%d] -> %d", line, (nmd)->nm_id, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
NM_MTX_T nm_mem_list_lock;

struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}

void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd);

	if (!nmd->lasterr && na->pdev) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}

static int
nm_isset(uint32_t *bitmap, u_int i)
{
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
}

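/*
 * Bit i of the free bitmap lives in 32-bit word i>>5, at bit position
 * i & 31. For example, i = 70 selects word 70>>5 = 2 and mask
 * 1U << (70 & 31U) = 1U << 6.
 */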

static int
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
{
	u_int n, j;

	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
			    p->name);
			return ENOMEM;
		}
		p->bitmap_slots = n;
	} else {
		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	}

	p->objfree = 0;
	/*
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	 * free.
	 */
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
			continue;
		}
		p->bitmap[ (j>>5) ] |=  ( 1U << (j & 31U) );
		p->objfree++;
	}

	if (netmap_verbose)
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		if (netmap_verbose)
			nm_prerr("%s: no objects available", p->name);
		return ENOMEM;
	}

	return 0;
}

static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
	}
	return 0;
}
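/*
 * Note that ~3U = 0xfffffffc: bits 0 and 1 of the first bitmap word stay
 * clear, so the two reserved buffers can never be handed out by the
 * object allocator below.
 */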
467
468int
469netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
470{
471	int last_user = 0;
472	NMA_LOCK(nmd);
473	if (na->active_fds <= 0)
474		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
475	if (nmd->active == 1) {
476		last_user = 1;
477		/*
478		 * Reset the allocator when it falls out of use so that any
479		 * pool resources leaked by unclean application exits are
480		 * reclaimed.
481		 */
482		netmap_mem_init_bitmaps(nmd);
483	}
484	nmd->ops->nmd_deref(nmd);
485
486	nmd->active--;
487	if (last_user) {
488		nmd->nm_grp = -1;
489		nmd->lasterr = 0;
490	}
491
492	NMA_UNLOCK(nmd);
493	return last_user;
494}
495
496
497/* accessor functions */
498static int
499netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
500{
501	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
502#ifdef __FreeBSD__
503	lut->plut = lut->lut;
504#endif
505	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
506	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
507
508	return 0;
509}
510
511static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
512	[NETMAP_IF_POOL] = {
513		.size = 1024,
514		.num  = 2,
515	},
516	[NETMAP_RING_POOL] = {
517		.size = 5*PAGE_SIZE,
518		.num  = 4,
519	},
520	[NETMAP_BUF_POOL] = {
521		.size = 2048,
522		.num  = 4098,
523	},
524};
525
526
527/*
528 * nm_mem is the memory allocator used for all physical interfaces
529 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,
	.nm_grp = -1,

	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};


/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name 	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x


#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s");	\
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
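/*
 * Each DECLARE_SYSCTLS() above expands to six sysctl nodes. For the
 * buffer pool, for instance, this should yield dev.netmap.buf_size,
 * buf_curr_size, buf_num, buf_curr_num, priv_buf_size and priv_buf_num,
 * so the global pools can be resized from userspace, e.g.:
 *
 *	sysctl dev.netmap.buf_num=163840
 */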

/* call with nm_mem_list_lock held */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}

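/*
 * Example: with allocators 1 and 2 on the circular list and
 * netmap_last_mem_d pointing at id 2, the scan above proposes id 3,
 * sees that the next element has id 1 != 3, and links the new
 * allocator in with id 3. Since nm_memid_t is 16 bits wide, the
 * wrap-around check only matters after ~64K allocators have been
 * created over time.
 */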
/* call with nm_mem_list_lock *not* held */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/* call with nm_mem_list_lock held */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

static int
nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
{
	int err = 0, id;
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp < 0)
		nmd->nm_grp = id;

	if (nmd->nm_grp != id) {
		if (netmap_verbose)
			nm_prerr("iommu group mismatch: %u vs %u",
					nmd->nm_grp, id);
		nmd->lasterr = err = ENOMEM;
	}

	NMA_UNLOCK(nmd);
	return err;
}

static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}

#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry * lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0; /* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}
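/*
 * Worked example: the pools are laid out back to back in the exported
 * address space (if, then ring, then buf). Assuming memtotal values of
 * 100 KB, 1 MB and 40 MB respectively, an offset of 1.2 MB is rebased
 * by the loop above past the if and ring pools (1.2 MB - 100 KB - 1 MB)
 * and the remainder is resolved through the buffer pool lut.
 */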

#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gathers all the objects that make up the pools and maps
 * them into a contiguous virtual memory space for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List (MDL) as wide as the total
 *		memory needed for the pools
 * 2 - cycle through all the objects in every pool and, for each object:
 *
 *		2a - get the list of its physical address descriptors
 *		2b - calculate the offset into the page descriptor array of
 *				the main MDL
 *		2c - copy the object's descriptors into the main MDL
 *
 * 3 - return the resulting MDL, which still needs to be mapped in userland
 *
 * The result is a single MDL that describes all the memory for the
 * objects in one place.
 */

PMDL
win32_build_user_vm_map(struct netmap_mem_d* nmd)
{
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	uint64_t memsize;
	int i, j;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalized yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			nm_prerr("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
		}
		IoFreeMdl(tempMdl);
	}
	NMA_UNLOCK(nmd);
	return mainMdl;
}

#endif /* _WIN32 */

/*
 * helper function for OS-specific mmap routines (currently only windows).
 * Given an nmd and a pool index, returns the cluster size and number of clusters.
 * Returns 0 if memory is finalized and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change.
 */

int
netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
{
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	}
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
}

static int
netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
			u_int *memflags, nm_memid_t *id)
{
	int error = 0;
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += (p->_numclusters * p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	return error;
}

/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

static ssize_t
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	return netmap_if_offset(nmd, addr);
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j = 0;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
		return NULL;
	}

	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}
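/*
 * Typical usage, as a sketch (this is how netmap_new_bufs() below drives
 * the allocator): keep 'pos' across calls so that consecutive
 * allocations resume scanning from the last bitmap word instead of
 * restarting from slot 0 every time.
 *
 *	uint32_t pos = 0, idx;
 *	void *buf = netmap_obj_malloc(p, p->_objsize, &pos, &idx);
 */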


/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		nm_prerr("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is out of the scope of the current cluster */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

unsigned
netmap_mem_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}
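/*
 * The extra-buffer list is threaded through the buffers themselves: the
 * first 4 bytes of each buffer hold the index of the next one, and
 * index 0 terminates the list. For example, after three successful
 * allocations returning indexes 10, 11 and 12, *head == 12 and the
 * chain is buf[12] -> 11, buf[11] -> 10, buf[10] -> 0.
 */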

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	nm_prdis("freeing the extra list");
	for (i = 0; head >=2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
}


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
		slot[i].ptr = 0;
	}

	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
	nm_prdis("%s: released some buffers, available: %u",
			p->name, p->objfree);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		nm_os_free(p->bitmap);
	p->bitmap = NULL;
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
		 */
		return;
	}
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator().  The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		nm_free_lut(p->lut, p->objtotal);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
	p->alloc_done = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		nm_prerr("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		nm_prerr("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}
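/*
 * Worked example, assuming PAGE_SIZE = 4096: for objsize = 2048 the
 * loop above finds i = 2 (2 * 2048 = 4096, delta = 0), so
 * clustentries = 2 and clustsize = 4096. A request for objtotal = 1001
 * then gives _numclusters = ceil(1001/2) = 501 and _objtotal = 1002.
 */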

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	if (p->lut) {
		/* if the lut is already there we assume that all the
		 * clusters have also been allocated, possibly by somebody
		 * else (e.g., extmem). In the latter case, the alloc_done flag
		 * will remain at zero, so that we will not attempt to
		 * deallocate the clusters by ourselves in
		 * netmap_reset_obj_allocator.
		 */
		return 0;
	}

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;
	p->alloc_done = 1;

	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		nm_prerr("Unable to create lookup table for '%s'", p->name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		/*
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		 */
		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			nm_prerr("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set lut state for all buffers in the current cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			p->lut[i].paddr = vtophys(clust);
#endif
		}
	}
	p->memtotal = p->numclusters * p->_clustsize;
	if (netmap_verbose)
		nm_prinf("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_mem_params_changed(struct netmap_obj_params* p)
{
	int i, rv = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
			rv = 1;
		}
	}
	return rv;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na == NULL || na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */
	nm_prdis("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL)
		return 0;
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
		return 0;
	}

	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
				p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %d KB, rings %d KB, buffers %d MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
		struct netmap_mem_ops *ops, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d);
	if (err)
		goto error_free;
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				d->name);
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_rel_id;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

error_rel_id:
	NMA_LOCK_DESTROY(d);
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
		u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring dimension may differ from
	 * the parent port ring dimension. As a compromise, we allocate
	 * twice the space that would be needed if the pipe rings were
	 * the same size as the parent rings.
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);

	return d;
}
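/*
 * Sizing example: for a port with 1 tx and 1 rx ring of 1024 slots each
 * and no pipes or extra buffers, txr and rxr become 2 (fake host rings
 * included), so the buffer pool needs (0 + 2)*1024 + (0 + 2)*1024 + 2 =
 * 4098 buffers, which matches the default in netmap_min_priv_params.
 */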


/* call with lock held */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	nm_prdis("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}

static int
netmap_mem2_finalize(struct netmap_mem_d *nmd)
{
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	return nmd->lasterr;
}

static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
	    netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

int
netmap_mem_init(void)
{
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1866				continue;
1867			}
1868			if (netmap_debug & NM_DEBUG_MEM)
1869				nm_prinf("deleting ring %s", kring->name);
1870			if (!(kring->nr_kflags & NKR_FAKERING)) {
1871				nm_prdis("freeing bufs for %s", kring->name);
1872				netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1873			} else {
1874				nm_prdis("NOT freeing bufs for %s", kring->name);
1875			}
1876			netmap_ring_free(na->nm_mem, ring);
1877			kring->ring = NULL;
1878		}
1879	}
1880}
1881
1882/* call with NMA_LOCK held *
1883 *
1884 * Allocate netmap rings and buffers for this card
1885 * The rings are contiguous, but have variable size.
1886 * The kring array must follow the layout described
1887 * in netmap_krings_create().
1888 */
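/*
 * Sketch of the resulting per-ring object (derived from the length
 * computation below; actual sizes are target-dependent):
 *
 *	+--------------------------+
 *	| struct netmap_ring       |  head/cur/tail, buf_ofs, ...
 *	+--------------------------+
 *	| slot[0]                  |
 *	| ...                      |  ndesc = kring->nkr_num_slots slots
 *	| slot[ndesc - 1]          |
 *	+--------------------------+
 */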
1889static int
1890netmap_mem2_rings_create(struct netmap_adapter *na)
1891{
1892	enum txrx t;
1893
1894	for_rx_tx(t) {
1895		u_int i;
1896
1897		for (i = 0; i < netmap_all_rings(na, t); i++) {
1898			struct netmap_kring *kring = NMR(na, t)[i];
1899			struct netmap_ring *ring = kring->ring;
1900			u_int len, ndesc;
1901
1902			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
				/* unneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
1906						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1907				continue;
1908			}
1909			if (netmap_debug & NM_DEBUG_MEM)
1910				nm_prinf("creating %s", kring->name);
1911			ndesc = kring->nkr_num_slots;
1912			len = sizeof(struct netmap_ring) +
1913				  ndesc * sizeof(struct netmap_slot);
1914			ring = netmap_ring_malloc(na->nm_mem, len);
1915			if (ring == NULL) {
1916				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
1917				goto cleanup;
1918			}
1919			nm_prdis("txring at %p", ring);
1920			kring->ring = ring;
1921			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1922			*(int64_t *)(uintptr_t)&ring->buf_ofs =
1923			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1924				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1925				netmap_ring_offset(na->nm_mem, ring);
1926
1927			/* copy values from kring */
1928			ring->head = kring->rhead;
1929			ring->cur = kring->rcur;
1930			ring->tail = kring->rtail;
1931			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
1932				netmap_mem_bufsize(na->nm_mem);
1933			nm_prdis("%s h %d c %d t %d", kring->name,
1934				ring->head, ring->cur, ring->tail);
1935			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
1936			if (!(kring->nr_kflags & NKR_FAKERING)) {
1937				/* this is a real ring */
1938				if (netmap_debug & NM_DEBUG_MEM)
1939					nm_prinf("allocating buffers for %s", kring->name);
1940				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1941					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
1942					goto cleanup;
1943				}
1944			} else {
1945				/* this is a fake ring, set all indices to 0 */
1946				if (netmap_debug & NM_DEBUG_MEM)
1947					nm_prinf("NOT allocating buffers for %s", kring->name);
1948				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
1949			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
1953		}
1954	}
1955
1956	return 0;
1957
1958cleanup:
1959	/* we cannot actually cleanup here, since we don't own kring->users
 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
1961	 * the first or zero-out the second, then call netmap_free_rings()
1962	 * to do the cleanup
1963	 */
1964
1965	return ENOMEM;
1966}
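
/*
 * A minimal sketch of the cleanup contract described above (hypothetical
 * caller-side code, not part of this file): the caller drops its kring
 * references first, then lets netmap_mem_rings_delete() reclaim the rings
 * through netmap_free_rings().
 */
#if 0
	enum txrx t;
	u_int i;

	if (netmap_mem_rings_create(na)) {
		for_rx_tx(t) {
			for (i = 0; i < netmap_all_rings(na, t); i++)
				NMR(na, t)[i]->users--;
		}
		netmap_mem_rings_delete(na);
	}
#endif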
1967
1968static void
1969netmap_mem2_rings_delete(struct netmap_adapter *na)
1970{
1971	/* last instance, release bufs and rings */
1972	netmap_free_rings(na);
1973}
1974
1975
1976/* call with NMA_LOCK held */
1977/*
1978 * Allocate the per-fd structure netmap_if.
1979 *
1980 * We assume that the configuration stored in na
1981 * (number of tx/rx rings and descs) does not change while
1982 * the interface is in netmap mode.
1983 */
1984static struct netmap_if *
1985netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
1986{
1987	struct netmap_if *nifp;
1988	ssize_t base; /* handy for relative offsets between rings and nifp */
1989	u_int i, len, n[NR_TXRX], ntot;
1990	enum txrx t;
1991
1992	ntot = 0;
1993	for_rx_tx(t) {
		/* account for the (possibly fake) host rings */
1995		n[t] = netmap_all_rings(na, t);
1996		ntot += n[t];
1997	}
1998	/*
1999	 * the descriptor is followed inline by an array of offsets
2000	 * to the tx and rx rings in the shared memory region.
2001	 */
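	/*
	 * Resulting layout (sketch):
	 *
	 *	struct netmap_if		fixed-size descriptor
	 *	ring_ofs[0 .. n[NR_TX]-1]	offsets of the tx rings
	 *	ring_ofs[n[NR_TX] .. ntot-1]	offsets of the rx rings
	 *
	 * all offsets are relative to the nifp itself.
	 */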
2002
2003	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
2004	nifp = netmap_if_malloc(na->nm_mem, len);
2005	if (nifp == NULL) {
2006		NMA_UNLOCK(na->nm_mem);
2007		return NULL;
2008	}
2009
2010	/* initialize base fields -- override const */
2011	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
2012	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
2013	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
2014
2015	/*
2016	 * fill the slots for the rx and tx rings. They contain the offset
2017	 * between the ring and nifp, so the information is usable in
2018	 * userspace to reach the ring from the nifp.
2019	 */
2020	base = netmap_if_offset(na->nm_mem, nifp);
2021	for (i = 0; i < n[NR_TX]; i++) {
2022		/* XXX instead of ofs == 0 maybe use the offset of an error
2023		 * ring, like we do for buffers? */
2024		ssize_t ofs = 0;
2025
2026		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2027				&& i < priv->np_qlast[NR_TX]) {
2028			ofs = netmap_ring_offset(na->nm_mem,
2029						 na->tx_rings[i]->ring) - base;
2030		}
2031		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2032	}
2033	for (i = 0; i < n[NR_RX]; i++) {
2034		/* XXX instead of ofs == 0 maybe use the offset of an error
2035		 * ring, like we do for buffers? */
2036		ssize_t ofs = 0;
2037
2038		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2039				&& i < priv->np_qlast[NR_RX]) {
2040			ofs = netmap_ring_offset(na->nm_mem,
2041						 na->rx_rings[i]->ring) - base;
2042		}
2043		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2044	}
2045
2046	return (nifp);
2047}
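
/*
 * Sketch of how a consumer maps the stored offsets back to ring pointers;
 * this mirrors what netmap_mem_pt_guest_rings_create() does below, and is
 * shown here only as an illustration:
 */
#if 0
	struct netmap_ring *txr = (struct netmap_ring *)
		((char *)nifp + nifp->ring_ofs[i]);	/* tx ring i */
	struct netmap_ring *rxr = (struct netmap_ring *)
		((char *)nifp +
		 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);	/* rx ring i */
#endif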
2048
2049static void
2050netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
2051{
2052	if (nifp == NULL)
2053		/* nothing to do */
2054		return;
2055	if (nifp->ni_bufs_head)
2056		netmap_extra_free(na, nifp->ni_bufs_head);
2057	netmap_if_free(na->nm_mem, nifp);
2058}
2059
2060static void
2061netmap_mem2_deref(struct netmap_mem_d *nmd)
2062{
2063
2064	if (netmap_debug & NM_DEBUG_MEM)
2065		nm_prinf("active = %d", nmd->active);
2066
2067}
2068
2069struct netmap_mem_ops netmap_mem_global_ops = {
2070	.nmd_get_lut = netmap_mem2_get_lut,
2071	.nmd_get_info = netmap_mem2_get_info,
2072	.nmd_ofstophys = netmap_mem2_ofstophys,
2073	.nmd_config = netmap_mem2_config,
2074	.nmd_finalize = netmap_mem2_finalize,
2075	.nmd_deref = netmap_mem2_deref,
2076	.nmd_delete = netmap_mem2_delete,
2077	.nmd_if_offset = netmap_mem2_if_offset,
2078	.nmd_if_new = netmap_mem2_if_new,
2079	.nmd_if_delete = netmap_mem2_if_delete,
2080	.nmd_rings_create = netmap_mem2_rings_create,
2081	.nmd_rings_delete = netmap_mem2_rings_delete
2082};
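
/*
 * All accesses to an allocator go through its ops table. Ignoring the
 * locking done by the real wrappers earlier in this file, a dispatch
 * wrapper is essentially (sketch):
 */
#if 0
static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	return nmd->ops->nmd_config(nmd);
}
#endif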
2083
2084int
2085netmap_mem_pools_info_get(struct nmreq_pools_info *req,
2086				struct netmap_mem_d *nmd)
2087{
2088	int ret;
2089
2090	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2091					&req->nr_mem_id);
2092	if (ret) {
2093		return ret;
2094	}
2095
2096	NMA_LOCK(nmd);
2097	req->nr_if_pool_offset = 0;
2098	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2099	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2100
2101	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2102	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2103	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2104
2105	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2106			     nmd->pools[NETMAP_RING_POOL].memtotal;
2107	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2108	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2109	NMA_UNLOCK(nmd);
2110
2111	return 0;
2112}
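
/*
 * Example of the reported layout, with illustrative sizes: if the IF pool
 * occupies 4096 bytes and the RING pool 524288 bytes, then
 *
 *	nr_if_pool_offset   = 0
 *	nr_ring_pool_offset = 4096
 *	nr_buf_pool_offset  = 4096 + 524288 = 528384
 *
 * i.e. the three pools sit back to back in the same memory region.
 */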
2113
2114#ifdef WITH_EXTMEM
2115struct netmap_mem_ext {
2116	struct netmap_mem_d up;
2117
2118	struct nm_os_extmem *os;
2119	struct netmap_mem_ext *next, *prev;
2120};
2121
/* takes nm_mem_ext_list_lock internally */
2123static void
2124netmap_mem_ext_register(struct netmap_mem_ext *e)
2125{
2126	NM_MTX_LOCK(nm_mem_ext_list_lock);
2127	if (netmap_mem_ext_list)
2128		netmap_mem_ext_list->prev = e;
2129	e->next = netmap_mem_ext_list;
2130	netmap_mem_ext_list = e;
2131	e->prev = NULL;
2132	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2133}
2134
/* takes nm_mem_ext_list_lock internally; the caller
 * (netmap_mem_ext_delete()) does not hold it
 */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}
2147
2148static struct netmap_mem_ext *
2149netmap_mem_ext_search(struct nm_os_extmem *os)
2150{
2151	struct netmap_mem_ext *e;
2152
2153	NM_MTX_LOCK(nm_mem_ext_list_lock);
2154	for (e = netmap_mem_ext_list; e; e = e->next) {
2155		if (nm_os_extmem_isequal(e->os, os)) {
2156			netmap_mem_get(&e->up);
2157			break;
2158		}
2159	}
2160	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2161	return e;
2162}
2163
2164
2165static void
2166netmap_mem_ext_delete(struct netmap_mem_d *d)
2167{
2168	int i;
2169	struct netmap_mem_ext *e =
2170		(struct netmap_mem_ext *)d;
2171
2172	netmap_mem_ext_unregister(e);
2173
2174	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2175		struct netmap_obj_pool *p = &d->pools[i];
2176
2177		if (p->lut) {
2178			nm_free_lut(p->lut, p->objtotal);
2179			p->lut = NULL;
2180		}
2181	}
2182	if (e->os)
2183		nm_os_extmem_delete(e->os);
2184	netmap_mem2_delete(d);
2185}
2186
2187static int
2188netmap_mem_ext_config(struct netmap_mem_d *nmd)
2189{
2190	return 0;
2191}
2192
2193struct netmap_mem_ops netmap_mem_ext_ops = {
2194	.nmd_get_lut = netmap_mem2_get_lut,
2195	.nmd_get_info = netmap_mem2_get_info,
2196	.nmd_ofstophys = netmap_mem2_ofstophys,
2197	.nmd_config = netmap_mem_ext_config,
2198	.nmd_finalize = netmap_mem2_finalize,
2199	.nmd_deref = netmap_mem2_deref,
2200	.nmd_delete = netmap_mem_ext_delete,
2201	.nmd_if_offset = netmap_mem2_if_offset,
2202	.nmd_if_new = netmap_mem2_if_new,
2203	.nmd_if_delete = netmap_mem2_if_delete,
2204	.nmd_rings_create = netmap_mem2_rings_create,
2205	.nmd_rings_delete = netmap_mem2_rings_delete
2206};
2207
2208struct netmap_mem_d *
2209netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
2210{
2211	int error = 0;
2212	int i, j;
2213	struct netmap_mem_ext *nme;
2214	char *clust;
2215	size_t off;
2216	struct nm_os_extmem *os = NULL;
2217	int nr_pages;
	/* XXX sanity checks */
2219	// XXX sanity checks
2220	if (pi->nr_if_pool_objtotal == 0)
2221		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
2222	if (pi->nr_if_pool_objsize == 0)
2223		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
2224	if (pi->nr_ring_pool_objtotal == 0)
2225		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
2226	if (pi->nr_ring_pool_objsize == 0)
2227		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
2228	if (pi->nr_buf_pool_objtotal == 0)
2229		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
2230	if (pi->nr_buf_pool_objsize == 0)
2231		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_debug & NM_DEBUG_MEM)
2233		nm_prinf("if %d %d ring %d %d buf %d %d",
2234			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
2235			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
2236			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
2237
2238	os = nm_os_extmem_create(usrptr, pi, &error);
2239	if (os == NULL) {
2240		nm_prerr("os extmem creation failed");
2241		goto out;
2242	}
2243
2244	nme = netmap_mem_ext_search(os);
2245	if (nme) {
2246		nm_os_extmem_delete(os);
2247		return &nme->up;
2248	}
	if (netmap_debug & NM_DEBUG_MEM)
2250		nm_prinf("not found, creating new");
2251
2252	nme = _netmap_mem_private_new(sizeof(*nme),
2253			(struct netmap_obj_params[]){
2254				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
2255				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
2256				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
2257			&netmap_mem_ext_ops,
2258			&error);
2259	if (nme == NULL)
2260		goto out_unmap;
2261
2262	nr_pages = nm_os_extmem_nr_pages(os);
2263
2264	/* from now on pages will be released by nme destructor;
	 * we set os = NULL below to prevent a second release in out_unmap
2266	 */
2267	nme->os = os;
2268	os = NULL; /* pass ownership */
2269
2270	clust = nm_os_extmem_nextpage(nme->os);
2271	off = 0;
2272	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2273		struct netmap_obj_pool *p = &nme->up.pools[i];
2274		struct netmap_obj_params *o = &nme->up.params[i];
2275
2276		p->_objsize = o->size;
2277		p->_clustsize = o->size;
2278		p->_clustentries = 1;
2279
2280		p->lut = nm_alloc_lut(o->num);
2281		if (p->lut == NULL) {
2282			error = ENOMEM;
2283			goto out_delete;
2284		}
2285
2286		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
2287		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
2288		if (p->invalid_bitmap == NULL) {
2289			error = ENOMEM;
2290			goto out_delete;
2291		}
2292
2293		if (nr_pages == 0) {
2294			p->objtotal = 0;
2295			p->memtotal = 0;
2296			p->objfree = 0;
2297			continue;
2298		}
2299
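		/*
		 * Pack the objects into the user pages. Illustrative example:
		 * with 2048-byte objects and 4096-byte pages, two objects fit
		 * per page; an object that would spill into a page that is
		 * not virtually contiguous with the previous one is marked in
		 * invalid_bitmap and skipped by the allocator.
		 */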
2300		for (j = 0; j < o->num && nr_pages > 0; j++) {
2301			size_t noff;
2302
2303			p->lut[j].vaddr = clust + off;
2304#if !defined(linux) && !defined(_WIN32)
2305			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
2306#endif
2307			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
2308			noff = off + p->_objsize;
2309			if (noff < PAGE_SIZE) {
2310				off = noff;
2311				continue;
2312			}
2313			nm_prdis("too big, recomputing offset...");
2314			while (noff >= PAGE_SIZE) {
2315				char *old_clust = clust;
2316				noff -= PAGE_SIZE;
2317				clust = nm_os_extmem_nextpage(nme->os);
2318				nr_pages--;
				nm_prdis("noff %zu page %p nr_pages %d", noff,
						clust, nr_pages);
2321				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
2322					(nr_pages == 0 ||
2323					 old_clust + PAGE_SIZE != clust))
2324				{
					/* out of space or non-contiguous,
					 * drop this object
					 */
					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
2329					nm_prdis("non contiguous at off %zu, drop", noff);
2330				}
2331				if (nr_pages == 0)
2332					break;
2333			}
2334			off = noff;
2335		}
2336		p->objtotal = j;
2337		p->numclusters = p->objtotal;
2338		p->memtotal = j * p->_objsize;
2339		nm_prdis("%d memtotal %u", j, p->memtotal);
2340	}
2341
2342	netmap_mem_ext_register(nme);
2343
2344	return &nme->up;
2345
2346out_delete:
2347	netmap_mem_put(&nme->up);
2348out_unmap:
2349	if (os)
2350		nm_os_extmem_delete(os);
2351out:
2352	if (perror)
2353		*perror = error;
2354	return NULL;
2355
2356}
2357#endif /* WITH_EXTMEM */
2358
2359
2360#ifdef WITH_PTNETMAP
2361struct mem_pt_if {
2362	struct mem_pt_if *next;
2363	struct ifnet *ifp;
2364	unsigned int nifp_offset;
2365};
2366
2367/* Netmap allocator for ptnetmap guests. */
2368struct netmap_mem_ptg {
2369	struct netmap_mem_d up;
2370
2371	vm_paddr_t nm_paddr;            /* physical address in the guest */
2372	void *nm_addr;                  /* virtual address in the guest */
2373	struct netmap_lut buf_lut;      /* lookup table for BUF pool in the guest */
2374	nm_memid_t host_mem_id;         /* allocator identifier in the host */
2375	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
2376	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
2377};
2378
2379/* Link a passthrough interface to a passthrough netmap allocator. */
2380static int
2381netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
2382			    unsigned int nifp_offset)
2383{
2384	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2385	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));
2386
2387	if (!ptif) {
2388		return ENOMEM;
2389	}
2390
2391	NMA_LOCK(nmd);
2392
2393	ptif->ifp = ifp;
2394	ptif->nifp_offset = nifp_offset;
2395
2396	if (ptnmd->pt_ifs) {
2397		ptif->next = ptnmd->pt_ifs;
2398	}
2399	ptnmd->pt_ifs = ptif;
2400
2401	NMA_UNLOCK(nmd);
2402
2403	nm_prinf("ifp=%s,nifp_offset=%u",
2404		ptif->ifp->if_xname, ptif->nifp_offset);
2405
2406	return 0;
2407}
2408
2409/* Called with NMA_LOCK(nmd) held. */
2410static struct mem_pt_if *
2411netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
2412{
2413	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2414	struct mem_pt_if *curr;
2415
2416	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2417		if (curr->ifp == ifp) {
2418			return curr;
2419		}
2420	}
2421
2422	return NULL;
2423}
2424
2425/* Unlink a passthrough interface from a passthrough netmap allocator. */
2426int
2427netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
2428{
2429	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2430	struct mem_pt_if *prev = NULL;
2431	struct mem_pt_if *curr;
2432	int ret = -1;
2433
2434	NMA_LOCK(nmd);
2435
2436	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2437		if (curr->ifp == ifp) {
2438			if (prev) {
2439				prev->next = curr->next;
2440			} else {
2441				ptnmd->pt_ifs = curr->next;
2442			}
2443			nm_prinf("removed (ifp=%p,nifp_offset=%u)",
2444			  curr->ifp, curr->nifp_offset);
2445			nm_os_free(curr);
2446			ret = 0;
2447			break;
2448		}
2449		prev = curr;
2450	}
2451
2452	NMA_UNLOCK(nmd);
2453
2454	return ret;
2455}
2456
2457static int
2458netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
2459{
2460	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2461
2462	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2463		return EINVAL;
2464	}
2465
2466	*lut = ptnmd->buf_lut;
2467	return 0;
2468}
2469
2470static int
2471netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
2472			     u_int *memflags, uint16_t *id)
2473{
2474	int error = 0;
2475
2476	error = nmd->ops->nmd_config(nmd);
2477	if (error)
2478		goto out;
2479
2480	if (size)
2481		*size = nmd->nm_totalsize;
2482	if (memflags)
2483		*memflags = nmd->flags;
2484	if (id)
2485		*id = nmd->nm_id;
2486
2487out:
2488
2489	return error;
2490}
2491
2492static vm_paddr_t
2493netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
2494{
2495	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2496	vm_paddr_t paddr;
	/* if the offset is valid, just return nm_paddr + off */
2498	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	nm_prdis("off %lx paddr %lx", off, (unsigned long)paddr);
2500	return paddr;
2501}
2502
2503static int
2504netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
2505{
2506	/* nothing to do, we are configured on creation
2507	 * and configuration never changes thereafter
2508	 */
2509	return 0;
2510}
2511
2512static int
2513netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
2514{
2515	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2516	uint64_t mem_size;
2517	uint32_t bufsize;
2518	uint32_t nbuffers;
2519	uint32_t poolofs;
2520	vm_paddr_t paddr;
2521	char *vaddr;
2522	int i;
2523	int error = 0;
2524
2525	if (nmd->flags & NETMAP_MEM_FINALIZED)
2526		goto out;
2527
2528	if (ptnmd->ptn_dev == NULL) {
2529		nm_prerr("ptnetmap memdev not attached");
2530		error = ENOMEM;
2531		goto out;
2532	}
2533	/* Map memory through ptnetmap-memdev BAR. */
2534	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
2535				      &ptnmd->nm_addr, &mem_size);
2536	if (error)
2537		goto out;
2538
2539	/* Initialize the lut using the information contained in the
2540	 * ptnetmap memory device. */
2541	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2542					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
2543	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2544					 PTNET_MDEV_IO_BUF_POOL_OBJNUM);
2545
2546	/* allocate the lut */
2547	if (ptnmd->buf_lut.lut == NULL) {
2548		nm_prinf("allocating lut");
2549		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
2550		if (ptnmd->buf_lut.lut == NULL) {
2551			nm_prerr("lut allocation failed");
2552			return ENOMEM;
2553		}
2554	}
2555
2556	/* we have physically contiguous memory mapped through PCI BAR */
2557	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2558					 PTNET_MDEV_IO_BUF_POOL_OFS);
2559	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
2560	paddr = ptnmd->nm_paddr + poolofs;
2561
2562	for (i = 0; i < nbuffers; i++) {
2563		ptnmd->buf_lut.lut[i].vaddr = vaddr;
2564		vaddr += bufsize;
2565		paddr += bufsize;
2566	}
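
	/*
	 * After the loop above, guest buffer i sits at
	 *
	 *	vaddr(i) = nm_addr + poolofs + i * bufsize
	 *	paddr(i) = nm_paddr + poolofs + i * bufsize
	 *
	 * (a restatement of the pointer arithmetic, for clarity).
	 */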
2567
2568	ptnmd->buf_lut.objtotal = nbuffers;
2569	ptnmd->buf_lut.objsize = bufsize;
2570	nmd->nm_totalsize = (unsigned int)mem_size;
2571
	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this, why do we need this
	 * replication? maybe nmd->pools[] should not be
	 * there for the guest allocator? */
2577	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2578	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
2579
2580	nmd->flags |= NETMAP_MEM_FINALIZED;
2581out:
2582	return error;
2583}
2584
2585static void
2586netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
2587{
2588	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2589
	if (nmd->active == 1 &&
	    (nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		}
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
	}
2600}
2601
2602static ssize_t
2603netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
2604{
2605	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2606
2607	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
2608}
2609
2610static void
2611netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
2612{
2613	if (nmd == NULL)
2614		return;
2615	if (netmap_verbose)
2616		nm_prinf("deleting %p", nmd);
2617	if (nmd->active > 0)
2618		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
2619	if (netmap_verbose)
2620		nm_prinf("done deleting %p", nmd);
2621	NMA_LOCK_DESTROY(nmd);
2622	nm_os_free(nmd);
2623}
2624
2625static struct netmap_if *
2626netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
2627{
2628	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
2629	struct mem_pt_if *ptif;
2630	struct netmap_if *nifp = NULL;
2631
2632	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2633	if (ptif == NULL) {
2634		nm_prerr("interface %s is not in passthrough", na->name);
2635		goto out;
2636	}
2637
2638	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
2639				    ptif->nifp_offset);
2640out:
2641	return nifp;
2642}
2643
2644static void
2645netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
2646{
2647	struct mem_pt_if *ptif;
2648
2649	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2650	if (ptif == NULL) {
2651		nm_prerr("interface %s is not in passthrough", na->name);
2652	}
2653}
2654
2655static int
2656netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
2657{
2658	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
2659	struct mem_pt_if *ptif;
2660	struct netmap_if *nifp;
2661	int i, error = -1;
2662
2663	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2664	if (ptif == NULL) {
2665		nm_prerr("interface %s is not in passthrough", na->name);
2666		goto out;
2667	}
2668
2669
2670	/* point each kring to the corresponding backend ring */
2671	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
2672	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
2673		struct netmap_kring *kring = na->tx_rings[i];
2674		if (kring->ring)
2675			continue;
2676		kring->ring = (struct netmap_ring *)
2677			((char *)nifp + nifp->ring_ofs[i]);
2678	}
2679	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
2680		struct netmap_kring *kring = na->rx_rings[i];
2681		if (kring->ring)
2682			continue;
2683		kring->ring = (struct netmap_ring *)
2684			((char *)nifp +
2685			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
2686	}
2687
2688	error = 0;
2689out:
2690	return error;
2691}
2692
2693static void
2694netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na)
2695{
2696#if 0
2697	enum txrx t;
2698
2699	for_rx_tx(t) {
2700		u_int i;
2701		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
2702			struct netmap_kring *kring = &NMR(na, t)[i];
2703
2704			kring->ring = NULL;
2705		}
2706	}
2707#endif
2708}
2709
2710static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
2711	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
2712	.nmd_get_info = netmap_mem_pt_guest_get_info,
2713	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
2714	.nmd_config = netmap_mem_pt_guest_config,
2715	.nmd_finalize = netmap_mem_pt_guest_finalize,
2716	.nmd_deref = netmap_mem_pt_guest_deref,
2717	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
2718	.nmd_delete = netmap_mem_pt_guest_delete,
2719	.nmd_if_new = netmap_mem_pt_guest_if_new,
2720	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
2721	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
2722	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
2723};
2724
2725/* Called with nm_mem_list_lock held. */
2726static struct netmap_mem_d *
2727netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
2728{
2729	struct netmap_mem_d *mem = NULL;
2730	struct netmap_mem_d *scan = netmap_last_mem_d;
2731
2732	do {
2733		/* find ptnetmap allocator through host ID */
2734		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
2735			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
2736			mem = scan;
2737			mem->refcount++;
2738			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
2739			break;
2740		}
2741		scan = scan->next;
2742	} while (scan != netmap_last_mem_d);
2743
2744	return mem;
2745}
2746
2747/* Called with nm_mem_list_lock held. */
2748static struct netmap_mem_d *
2749netmap_mem_pt_guest_create(nm_memid_t mem_id)
2750{
2751	struct netmap_mem_ptg *ptnmd;
2752	int err = 0;
2753
2754	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
2755	if (ptnmd == NULL) {
2756		err = ENOMEM;
2757		goto error;
2758	}
2759
2760	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
2761	ptnmd->host_mem_id = mem_id;
2762	ptnmd->pt_ifs = NULL;
2763
2764	/* Assign new id in the guest (We have the lock) */
2765	err = nm_mem_assign_id_locked(&ptnmd->up);
2766	if (err)
2767		goto error;
2768
2769	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
2770	ptnmd->up.flags |= NETMAP_MEM_IO;
2771
2772	NMA_LOCK_INIT(&ptnmd->up);
2773
2774	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
2775
2776
2777	return &ptnmd->up;
2778error:
2779	netmap_mem_pt_guest_delete(&ptnmd->up);
2780	return NULL;
2781}
2782
2783/*
2784 * find host id in guest allocators and create guest allocator
2785 * if it is not there
2786 */
2787static struct netmap_mem_d *
2788netmap_mem_pt_guest_get(nm_memid_t mem_id)
2789{
2790	struct netmap_mem_d *nmd;
2791
2792	NM_MTX_LOCK(nm_mem_list_lock);
2793	nmd = netmap_mem_pt_guest_find_memid(mem_id);
2794	if (nmd == NULL) {
2795		nmd = netmap_mem_pt_guest_create(mem_id);
2796	}
2797	NM_MTX_UNLOCK(nm_mem_list_lock);
2798
2799	return nmd;
2800}
2801
2802/*
2803 * The guest allocator can be created by ptnetmap_memdev (during the device
2804 * attach) or by ptnetmap device (ptnet), during the netmap_attach.
2805 *
 * The order is not important (it differs between Linux and FreeBSD).
 * Whichever comes first creates the allocator; the other one simply
 * attaches to it.
2808 */
2809
2810/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
2811 * the guest */
2812struct netmap_mem_d *
2813netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
2814{
2815	struct netmap_mem_d *nmd;
2816	struct netmap_mem_ptg *ptnmd;
2817
2818	nmd = netmap_mem_pt_guest_get(mem_id);
2819
2820	/* assign this device to the guest allocator */
2821	if (nmd) {
2822		ptnmd = (struct netmap_mem_ptg *)nmd;
2823		ptnmd->ptn_dev = ptn_dev;
2824	}
2825
2826	return nmd;
2827}
2828
2829/* Called when ptnet device is attaching */
2830struct netmap_mem_d *
2831netmap_mem_pt_guest_new(struct ifnet *ifp,
2832			unsigned int nifp_offset,
2833			unsigned int memid)
2834{
2835	struct netmap_mem_d *nmd;
2836
2837	if (ifp == NULL) {
2838		return NULL;
2839	}
2840
2841	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
2842
2843	if (nmd) {
2844		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
2845	}
2846
2847	return nmd;
2848}
2849
2850#endif /* WITH_PTNETMAP */
2851