netmap_mem2.c revision 341477
1331722Seadler/*
2341477Svmaffione * Copyright (C) 2012-2014 Matteo Landi
3341477Svmaffione * Copyright (C) 2012-2016 Luigi Rizzo
4341477Svmaffione * Copyright (C) 2012-2016 Giuseppe Lettieri
5341477Svmaffione * All rights reserved.
6234228Sluigi *
7234228Sluigi * Redistribution and use in source and binary forms, with or without
8234228Sluigi * modification, are permitted provided that the following conditions
9234228Sluigi * are met:
10234228Sluigi *   1. Redistributions of source code must retain the above copyright
11234228Sluigi *      notice, this list of conditions and the following disclaimer.
12234228Sluigi *   2. Redistributions in binary form must reproduce the above copyright
13234228Sluigi *      notice, this list of conditions and the following disclaimer in the
14259412Sluigi *      documentation and/or other materials provided with the distribution.
15234228Sluigi *
16234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19234228Sluigi * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26234228Sluigi * SUCH DAMAGE.
27234228Sluigi */
28234228Sluigi
29257529Sluigi#ifdef linux
30257529Sluigi#include "bsd_glue.h"
31257529Sluigi#endif /* linux */
32234228Sluigi
33257529Sluigi#ifdef __APPLE__
34257529Sluigi#include "osx_glue.h"
35257529Sluigi#endif /* __APPLE__ */
36234228Sluigi
37257529Sluigi#ifdef __FreeBSD__
38257529Sluigi#include <sys/cdefs.h> /* prerequisite */
39257529Sluigi__FBSDID("$FreeBSD: stable/11/sys/dev/netmap/netmap_mem2.c 341477 2018-12-04 17:40:56Z vmaffione $");
40234228Sluigi
41257529Sluigi#include <sys/types.h>
42257529Sluigi#include <sys/malloc.h>
43341477Svmaffione#include <sys/kernel.h>		/* MALLOC_DEFINE */
44257529Sluigi#include <sys/proc.h>
45257529Sluigi#include <vm/vm.h>	/* vtophys */
46257529Sluigi#include <vm/pmap.h>	/* vtophys */
47257529Sluigi#include <sys/socket.h> /* sockaddrs */
48257529Sluigi#include <sys/selinfo.h>
49257529Sluigi#include <sys/sysctl.h>
50257529Sluigi#include <net/if.h>
51257529Sluigi#include <net/if_var.h>
52257529Sluigi#include <net/vnet.h>
53257529Sluigi#include <machine/bus.h>	/* bus_dmamap_* */
54257529Sluigi
55341477Svmaffione/* M_NETMAP only used in here */
56341477SvmaffioneMALLOC_DECLARE(M_NETMAP);
57341477SvmaffioneMALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
58341477Svmaffione
59257529Sluigi#endif /* __FreeBSD__ */
60257529Sluigi
61341477Svmaffione#ifdef _WIN32
62341477Svmaffione#include <win_glue.h>
63341477Svmaffione#endif
64341477Svmaffione
65257529Sluigi#include <net/netmap.h>
66257529Sluigi#include <dev/netmap/netmap_kern.h>
67341477Svmaffione#include <net/netmap_virt.h>
68257529Sluigi#include "netmap_mem2.h"
69257529Sluigi
70341477Svmaffione#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
71341477Svmaffione#define NETMAP_BUF_MAX_NUM  8*4096      /* if too big takes too much time to allocate */
72341477Svmaffione#else
73341477Svmaffione#define NETMAP_BUF_MAX_NUM 20*4096*2	/* large machine */
74341477Svmaffione#endif
75270063Sluigi
76270063Sluigi#define NETMAP_POOL_MAX_NAMSZ	32
77270063Sluigi
78270063Sluigi
/*
 * Indices of the three object pools of an allocator, in the order
 * they are laid out in the exported memory region.
 */
enum {
	NETMAP_IF_POOL   = 0,	/* struct netmap_if headers */
	NETMAP_RING_POOL,	/* struct netmap_ring + slot arrays */
	NETMAP_BUF_POOL,	/* packet buffers */
	NETMAP_POOLS_NR		/* number of pools */
};
85270063Sluigi
86270063Sluigi
/*
 * Requested geometry (object size and count) for one pool.
 * last_size/last_num appear to cache the values used by the previous
 * (re)configuration -- TODO confirm against the config code, which is
 * outside this chunk.
 */
struct netmap_obj_params {
	u_int size;		/* requested object size */
	u_int num;		/* requested number of objects */

	u_int last_size;	/* size used at last config (see note above) */
	u_int last_num;		/* num used at last config (see note above) */
};
94285349Sluigi
/*
 * One object allocator ("pool"): fixed-size objects carved out of
 * contiguous clusters, tracked by a free bitmap and a lookup table
 * with the virtual (and, where needed, physical) address of each
 * object.
 */
struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	u_int objtotal;         /* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;          /* number of free objects. */

	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	int	alloc_done;	/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;       /* cluster size */
	u_int _clustentries;    /* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};
131270063Sluigi
/*
 * Allocator lock: thin wrappers around the OS-specific mutex embedded
 * in struct netmap_mem_d. NMA_SPINLOCK is for callers that cannot
 * sleep (see netmap_mem_ofstophys()).
 */
#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)         NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)
138270063Sluigi
/*
 * Virtual method table of a memory allocator. The netmap_mem_*()
 * wrappers below dispatch through these, taking the allocator lock
 * around each call.
 */
struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int  (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t  (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	/* create/destroy the netmap_if of an adapter */
	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *,
					 struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int  (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};
157285349Sluigi
/*
 * A memory allocator: the three pools plus bookkeeping. All existing
 * allocators are linked in a circular list (prev/next) protected by
 * nm_mem_list_lock; the allocator's own state is protected by nm_mtx.
 */
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;  /* protect the allocator */
	u_int nm_totalsize; /* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;		/* existence references (list membership, find) */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;	/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;	/* dispatch table, see above */

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};
184270063Sluigi
185341477Svmaffioneint
186341477Svmaffionenetmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
187341477Svmaffione{
188341477Svmaffione	int rv;
189341477Svmaffione
190341477Svmaffione	NMA_LOCK(nmd);
191341477Svmaffione	rv = nmd->ops->nmd_get_lut(nmd, lut);
192341477Svmaffione	NMA_UNLOCK(nmd);
193341477Svmaffione
194341477Svmaffione	return rv;
195285349Sluigi}
196285349Sluigi
197341477Svmaffioneint
198341477Svmaffionenetmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
199341477Svmaffione		u_int *memflags, nm_memid_t *memid)
200341477Svmaffione{
201341477Svmaffione	int rv;
202341477Svmaffione
203341477Svmaffione	NMA_LOCK(nmd);
204341477Svmaffione	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
205341477Svmaffione	NMA_UNLOCK(nmd);
206341477Svmaffione
207341477Svmaffione	return rv;
208285349Sluigi}
209285349Sluigi
/*
 * Translate an offset within the exported memory region into a
 * physical address, dispatching to the allocator's nmd_ofstophys.
 * Returns 0 (or a zeroed vm_paddr_t on Windows) for bad offsets.
 */
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}
228285349Sluigi
229341477Svmaffionestatic int
230341477Svmaffionenetmap_mem_config(struct netmap_mem_d *nmd)
231341477Svmaffione{
232341477Svmaffione	if (nmd->active) {
233341477Svmaffione		/* already in use. Not fatal, but we
234341477Svmaffione		 * cannot change the configuration
235341477Svmaffione		 */
236341477Svmaffione		return 0;
237341477Svmaffione	}
238341477Svmaffione
239341477Svmaffione	return nmd->ops->nmd_config(nmd);
240285349Sluigi}
241285349Sluigi
242341477Svmaffionessize_t
243341477Svmaffionenetmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
244341477Svmaffione{
245341477Svmaffione	ssize_t rv;
246341477Svmaffione
247341477Svmaffione	NMA_LOCK(nmd);
248341477Svmaffione	rv = nmd->ops->nmd_if_offset(nmd, off);
249341477Svmaffione	NMA_UNLOCK(nmd);
250341477Svmaffione
251341477Svmaffione	return rv;
252285349Sluigi}
253285349Sluigi
/* Destroy the allocator; called when the last reference is dropped
 * (see __netmap_mem_put()). No lock is taken: nobody else can see
 * the allocator at this point. */
static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}
259285349Sluigi
260341477Svmaffionestruct netmap_if *
261341477Svmaffionenetmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
262341477Svmaffione{
263341477Svmaffione	struct netmap_if *nifp;
264341477Svmaffione	struct netmap_mem_d *nmd = na->nm_mem;
265285349Sluigi
266341477Svmaffione	NMA_LOCK(nmd);
267341477Svmaffione	nifp = nmd->ops->nmd_if_new(na, priv);
268341477Svmaffione	NMA_UNLOCK(nmd);
269341477Svmaffione
270341477Svmaffione	return nifp;
271341477Svmaffione}
272341477Svmaffione
273341477Svmaffionevoid
274341477Svmaffionenetmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
275341477Svmaffione{
276341477Svmaffione	struct netmap_mem_d *nmd = na->nm_mem;
277341477Svmaffione
278341477Svmaffione	NMA_LOCK(nmd);
279341477Svmaffione	nmd->ops->nmd_if_delete(na, nif);
280341477Svmaffione	NMA_UNLOCK(nmd);
281341477Svmaffione}
282341477Svmaffione
283341477Svmaffioneint
284341477Svmaffionenetmap_mem_rings_create(struct netmap_adapter *na)
285341477Svmaffione{
286341477Svmaffione	int rv;
287341477Svmaffione	struct netmap_mem_d *nmd = na->nm_mem;
288341477Svmaffione
289341477Svmaffione	NMA_LOCK(nmd);
290341477Svmaffione	rv = nmd->ops->nmd_rings_create(na);
291341477Svmaffione	NMA_UNLOCK(nmd);
292341477Svmaffione
293341477Svmaffione	return rv;
294341477Svmaffione}
295341477Svmaffione
296341477Svmaffionevoid
297341477Svmaffionenetmap_mem_rings_delete(struct netmap_adapter *na)
298341477Svmaffione{
299341477Svmaffione	struct netmap_mem_d *nmd = na->nm_mem;
300341477Svmaffione
301341477Svmaffione	NMA_LOCK(nmd);
302341477Svmaffione	nmd->ops->nmd_rings_delete(na);
303341477Svmaffione	NMA_UNLOCK(nmd);
304341477Svmaffione}
305341477Svmaffione
306285349Sluigistatic int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
307285349Sluigistatic int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
308285349Sluigistatic int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
309341477Svmaffionestatic void nm_mem_release_id(struct netmap_mem_d *);
310285349Sluigi
/* Return the allocator identifier. Read without the lock: nm_id is
 * set once at creation (nm_mem_assign_id) and never changes. */
nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}
316285349Sluigi
/* Optional refcount tracing: logs caller, allocator id and the new
 * refcount. Compiles to nothing unless NM_DEBUG_MEM_PUTGET is set. */
#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif
323285349Sluigi
/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
NM_MTX_T nm_mem_list_lock;	/* protects the list and the refcounts */
327341477Svmaffione
/*
 * Take an existence reference on an allocator. func/line are only
 * used by the NM_DBG_REFC tracing macro.
 */
struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}
337270063Sluigi
/*
 * Drop an existence reference. On the last put the allocator is
 * unlinked from the list while the list lock is held, but actually
 * destroyed only after the lock is released (netmap_mem_delete may
 * do work unsuitable under the list lock).
 */
void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}
351270063Sluigi
/*
 * Bind an adapter to the allocator and make sure the memory is
 * allocated and mapped: assign the iommu group, (re)configure if
 * needed, run the allocator's finalize op, and DMA-map the buffer
 * pool for the device. On error the partial work is undone through
 * netmap_mem_deref(). Returns 0 or the allocator's lasterr.
 */
int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	/* NOTE(review): on config failure we jump to out without having
	 * incremented 'active', yet the error path below still calls
	 * netmap_mem_deref() -- presumably nmd_config set lasterr;
	 * verify the active counter stays balanced in that case. */
	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd);

	if (!nmd->lasterr && na->pdev) {
		/* map the buffer pool for DMA by this device */
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}
382341477Svmaffione
383341477Svmaffionestatic int
384341477Svmaffionenm_isset(uint32_t *bitmap, u_int i)
385341477Svmaffione{
386341477Svmaffione	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
387341477Svmaffione}
388341477Svmaffione
389341477Svmaffione
390341477Svmaffionestatic int
391341477Svmaffionenetmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
392341477Svmaffione{
393341477Svmaffione	u_int n, j;
394341477Svmaffione
395341477Svmaffione	if (p->bitmap == NULL) {
396341477Svmaffione		/* Allocate the bitmap */
397341477Svmaffione		n = (p->objtotal + 31) / 32;
398341477Svmaffione		p->bitmap = nm_os_malloc(sizeof(uint32_t) * n);
399341477Svmaffione		if (p->bitmap == NULL) {
400341477Svmaffione			D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
401341477Svmaffione			    p->name);
402341477Svmaffione			return ENOMEM;
403341477Svmaffione		}
404341477Svmaffione		p->bitmap_slots = n;
405285349Sluigi	} else {
406341477Svmaffione		memset(p->bitmap, 0, p->bitmap_slots);
407285349Sluigi	}
408285349Sluigi
409341477Svmaffione	p->objfree = 0;
410341477Svmaffione	/*
411341477Svmaffione	 * Set all the bits in the bitmap that have
412341477Svmaffione	 * corresponding buffers to 1 to indicate they are
413341477Svmaffione	 * free.
414341477Svmaffione	 */
415341477Svmaffione	for (j = 0; j < p->objtotal; j++) {
416341477Svmaffione		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
417341477Svmaffione			D("skipping %s %d", p->name, j);
418341477Svmaffione			continue;
419341477Svmaffione		}
420341477Svmaffione		p->bitmap[ (j>>5) ] |=  ( 1U << (j & 31U) );
421341477Svmaffione		p->objfree++;
422341477Svmaffione	}
423285349Sluigi
424341477Svmaffione	ND("%s free %u", p->name, p->objfree);
425341477Svmaffione	if (p->objfree == 0)
426341477Svmaffione		return ENOMEM;
427341477Svmaffione
428341477Svmaffione	return 0;
429270063Sluigi}
430270063Sluigi
/*
 * Initialize the free bitmaps of all three pools, then reserve
 * buffers 0 and 1 of the buffer pool (used as sentinel/junk buffers
 * elsewhere in netmap). Returns 0 or ENOMEM.
 */
static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;	/* clear bits 0-1 */
	}
	return 0;
}
461341477Svmaffione
/*
 * Drop an active-user reference taken by netmap_mem_finalize().
 * Unmaps the buffer pool for the device when the adapter has no open
 * fds, and resets the bitmaps when the last user goes away so that
 * objects leaked by unclean application exits are reclaimed.
 * Returns nonzero iff this was the last active user.
 */
int
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int last_user = 0;
	NMA_LOCK(nmd);
	if (na->active_fds <= 0)
		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->active == 1) {
		last_user = 1;
		/*
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		 * reclaimed.
		 */
		netmap_mem_init_bitmaps(nmd);
	}
	nmd->ops->nmd_deref(nmd);

	nmd->active--;
	if (!nmd->active)
		nmd->nm_grp = -1;	/* detach from the iommu group */

	NMA_UNLOCK(nmd);
	return last_user;
}
487234228Sluigi
488241719Sluigi
489285349Sluigi/* accessor functions */
490341477Svmaffionestatic int
491285349Sluiginetmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
492285349Sluigi{
493285349Sluigi	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
494341477Svmaffione#ifdef __FreeBSD__
495341477Svmaffione	lut->plut = lut->lut;
496341477Svmaffione#endif
497285349Sluigi	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
498285349Sluigi	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
499341477Svmaffione
500341477Svmaffione	return 0;
501285349Sluigi}
502285349Sluigi
/*
 * Default pool geometry for private (per-port) allocators; tunable
 * through the priv_* sysctls declared below.
 */
static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 2,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};
517241719Sluigi
518261909Sluigi
/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will have each its own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	/* initial/default geometry, adjustable via sysctl */
	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,	/* id 0 is reserved as an error value */
	.nm_grp = -1,	/* no iommu group yet */

	/* circular list initially contains only nm_mem itself */
	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};
575241719Sluigi
576257529Sluigi
/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
/* The "%s_*" pool names are format templates, filled in with the
 * port name when a private allocator is instantiated. */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name 	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,	/* no iommu group yet */

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};
610257529Sluigi
/* memory allocator related sysctls */

#define STRINGIFY(x) #x


/*
 * For each pool, expose under dev.netmap:
 *  - <name>_size / <name>_num: requested geometry of nm_mem (RW)
 *  - <name>_curr_size / <name>_curr_num: current geometry (RO)
 *  - priv_<name>_size / priv_<name>_num: defaults for private
 *    allocators (RW, backed by netmap_min_priv_params).
 */
#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s");	\
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
638241719Sluigi
/* call with nm_mem_list_lock held */
/*
 * Pick the first free id after the most recently assigned one
 * (unsigned wrap-around, 0 reserved as error value) and insert nmd
 * in the circular list, sorted by nm_id, with refcount 1.
 * Returns 0 or ENOMEM if all ids are in use.
 */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			/* gap found: insert nmd just before 'scan' */
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}
669261909Sluigi
670341477Svmaffione/* call with nm_mem_list_lock *not* held */
671341477Svmaffionestatic int
672341477Svmaffionenm_mem_assign_id(struct netmap_mem_d *nmd)
673341477Svmaffione{
674341477Svmaffione	int ret;
675341477Svmaffione
676341477Svmaffione	NM_MTX_LOCK(nm_mem_list_lock);
677341477Svmaffione	ret = nm_mem_assign_id_locked(nmd);
678341477Svmaffione	NM_MTX_UNLOCK(nm_mem_list_lock);
679341477Svmaffione
680341477Svmaffione	return ret;
681341477Svmaffione}
682341477Svmaffione
/* call with nm_mem_list_lock held */
/* Unlink nmd from the circular allocator list, updating the list
 * head if nmd was the most recently assigned allocator. */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}
695261909Sluigi
696341477Svmaffionestruct netmap_mem_d *
697341477Svmaffionenetmap_mem_find(nm_memid_t id)
698341477Svmaffione{
699341477Svmaffione	struct netmap_mem_d *nmd;
700341477Svmaffione
701341477Svmaffione	NM_MTX_LOCK(nm_mem_list_lock);
702341477Svmaffione	nmd = netmap_last_mem_d;
703341477Svmaffione	do {
704341477Svmaffione		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
705341477Svmaffione			nmd->refcount++;
706341477Svmaffione			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
707341477Svmaffione			NM_MTX_UNLOCK(nm_mem_list_lock);
708341477Svmaffione			return nmd;
709341477Svmaffione		}
710341477Svmaffione		nmd = nmd->next;
711341477Svmaffione	} while (nmd != netmap_last_mem_d);
712341477Svmaffione	NM_MTX_UNLOCK(nm_mem_list_lock);
713341477Svmaffione	return NULL;
714261909Sluigi}
715261909Sluigi
716270063Sluigistatic int
717270063Sluiginm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
718270063Sluigi{
719270063Sluigi	int err = 0, id;
720270063Sluigi	id = nm_iommu_group_id(dev);
721270063Sluigi	if (netmap_verbose)
722270063Sluigi		D("iommu_group %d", id);
723261909Sluigi
724270063Sluigi	NMA_LOCK(nmd);
725270063Sluigi
726270063Sluigi	if (nmd->nm_grp < 0)
727270063Sluigi		nmd->nm_grp = id;
728270063Sluigi
729270063Sluigi	if (nmd->nm_grp != id)
730270063Sluigi		nmd->lasterr = err = ENOMEM;
731270063Sluigi
732270063Sluigi	NMA_UNLOCK(nmd);
733270063Sluigi	return err;
734270063Sluigi}
735270063Sluigi
736341477Svmaffionestatic struct lut_entry *
737341477Svmaffionenm_alloc_lut(u_int nobj)
738341477Svmaffione{
739341477Svmaffione	size_t n = sizeof(struct lut_entry) * nobj;
740341477Svmaffione	struct lut_entry *lut;
741341477Svmaffione#ifdef linux
742341477Svmaffione	lut = vmalloc(n);
743341477Svmaffione#else
744341477Svmaffione	lut = nm_os_malloc(n);
745341477Svmaffione#endif
746341477Svmaffione	return lut;
747341477Svmaffione}
748341477Svmaffione
749341477Svmaffionestatic void
750341477Svmaffionenm_free_lut(struct lut_entry *lut, u_int objtotal)
751341477Svmaffione{
752341477Svmaffione	bzero(lut, sizeof(struct lut_entry) * objtotal);
753341477Svmaffione#ifdef linux
754341477Svmaffione	vfree(lut);
755341477Svmaffione#else
756341477Svmaffione	nm_os_free(lut);
757341477Svmaffione#endif
758341477Svmaffione}
759341477Svmaffione
/* Physical-address lookup tables are separate from the virtual ones
 * only on Linux and Windows (on FreeBSD plut aliases lut, see
 * netmap_mem2_get_lut()). */
#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry * lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */
776341477Svmaffione
777341477Svmaffione
/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
/* nmd_ofstophys implementation: pools are laid out back to back
 * (if, ring, buf), so walk them subtracting each pool's memtotal
 * until the offset falls inside one; then translate via the lut. */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;	/* saved for the error message */
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		/* win32 vm_paddr_t is a structure, no arithmetic on it */
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0; /* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}
821341477Svmaffione
822341477Svmaffione#ifdef _WIN32
823341477Svmaffione
824341477Svmaffione/*
825341477Svmaffione * win32_build_virtual_memory_for_userspace
826341477Svmaffione *
827341477Svmaffione * This function get all the object making part of the pools and maps
828341477Svmaffione * a contiguous virtual memory space for the userspace
829341477Svmaffione * It works this way
830341477Svmaffione * 1 - allocate a Memory Descriptor List wide as the sum
831341477Svmaffione *		of the memory needed for the pools
832341477Svmaffione * 2 - cycle all the objects in every pool and for every object do
833341477Svmaffione *
834341477Svmaffione *		2a - cycle all the objects in every pool, get the list
835341477Svmaffione *				of the physical address descriptors
836341477Svmaffione *		2b - calculate the offset in the array of pages desciptor in the
837341477Svmaffione *				main MDL
838341477Svmaffione *		2c - copy the descriptors of the object in the main MDL
839341477Svmaffione *
840341477Svmaffione * 3 - return the resulting MDL that needs to be mapped in userland
841341477Svmaffione *
842341477Svmaffione * In this way we will have an MDL that describes all the memory for the
843341477Svmaffione * objects in a single object
844341477Svmaffione*/
845341477Svmaffione
846341477SvmaffionePMDL
847341477Svmaffionewin32_build_user_vm_map(struct netmap_mem_d* nmd)
848341477Svmaffione{
849341477Svmaffione	u_int memflags, ofs = 0;
850341477Svmaffione	PMDL mainMdl, tempMdl;
851341477Svmaffione	uint64_t memsize;
852341477Svmaffione	int i, j;
853341477Svmaffione
854341477Svmaffione	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
855341477Svmaffione		D("memory not finalised yet");
856341477Svmaffione		return NULL;
857341477Svmaffione	}
858341477Svmaffione
859341477Svmaffione	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
860341477Svmaffione	if (mainMdl == NULL) {
861341477Svmaffione		D("failed to allocate mdl");
862341477Svmaffione		return NULL;
863341477Svmaffione	}
864341477Svmaffione
865341477Svmaffione	NMA_LOCK(nmd);
866341477Svmaffione	for (i = 0; i < NETMAP_POOLS_NR; i++) {
867341477Svmaffione		struct netmap_obj_pool *p = &nmd->pools[i];
868341477Svmaffione		int clsz = p->_clustsize;
869341477Svmaffione		int clobjs = p->_clustentries; /* objects per cluster */
870341477Svmaffione		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
871341477Svmaffione		PPFN_NUMBER pSrc, pDst;
872341477Svmaffione
873341477Svmaffione		/* each pool has a different cluster size so we need to reallocate */
874341477Svmaffione		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
875341477Svmaffione		if (tempMdl == NULL) {
876341477Svmaffione			NMA_UNLOCK(nmd);
877341477Svmaffione			D("fail to allocate tempMdl");
878341477Svmaffione			IoFreeMdl(mainMdl);
879341477Svmaffione			return NULL;
880341477Svmaffione		}
881341477Svmaffione		pSrc = MmGetMdlPfnArray(tempMdl);
882341477Svmaffione		/* create one entry per cluster, the lut[] has one entry per object */
883341477Svmaffione		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
884341477Svmaffione			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
885341477Svmaffione			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
886341477Svmaffione			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
887341477Svmaffione			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
888341477Svmaffione			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
889341477Svmaffione		}
890341477Svmaffione		IoFreeMdl(tempMdl);
891341477Svmaffione	}
892257529Sluigi	NMA_UNLOCK(nmd);
893341477Svmaffione	return mainMdl;
894234228Sluigi}
895234228Sluigi
896341477Svmaffione#endif /* _WIN32 */
897341477Svmaffione
898341477Svmaffione/*
899341477Svmaffione * helper function for OS-specific mmap routines (currently only windows).
900341477Svmaffione * Given an nmd and a pool index, returns the cluster size and number of clusters.
901341477Svmaffione * Returns 0 if memory is finalised and the pool is valid, otherwise 1.
902341477Svmaffione * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change.
903341477Svmaffione */
904341477Svmaffione
905341477Svmaffioneint
906341477Svmaffionenetmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
907341477Svmaffione{
908341477Svmaffione	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
909341477Svmaffione		return 1; /* invalid arguments */
910341477Svmaffione	// NMA_LOCK_ASSERT(nmd);
911341477Svmaffione	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
912341477Svmaffione		*clustsize = *numclusters = 0;
913341477Svmaffione		return 1; /* not ready yet */
914341477Svmaffione	}
915341477Svmaffione	*clustsize = nmd->pools[pool]._clustsize;
916341477Svmaffione	*numclusters = nmd->pools[pool].numclusters;
917341477Svmaffione	return 0; /* success */
918341477Svmaffione}
919341477Svmaffione
920285349Sluigistatic int
921341477Svmaffionenetmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
922341477Svmaffione			u_int *memflags, nm_memid_t *id)
923257529Sluigi{
924257529Sluigi	int error = 0;
925285349Sluigi	error = netmap_mem_config(nmd);
926257529Sluigi	if (error)
927257529Sluigi		goto out;
928270063Sluigi	if (size) {
929270063Sluigi		if (nmd->flags & NETMAP_MEM_FINALIZED) {
930270063Sluigi			*size = nmd->nm_totalsize;
931270063Sluigi		} else {
932270063Sluigi			int i;
933270063Sluigi			*size = 0;
934270063Sluigi			for (i = 0; i < NETMAP_POOLS_NR; i++) {
935270063Sluigi				struct netmap_obj_pool *p = nmd->pools + i;
936270063Sluigi				*size += (p->_numclusters * p->_clustsize);
937270063Sluigi			}
938257529Sluigi		}
939257529Sluigi	}
940270063Sluigi	if (memflags)
941270063Sluigi		*memflags = nmd->flags;
942270063Sluigi	if (id)
943270063Sluigi		*id = nmd->nm_id;
944257529Sluigiout:
945257529Sluigi	return error;
946257529Sluigi}
947257529Sluigi
948234228Sluigi/*
949234228Sluigi * we store objects by kernel address, need to find the offset
950234228Sluigi * within the pool to export the value to userspace.
951234228Sluigi * Algorithm: scan until we find the cluster, then add the
952234228Sluigi * actual offset in the cluster
953234228Sluigi */
954234242Sluigistatic ssize_t
955234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
956234228Sluigi{
957257529Sluigi	int i, k = p->_clustentries, n = p->objtotal;
958234228Sluigi	ssize_t ofs = 0;
959234228Sluigi
960234228Sluigi	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
961234228Sluigi		const char *base = p->lut[i].vaddr;
962234228Sluigi		ssize_t relofs = (const char *) vaddr - base;
963234228Sluigi
964249504Sluigi		if (relofs < 0 || relofs >= p->_clustsize)
965234228Sluigi			continue;
966234228Sluigi
967234228Sluigi		ofs = ofs + relofs;
968234228Sluigi		ND("%s: return offset %d (cluster %d) for pointer %p",
969234228Sluigi		    p->name, ofs, i, vaddr);
970234228Sluigi		return ofs;
971234228Sluigi	}
972234228Sluigi	D("address %p is not contained inside any cluster (%s)",
973234228Sluigi	    vaddr, p->name);
974234228Sluigi	return 0; /* An error occurred */
975234228Sluigi}
976234228Sluigi
977234228Sluigi/* Helper functions which convert virtual addresses to offsets */
978257529Sluigi#define netmap_if_offset(n, v)					\
979257529Sluigi	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
980234228Sluigi
981257529Sluigi#define netmap_ring_offset(n, v)				\
982257529Sluigi    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
983257529Sluigi	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
984234228Sluigi
985285349Sluigistatic ssize_t
986285349Sluiginetmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
987257529Sluigi{
988341477Svmaffione	return netmap_if_offset(nmd, addr);
989257529Sluigi}
990257529Sluigi
991241719Sluigi/*
992241719Sluigi * report the index, and use start position as a hint,
993241719Sluigi * otherwise buffer allocation becomes terribly expensive.
994241719Sluigi */
995234228Sluigistatic void *
996257529Sluiginetmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
997234228Sluigi{
998234228Sluigi	uint32_t i = 0;			/* index in the bitmap */
999341477Svmaffione	uint32_t mask, j = 0;		/* slot counter */
1000234228Sluigi	void *vaddr = NULL;
1001234228Sluigi
1002234228Sluigi	if (len > p->_objsize) {
1003234228Sluigi		D("%s request size %d too large", p->name, len);
1004234228Sluigi		return NULL;
1005234228Sluigi	}
1006234228Sluigi
1007234228Sluigi	if (p->objfree == 0) {
1008259412Sluigi		D("no more %s objects", p->name);
1009234228Sluigi		return NULL;
1010234228Sluigi	}
1011241719Sluigi	if (start)
1012241719Sluigi		i = *start;
1013234228Sluigi
1014241719Sluigi	/* termination is guaranteed by p->free, but better check bounds on i */
1015241719Sluigi	while (vaddr == NULL && i < p->bitmap_slots)  {
1016234228Sluigi		uint32_t cur = p->bitmap[i];
1017234228Sluigi		if (cur == 0) { /* bitmask is fully used */
1018234228Sluigi			i++;
1019234228Sluigi			continue;
1020234228Sluigi		}
1021234228Sluigi		/* locate a slot */
1022234228Sluigi		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
1023234228Sluigi			;
1024234228Sluigi
1025234228Sluigi		p->bitmap[i] &= ~mask; /* mark object as in use */
1026234228Sluigi		p->objfree--;
1027234228Sluigi
1028234228Sluigi		vaddr = p->lut[i * 32 + j].vaddr;
1029241719Sluigi		if (index)
1030241719Sluigi			*index = i * 32 + j;
1031234228Sluigi	}
1032341477Svmaffione	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
1033234228Sluigi
1034241719Sluigi	if (start)
1035241719Sluigi		*start = i;
1036234228Sluigi	return vaddr;
1037234228Sluigi}
1038234228Sluigi
1039234228Sluigi
1040234228Sluigi/*
1041261909Sluigi * free by index, not by address.
1042261909Sluigi * XXX should we also cleanup the content ?
1043234228Sluigi */
1044261909Sluigistatic int
1045234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
1046234228Sluigi{
1047261909Sluigi	uint32_t *ptr, mask;
1048261909Sluigi
1049234228Sluigi	if (j >= p->objtotal) {
1050234228Sluigi		D("invalid index %u, max %u", j, p->objtotal);
1051261909Sluigi		return 1;
1052234228Sluigi	}
1053261909Sluigi	ptr = &p->bitmap[j / 32];
1054261909Sluigi	mask = (1 << (j % 32));
1055261909Sluigi	if (*ptr & mask) {
1056261909Sluigi		D("ouch, double free on buffer %d", j);
1057261909Sluigi		return 1;
1058261909Sluigi	} else {
1059261909Sluigi		*ptr |= mask;
1060261909Sluigi		p->objfree++;
1061261909Sluigi		return 0;
1062261909Sluigi	}
1063234228Sluigi}
1064234228Sluigi
1065261909Sluigi/*
1066261909Sluigi * free by address. This is slow but is only used for a few
1067261909Sluigi * objects (rings, nifp)
1068261909Sluigi */
1069234228Sluigistatic void
1070234228Sluiginetmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
1071234228Sluigi{
1072257529Sluigi	u_int i, j, n = p->numclusters;
1073234228Sluigi
1074257529Sluigi	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
1075257529Sluigi		void *base = p->lut[i * p->_clustentries].vaddr;
1076234228Sluigi		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
1077234228Sluigi
1078234228Sluigi		/* Given address, is out of the scope of the current cluster.*/
1079341477Svmaffione		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
1080234228Sluigi			continue;
1081234228Sluigi
1082234228Sluigi		j = j + relofs / p->_objsize;
1083257529Sluigi		/* KASSERT(j != 0, ("Cannot free object 0")); */
1084234228Sluigi		netmap_obj_free(p, j);
1085234228Sluigi		return;
1086234228Sluigi	}
1087245835Sluigi	D("address %p is not contained inside any cluster (%s)",
1088234228Sluigi	    vaddr, p->name);
1089234228Sluigi}
1090234228Sluigi
1091341477Svmaffioneunsigned
1092341477Svmaffionenetmap_mem_bufsize(struct netmap_mem_d *nmd)
1093341477Svmaffione{
1094341477Svmaffione	return nmd->pools[NETMAP_BUF_POOL]._objsize;
1095341477Svmaffione}
1096270063Sluigi
1097257529Sluigi#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
1098257529Sluigi#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
1099257529Sluigi#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
1100257529Sluigi#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
1101257529Sluigi#define netmap_buf_malloc(n, _pos, _index)			\
1102270063Sluigi	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
1103234228Sluigi
1104234228Sluigi
1105341477Svmaffione#if 0 /* currently unused */
1106234228Sluigi/* Return the index associated to the given packet buffer */
1107257529Sluigi#define netmap_buf_index(n, v)						\
1108257529Sluigi    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
1109261909Sluigi#endif
1110234228Sluigi
1111261909Sluigi/*
1112261909Sluigi * allocate extra buffers in a linked list.
1113261909Sluigi * returns the actual number.
1114261909Sluigi */
1115261909Sluigiuint32_t
1116261909Sluiginetmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
1117261909Sluigi{
1118261909Sluigi	struct netmap_mem_d *nmd = na->nm_mem;
1119261909Sluigi	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
1120234228Sluigi
1121261909Sluigi	NMA_LOCK(nmd);
1122261909Sluigi
1123261909Sluigi	*head = 0;	/* default, 'null' index ie empty list */
1124261909Sluigi	for (i = 0 ; i < n; i++) {
1125261909Sluigi		uint32_t cur = *head;	/* save current head */
1126261909Sluigi		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
1127261909Sluigi		if (p == NULL) {
1128261909Sluigi			D("no more buffers after %d of %d", i, n);
1129261909Sluigi			*head = cur; /* restore */
1130261909Sluigi			break;
1131261909Sluigi		}
1132341477Svmaffione		ND(5, "allocate buffer %d -> %d", *head, cur);
1133261909Sluigi		*p = cur; /* link to previous head */
1134261909Sluigi	}
1135261909Sluigi
1136261909Sluigi	NMA_UNLOCK(nmd);
1137261909Sluigi
1138261909Sluigi	return i;
1139261909Sluigi}
1140261909Sluigi
1141261909Sluigistatic void
1142261909Sluiginetmap_extra_free(struct netmap_adapter *na, uint32_t head)
1143261909Sluigi{
1144341477Svmaffione	struct lut_entry *lut = na->na_lut.lut;
1145261909Sluigi	struct netmap_mem_d *nmd = na->nm_mem;
1146261909Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1147261909Sluigi	uint32_t i, cur, *buf;
1148261909Sluigi
1149341477Svmaffione	ND("freeing the extra list");
1150261909Sluigi	for (i = 0; head >=2 && head < p->objtotal; i++) {
1151261909Sluigi		cur = head;
1152261909Sluigi		buf = lut[head].vaddr;
1153261909Sluigi		head = *buf;
1154261909Sluigi		*buf = 0;
1155261909Sluigi		if (netmap_obj_free(p, cur))
1156261909Sluigi			break;
1157261909Sluigi	}
1158261909Sluigi	if (head != 0)
1159261909Sluigi		D("breaking with head %d", head);
1160341477Svmaffione	if (netmap_verbose)
1161341477Svmaffione		D("freed %d buffers", i);
1162261909Sluigi}
1163261909Sluigi
1164261909Sluigi
1165241719Sluigi/* Return nonzero on error */
1166241719Sluigistatic int
1167259412Sluiginetmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1168234228Sluigi{
1169257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1170257529Sluigi	u_int i = 0;	/* slot counter */
1171241719Sluigi	uint32_t pos = 0;	/* slot in p->bitmap */
1172241719Sluigi	uint32_t index = 0;	/* buffer index */
1173234228Sluigi
1174234228Sluigi	for (i = 0; i < n; i++) {
1175257529Sluigi		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
1176234228Sluigi		if (vaddr == NULL) {
1177259412Sluigi			D("no more buffers after %d of %d", i, n);
1178234228Sluigi			goto cleanup;
1179234228Sluigi		}
1180241719Sluigi		slot[i].buf_idx = index;
1181234228Sluigi		slot[i].len = p->_objsize;
1182259412Sluigi		slot[i].flags = 0;
1183341477Svmaffione		slot[i].ptr = 0;
1184234228Sluigi	}
1185234228Sluigi
1186341477Svmaffione	ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
1187241719Sluigi	return (0);
1188234228Sluigi
1189234228Sluigicleanup:
1190241643Semaste	while (i > 0) {
1191241643Semaste		i--;
1192241719Sluigi		netmap_obj_free(p, slot[i].buf_idx);
1193234228Sluigi	}
1194241719Sluigi	bzero(slot, n * sizeof(slot[0]));
1195241719Sluigi	return (ENOMEM);
1196234228Sluigi}
1197234228Sluigi
1198261909Sluigistatic void
1199261909Sluiginetmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
1200261909Sluigi{
1201261909Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1202261909Sluigi	u_int i;
1203234228Sluigi
1204261909Sluigi	for (i = 0; i < n; i++) {
1205261909Sluigi		slot[i].buf_idx = index;
1206261909Sluigi		slot[i].len = p->_objsize;
1207261909Sluigi		slot[i].flags = 0;
1208261909Sluigi	}
1209261909Sluigi}
1210261909Sluigi
1211261909Sluigi
1212234228Sluigistatic void
1213259412Sluiginetmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
1214234228Sluigi{
1215257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1216241719Sluigi
1217234228Sluigi	if (i < 2 || i >= p->objtotal) {
1218234228Sluigi		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
1219234228Sluigi		return;
1220234228Sluigi	}
1221241719Sluigi	netmap_obj_free(p, i);
1222234228Sluigi}
1223234228Sluigi
1224261909Sluigi
1225234228Sluigistatic void
1226261909Sluiginetmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1227261909Sluigi{
1228261909Sluigi	u_int i;
1229261909Sluigi
1230261909Sluigi	for (i = 0; i < n; i++) {
1231341477Svmaffione		if (slot[i].buf_idx > 1)
1232261909Sluigi			netmap_free_buf(nmd, slot[i].buf_idx);
1233261909Sluigi	}
1234341477Svmaffione	ND("%s: released some buffers, available: %u",
1235341477Svmaffione			p->name, p->objfree);
1236261909Sluigi}
1237261909Sluigi
1238261909Sluigistatic void
1239241719Sluiginetmap_reset_obj_allocator(struct netmap_obj_pool *p)
1240234228Sluigi{
1241257529Sluigi
1242234228Sluigi	if (p == NULL)
1243234228Sluigi		return;
1244234228Sluigi	if (p->bitmap)
1245341477Svmaffione		nm_os_free(p->bitmap);
1246241719Sluigi	p->bitmap = NULL;
1247341477Svmaffione	if (p->invalid_bitmap)
1248341477Svmaffione		nm_os_free(p->invalid_bitmap);
1249341477Svmaffione	p->invalid_bitmap = NULL;
1250341477Svmaffione	if (!p->alloc_done) {
1251341477Svmaffione		/* allocation was done by somebody else.
1252341477Svmaffione		 * Let them clean up after themselves.
1253341477Svmaffione		 */
1254341477Svmaffione		return;
1255341477Svmaffione	}
1256234228Sluigi	if (p->lut) {
1257257529Sluigi		u_int i;
1258257529Sluigi
1259282978Spkelsey		/*
1260282978Spkelsey		 * Free each cluster allocated in
1261282978Spkelsey		 * netmap_finalize_obj_allocator().  The cluster start
1262282978Spkelsey		 * addresses are stored at multiples of p->_clusterentries
1263282978Spkelsey		 * in the lut.
1264282978Spkelsey		 */
1265257529Sluigi		for (i = 0; i < p->objtotal; i += p->_clustentries) {
1266341477Svmaffione			contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
1267234228Sluigi		}
1268341477Svmaffione		nm_free_lut(p->lut, p->objtotal);
1269234228Sluigi	}
1270241719Sluigi	p->lut = NULL;
1271257529Sluigi	p->objtotal = 0;
1272257529Sluigi	p->memtotal = 0;
1273257529Sluigi	p->numclusters = 0;
1274257529Sluigi	p->objfree = 0;
1275341477Svmaffione	p->alloc_done = 0;
1276234228Sluigi}
1277234228Sluigi
1278234228Sluigi/*
1279241719Sluigi * Free all resources related to an allocator.
1280241719Sluigi */
1281241719Sluigistatic void
1282241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p)
1283241719Sluigi{
1284241719Sluigi	if (p == NULL)
1285241719Sluigi		return;
1286241719Sluigi	netmap_reset_obj_allocator(p);
1287241719Sluigi}
1288241719Sluigi
1289241719Sluigi/*
1290234228Sluigi * We receive a request for objtotal objects, of size objsize each.
1291234228Sluigi * Internally we may round up both numbers, as we allocate objects
1292234228Sluigi * in small clusters multiple of the page size.
1293257529Sluigi * We need to keep track of objtotal and clustentries,
1294234228Sluigi * as they are needed when freeing memory.
1295234228Sluigi *
1296234228Sluigi * XXX note -- userspace needs the buffers to be contiguous,
1297234228Sluigi *	so we cannot afford gaps at the end of a cluster.
1298234228Sluigi */
1299241719Sluigi
1300241719Sluigi
1301241719Sluigi/* call with NMA_LOCK held */
1302241719Sluigistatic int
1303241719Sluiginetmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
1304234228Sluigi{
1305257529Sluigi	int i;
1306234228Sluigi	u_int clustsize;	/* the cluster size, multiple of page size */
1307234228Sluigi	u_int clustentries;	/* how many objects per entry */
1308234228Sluigi
1309257529Sluigi	/* we store the current request, so we can
1310257529Sluigi	 * detect configuration changes later */
1311257529Sluigi	p->r_objtotal = objtotal;
1312257529Sluigi	p->r_objsize = objsize;
1313257529Sluigi
1314270063Sluigi#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
1315260368Sluigi#define LINE_ROUND	NM_CACHE_ALIGN	// 64
1316234228Sluigi	if (objsize >= MAX_CLUSTSIZE) {
1317234228Sluigi		/* we could do it but there is no point */
1318234228Sluigi		D("unsupported allocation for %d bytes", objsize);
1319257529Sluigi		return EINVAL;
1320234228Sluigi	}
1321234228Sluigi	/* make sure objsize is a multiple of LINE_ROUND */
1322234228Sluigi	i = (objsize & (LINE_ROUND - 1));
1323234228Sluigi	if (i) {
1324234228Sluigi		D("XXX aligning object by %d bytes", LINE_ROUND - i);
1325234228Sluigi		objsize += LINE_ROUND - i;
1326234228Sluigi	}
1327241719Sluigi	if (objsize < p->objminsize || objsize > p->objmaxsize) {
1328250184Sluigi		D("requested objsize %d out of range [%d, %d]",
1329241719Sluigi			objsize, p->objminsize, p->objmaxsize);
1330257529Sluigi		return EINVAL;
1331241719Sluigi	}
1332241719Sluigi	if (objtotal < p->nummin || objtotal > p->nummax) {
1333250184Sluigi		D("requested objtotal %d out of range [%d, %d]",
1334241719Sluigi			objtotal, p->nummin, p->nummax);
1335257529Sluigi		return EINVAL;
1336241719Sluigi	}
1337234228Sluigi	/*
1338234228Sluigi	 * Compute number of objects using a brute-force approach:
1339234228Sluigi	 * given a max cluster size,
1340234228Sluigi	 * we try to fill it with objects keeping track of the
1341234228Sluigi	 * wasted space to the next page boundary.
1342234228Sluigi	 */
1343234228Sluigi	for (clustentries = 0, i = 1;; i++) {
1344234228Sluigi		u_int delta, used = i * objsize;
1345234228Sluigi		if (used > MAX_CLUSTSIZE)
1346234228Sluigi			break;
1347234228Sluigi		delta = used % PAGE_SIZE;
1348234228Sluigi		if (delta == 0) { // exact solution
1349234228Sluigi			clustentries = i;
1350234228Sluigi			break;
1351234228Sluigi		}
1352234228Sluigi	}
1353270063Sluigi	/* exact solution not found */
1354270063Sluigi	if (clustentries == 0) {
1355270063Sluigi		D("unsupported allocation for %d bytes", objsize);
1356270063Sluigi		return EINVAL;
1357270063Sluigi	}
1358270063Sluigi	/* compute clustsize */
1359234228Sluigi	clustsize = clustentries * objsize;
1360245835Sluigi	if (netmap_verbose)
1361245835Sluigi		D("objsize %d clustsize %d objects %d",
1362245835Sluigi			objsize, clustsize, clustentries);
1363234228Sluigi
1364234228Sluigi	/*
1365234228Sluigi	 * The number of clusters is n = ceil(objtotal/clustentries)
1366234228Sluigi	 * objtotal' = n * clustentries
1367234228Sluigi	 */
1368257529Sluigi	p->_clustentries = clustentries;
1369234228Sluigi	p->_clustsize = clustsize;
1370257529Sluigi	p->_numclusters = (objtotal + clustentries - 1) / clustentries;
1371257529Sluigi
1372257529Sluigi	/* actual values (may be larger than requested) */
1373234228Sluigi	p->_objsize = objsize;
1374257529Sluigi	p->_objtotal = p->_numclusters * clustentries;
1375234228Sluigi
1376241719Sluigi	return 0;
1377241719Sluigi}
1378241719Sluigi
1379241719Sluigi/* call with NMA_LOCK held */
1380241719Sluigistatic int
1381241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p)
1382241719Sluigi{
1383257529Sluigi	int i; /* must be signed */
1384257529Sluigi	size_t n;
1385241719Sluigi
1386341477Svmaffione	if (p->lut) {
1387341477Svmaffione		/* if the lut is already there we assume that also all the
1388341477Svmaffione		 * clusters have already been allocated, possibily by somebody
1389341477Svmaffione		 * else (e.g., extmem). In the latter case, the alloc_done flag
1390341477Svmaffione		 * will remain at zero, so that we will not attempt to
1391341477Svmaffione		 * deallocate the clusters by ourselves in
1392341477Svmaffione		 * netmap_reset_obj_allocator.
1393341477Svmaffione		 */
1394341477Svmaffione		return 0;
1395341477Svmaffione	}
1396341477Svmaffione
1397257529Sluigi	/* optimistically assume we have enough memory */
1398257529Sluigi	p->numclusters = p->_numclusters;
1399257529Sluigi	p->objtotal = p->_objtotal;
1400341477Svmaffione	p->alloc_done = 1;
1401257529Sluigi
1402341477Svmaffione	p->lut = nm_alloc_lut(p->objtotal);
1403234228Sluigi	if (p->lut == NULL) {
1404341477Svmaffione		D("Unable to create lookup table for '%s'", p->name);
1405234228Sluigi		goto clean;
1406234228Sluigi	}
1407234228Sluigi
1408234228Sluigi	/*
1409341477Svmaffione	 * Allocate clusters, init pointers
1410234228Sluigi	 */
1411257529Sluigi
1412257529Sluigi	n = p->_clustsize;
1413257529Sluigi	for (i = 0; i < (int)p->objtotal;) {
1414257529Sluigi		int lim = i + p->_clustentries;
1415234228Sluigi		char *clust;
1416234228Sluigi
1417341477Svmaffione		/*
1418341477Svmaffione		 * XXX Note, we only need contigmalloc() for buffers attached
1419341477Svmaffione		 * to native interfaces. In all other cases (nifp, netmap rings
1420341477Svmaffione		 * and even buffers for VALE ports or emulated interfaces) we
1421341477Svmaffione		 * can live with standard malloc, because the hardware will not
1422341477Svmaffione		 * access the pages directly.
1423341477Svmaffione		 */
1424257529Sluigi		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
1425257529Sluigi		    (size_t)0, -1UL, PAGE_SIZE, 0);
1426234228Sluigi		if (clust == NULL) {
1427234228Sluigi			/*
1428234228Sluigi			 * If we get here, there is a severe memory shortage,
1429234228Sluigi			 * so halve the allocated memory to reclaim some.
1430234228Sluigi			 */
1431234228Sluigi			D("Unable to create cluster at %d for '%s' allocator",
1432241719Sluigi			    i, p->name);
1433257529Sluigi			if (i < 2) /* nothing to halve */
1434257529Sluigi				goto out;
1435234228Sluigi			lim = i / 2;
1436241719Sluigi			for (i--; i >= lim; i--) {
1437257529Sluigi				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
1438234228Sluigi					contigfree(p->lut[i].vaddr,
1439257529Sluigi						n, M_NETMAP);
1440282978Spkelsey				p->lut[i].vaddr = NULL;
1441234228Sluigi			}
1442257529Sluigi		out:
1443234228Sluigi			p->objtotal = i;
1444257529Sluigi			/* we may have stopped in the middle of a cluster */
1445257529Sluigi			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
1446234228Sluigi			break;
1447234228Sluigi		}
1448282978Spkelsey		/*
1449341477Svmaffione		 * Set lut state for all buffers in the current cluster.
1450282978Spkelsey		 *
1451282978Spkelsey		 * [i, lim) is the set of buffer indexes that cover the
1452282978Spkelsey		 * current cluster.
1453282978Spkelsey		 *
1454282978Spkelsey		 * 'clust' is really the address of the current buffer in
1455282978Spkelsey		 * the current cluster as we index through it with a stride
1456282978Spkelsey		 * of p->_objsize.
1457282978Spkelsey		 */
1458241719Sluigi		for (; i < lim; i++, clust += p->_objsize) {
1459234228Sluigi			p->lut[i].vaddr = clust;
1460341477Svmaffione#if !defined(linux) && !defined(_WIN32)
1461234228Sluigi			p->lut[i].paddr = vtophys(clust);
1462341477Svmaffione#endif
1463234228Sluigi		}
1464234228Sluigi	}
1465257529Sluigi	p->memtotal = p->numclusters * p->_clustsize;
1466245835Sluigi	if (netmap_verbose)
1467245835Sluigi		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
1468257529Sluigi		    p->numclusters, p->_clustsize >> 10,
1469257529Sluigi		    p->memtotal >> 10, p->name);
1470234228Sluigi
1471241719Sluigi	return 0;
1472234228Sluigi
1473234228Sluigiclean:
1474241719Sluigi	netmap_reset_obj_allocator(p);
1475241719Sluigi	return ENOMEM;
1476234228Sluigi}
1477234228Sluigi
1478241719Sluigi/* call with lock held */
1479234228Sluigistatic int
1480341477Svmaffionenetmap_mem_params_changed(struct netmap_obj_params* p)
1481234228Sluigi{
1482341477Svmaffione	int i, rv = 0;
1483234228Sluigi
1484241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1485341477Svmaffione		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
1486341477Svmaffione			p[i].last_size = p[i].size;
1487341477Svmaffione			p[i].last_num = p[i].num;
1488341477Svmaffione			rv = 1;
1489341477Svmaffione		}
1490241719Sluigi	}
1491341477Svmaffione	return rv;
1492241719Sluigi}
1493234228Sluigi
1494257529Sluigistatic void
1495257529Sluiginetmap_mem_reset_all(struct netmap_mem_d *nmd)
1496257529Sluigi{
1497257529Sluigi	int i;
1498261909Sluigi
1499261909Sluigi	if (netmap_verbose)
1500261909Sluigi		D("resetting %p", nmd);
1501257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1502257529Sluigi		netmap_reset_obj_allocator(&nmd->pools[i]);
1503257529Sluigi	}
1504257529Sluigi	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
1505257529Sluigi}
1506234228Sluigi
1507257529Sluigistatic int
1508270063Sluiginetmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
1509270063Sluigi{
1510341477Svmaffione	int i, lim = p->objtotal;
1511341477Svmaffione	struct netmap_lut *lut = &na->na_lut;
1512270063Sluigi
1513341477Svmaffione	if (na == NULL || na->pdev == NULL)
1514270063Sluigi		return 0;
1515270063Sluigi
1516341477Svmaffione#if defined(__FreeBSD__)
1517341477Svmaffione	/* On FreeBSD mapping and unmapping is performed by the txsync
1518341477Svmaffione	 * and rxsync routine, packet by packet. */
1519270063Sluigi	(void)i;
1520270063Sluigi	(void)lim;
1521341477Svmaffione	(void)lut;
1522341477Svmaffione#elif defined(_WIN32)
1523341477Svmaffione	(void)i;
1524341477Svmaffione	(void)lim;
1525341477Svmaffione	(void)lut;
1526341477Svmaffione	D("unsupported on Windows");
1527270063Sluigi#else /* linux */
1528341477Svmaffione	ND("unmapping and freeing plut for %s", na->name);
1529341477Svmaffione	if (lut->plut == NULL)
1530341477Svmaffione		return 0;
1531341477Svmaffione	for (i = 0; i < lim; i += p->_clustentries) {
1532341477Svmaffione		if (lut->plut[i].paddr)
1533341477Svmaffione			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
1534270063Sluigi	}
1535341477Svmaffione	nm_free_plut(lut->plut);
1536341477Svmaffione	lut->plut = NULL;
1537270063Sluigi#endif /* linux */
1538270063Sluigi
1539270063Sluigi	return 0;
1540270063Sluigi}
1541270063Sluigi
1542270063Sluigistatic int
1543270063Sluiginetmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
1544270063Sluigi{
1545341477Svmaffione	int error = 0;
1546341477Svmaffione	int i, lim = p->objtotal;
1547341477Svmaffione	struct netmap_lut *lut = &na->na_lut;
1548270063Sluigi
1549270063Sluigi	if (na->pdev == NULL)
1550270063Sluigi		return 0;
1551270063Sluigi
1552341477Svmaffione#if defined(__FreeBSD__)
1553341477Svmaffione	/* On FreeBSD mapping and unmapping is performed by the txsync
1554341477Svmaffione	 * and rxsync routine, packet by packet. */
1555341477Svmaffione	(void)i;
1556341477Svmaffione	(void)lim;
1557341477Svmaffione	(void)lut;
1558341477Svmaffione#elif defined(_WIN32)
1559341477Svmaffione	(void)i;
1560341477Svmaffione	(void)lim;
1561341477Svmaffione	(void)lut;
1562341477Svmaffione	D("unsupported on Windows");
1563341477Svmaffione#else /* linux */
1564341477Svmaffione
1565341477Svmaffione	if (lut->plut != NULL) {
1566341477Svmaffione		ND("plut already allocated for %s", na->name);
1567341477Svmaffione		return 0;
1568270063Sluigi	}
1569341477Svmaffione
1570341477Svmaffione	ND("allocating physical lut for %s", na->name);
1571341477Svmaffione	lut->plut = nm_alloc_plut(lim);
1572341477Svmaffione	if (lut->plut == NULL) {
1573341477Svmaffione		D("Failed to allocate physical lut for %s", na->name);
1574341477Svmaffione		return ENOMEM;
1575341477Svmaffione	}
1576341477Svmaffione
1577341477Svmaffione	for (i = 0; i < lim; i += p->_clustentries) {
1578341477Svmaffione		lut->plut[i].paddr = 0;
1579341477Svmaffione	}
1580341477Svmaffione
1581341477Svmaffione	for (i = 0; i < lim; i += p->_clustentries) {
1582341477Svmaffione		int j;
1583341477Svmaffione
1584341477Svmaffione		if (p->lut[i].vaddr == NULL)
1585341477Svmaffione			continue;
1586341477Svmaffione
1587341477Svmaffione		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
1588341477Svmaffione				p->lut[i].vaddr, p->_clustsize);
1589341477Svmaffione		if (error) {
1590341477Svmaffione			D("Failed to map cluster #%d from the %s pool", i, p->name);
1591341477Svmaffione			break;
1592341477Svmaffione		}
1593341477Svmaffione
1594341477Svmaffione		for (j = 1; j < p->_clustentries; j++) {
1595341477Svmaffione			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
1596341477Svmaffione		}
1597341477Svmaffione	}
1598341477Svmaffione
1599341477Svmaffione	if (error)
1600341477Svmaffione		netmap_mem_unmap(p, na);
1601341477Svmaffione
1602270063Sluigi#endif /* linux */
1603270063Sluigi
1604341477Svmaffione	return error;
1605270063Sluigi}
1606270063Sluigi
1607270063Sluigistatic int
1608257529Sluiginetmap_mem_finalize_all(struct netmap_mem_d *nmd)
1609257529Sluigi{
1610257529Sluigi	int i;
1611257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED)
1612257529Sluigi		return 0;
1613257529Sluigi	nmd->lasterr = 0;
1614257529Sluigi	nmd->nm_totalsize = 0;
1615257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1616257529Sluigi		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
1617257529Sluigi		if (nmd->lasterr)
1618257529Sluigi			goto error;
1619257529Sluigi		nmd->nm_totalsize += nmd->pools[i].memtotal;
1620257529Sluigi	}
1621341477Svmaffione	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
1622341477Svmaffione	if (nmd->lasterr)
1623341477Svmaffione		goto error;
1624341477Svmaffione
1625257529Sluigi	nmd->flags |= NETMAP_MEM_FINALIZED;
1626257529Sluigi
1627261909Sluigi	if (netmap_verbose)
1628261909Sluigi		D("interfaces %d KB, rings %d KB, buffers %d MB",
1629261909Sluigi		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1630261909Sluigi		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1631261909Sluigi		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1632257529Sluigi
1633261909Sluigi	if (netmap_verbose)
1634261909Sluigi		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1635257529Sluigi
1636257529Sluigi
1637257529Sluigi	return 0;
1638257529Sluigierror:
1639257529Sluigi	netmap_mem_reset_all(nmd);
1640257529Sluigi	return nmd->lasterr;
1641257529Sluigi}
1642257529Sluigi
1643261909Sluigi/*
1644261909Sluigi * allocator for private memory
1645261909Sluigi */
1646341477Svmaffionestatic void *
1647341477Svmaffione_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
1648341477Svmaffione		struct netmap_mem_ops *ops, int *perr)
1649257529Sluigi{
1650257529Sluigi	struct netmap_mem_d *d = NULL;
1651341477Svmaffione	int i, err = 0;
1652257529Sluigi
1653341477Svmaffione	d = nm_os_malloc(size);
1654261909Sluigi	if (d == NULL) {
1655261909Sluigi		err = ENOMEM;
1656261909Sluigi		goto error;
1657261909Sluigi	}
1658257529Sluigi
1659257529Sluigi	*d = nm_blueprint;
1660341477Svmaffione	d->ops = ops;
1661257529Sluigi
1662261909Sluigi	err = nm_mem_assign_id(d);
1663261909Sluigi	if (err)
1664341477Svmaffione		goto error_free;
1665341477Svmaffione	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);
1666261909Sluigi
1667341477Svmaffione	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1668341477Svmaffione		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1669341477Svmaffione				nm_blueprint.pools[i].name,
1670341477Svmaffione				d->name);
1671341477Svmaffione		d->params[i].num = p[i].num;
1672341477Svmaffione		d->params[i].size = p[i].size;
1673341477Svmaffione	}
1674341477Svmaffione
1675341477Svmaffione	NMA_LOCK_INIT(d);
1676341477Svmaffione
1677341477Svmaffione	err = netmap_mem_config(d);
1678341477Svmaffione	if (err)
1679341477Svmaffione		goto error_rel_id;
1680341477Svmaffione
1681341477Svmaffione	d->flags &= ~NETMAP_MEM_FINALIZED;
1682341477Svmaffione
1683341477Svmaffione	return d;
1684341477Svmaffione
1685341477Svmaffioneerror_rel_id:
1686341477Svmaffione	NMA_LOCK_DESTROY(d);
1687341477Svmaffione	nm_mem_release_id(d);
1688341477Svmaffioneerror_free:
1689341477Svmaffione	nm_os_free(d);
1690341477Svmaffioneerror:
1691341477Svmaffione	if (perr)
1692341477Svmaffione		*perr = err;
1693341477Svmaffione	return NULL;
1694341477Svmaffione}
1695341477Svmaffione
1696341477Svmaffionestruct netmap_mem_d *
1697341477Svmaffionenetmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
1698341477Svmaffione		u_int extra_bufs, u_int npipes, int *perr)
1699341477Svmaffione{
1700341477Svmaffione	struct netmap_mem_d *d = NULL;
1701341477Svmaffione	struct netmap_obj_params p[NETMAP_POOLS_NR];
1702341477Svmaffione	int i;
1703341477Svmaffione	u_int v, maxd;
1704261909Sluigi	/* account for the fake host rings */
1705257529Sluigi	txr++;
1706257529Sluigi	rxr++;
1707261909Sluigi
1708261909Sluigi	/* copy the min values */
1709261909Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1710261909Sluigi		p[i] = netmap_min_priv_params[i];
1711261909Sluigi	}
1712261909Sluigi
1713261909Sluigi	/* possibly increase them to fit user request */
1714261909Sluigi	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
1715261909Sluigi	if (p[NETMAP_IF_POOL].size < v)
1716261909Sluigi		p[NETMAP_IF_POOL].size = v;
1717261909Sluigi	v = 2 + 4 * npipes;
1718261909Sluigi	if (p[NETMAP_IF_POOL].num < v)
1719261909Sluigi		p[NETMAP_IF_POOL].num = v;
1720257529Sluigi	maxd = (txd > rxd) ? txd : rxd;
1721261909Sluigi	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
1722261909Sluigi	if (p[NETMAP_RING_POOL].size < v)
1723261909Sluigi		p[NETMAP_RING_POOL].size = v;
1724261909Sluigi	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
1725341477Svmaffione	 * and two rx rings (again, 1 normal and 1 fake host)
1726341477Svmaffione	 */
1727261909Sluigi	v = txr + rxr + 8 * npipes;
1728261909Sluigi	if (p[NETMAP_RING_POOL].num < v)
1729261909Sluigi		p[NETMAP_RING_POOL].num = v;
1730261909Sluigi	/* for each pipe we only need the buffers for the 4 "real" rings.
1731341477Svmaffione	 * On the other end, the pipe ring dimension may be different from
1732341477Svmaffione	 * the parent port ring dimension. As a compromise, we allocate twice the
1733341477Svmaffione	 * space actually needed if the pipe rings were the same size as the parent rings
1734341477Svmaffione	 */
1735261909Sluigi	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1736261909Sluigi		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1737261909Sluigi	if (p[NETMAP_BUF_POOL].num < v)
1738261909Sluigi		p[NETMAP_BUF_POOL].num = v;
1739257529Sluigi
1740261909Sluigi	if (netmap_verbose)
1741261909Sluigi		D("req if %d*%d ring %d*%d buf %d*%d",
1742257529Sluigi			p[NETMAP_IF_POOL].num,
1743257529Sluigi			p[NETMAP_IF_POOL].size,
1744257529Sluigi			p[NETMAP_RING_POOL].num,
1745257529Sluigi			p[NETMAP_RING_POOL].size,
1746257529Sluigi			p[NETMAP_BUF_POOL].num,
1747257529Sluigi			p[NETMAP_BUF_POOL].size);
1748257529Sluigi
1749341477Svmaffione	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);
1750257529Sluigi
1751257529Sluigi	return d;
1752257529Sluigi}
1753257529Sluigi
1754257529Sluigi
1755241719Sluigi/* call with lock held */
1756241719Sluigistatic int
1757341477Svmaffionenetmap_mem2_config(struct netmap_mem_d *nmd)
1758241719Sluigi{
1759241719Sluigi	int i;
1760234228Sluigi
1761341477Svmaffione	if (!netmap_mem_params_changed(nmd->params))
1762241719Sluigi		goto out;
1763234228Sluigi
1764285349Sluigi	ND("reconfiguring");
1765241719Sluigi
1766257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1767241719Sluigi		/* reset previous allocation */
1768241719Sluigi		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1769257529Sluigi			netmap_reset_obj_allocator(&nmd->pools[i]);
1770250184Sluigi		}
1771257529Sluigi		nmd->flags &= ~NETMAP_MEM_FINALIZED;
1772259412Sluigi	}
1773241719Sluigi
1774241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1775257529Sluigi		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1776341477Svmaffione				nmd->params[i].num, nmd->params[i].size);
1777257529Sluigi		if (nmd->lasterr)
1778241719Sluigi			goto out;
1779241719Sluigi	}
1780241719Sluigi
1781241719Sluigiout:
1782241719Sluigi
1783257529Sluigi	return nmd->lasterr;
1784241719Sluigi}
1785241719Sluigi
1786241719Sluigistatic int
1787341477Svmaffionenetmap_mem2_finalize(struct netmap_mem_d *nmd)
1788241719Sluigi{
1789341477Svmaffione	if (nmd->flags & NETMAP_MEM_FINALIZED)
1790241719Sluigi		goto out;
1791241719Sluigi
1792257529Sluigi	if (netmap_mem_finalize_all(nmd))
1793257529Sluigi		goto out;
1794241719Sluigi
1795257529Sluigi	nmd->lasterr = 0;
1796241719Sluigi
1797241719Sluigiout:
1798341477Svmaffione	return nmd->lasterr;
1799234228Sluigi}
1800234228Sluigi
1801285349Sluigistatic void
1802341477Svmaffionenetmap_mem2_delete(struct netmap_mem_d *nmd)
1803285349Sluigi{
1804285349Sluigi	int i;
1805285349Sluigi
1806285349Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1807341477Svmaffione	    netmap_destroy_obj_allocator(&nmd->pools[i]);
1808285349Sluigi	}
1809285349Sluigi
1810341477Svmaffione	NMA_LOCK_DESTROY(nmd);
1811341477Svmaffione	if (nmd != &nm_mem)
1812341477Svmaffione		nm_os_free(nmd);
1813285349Sluigi}
1814285349Sluigi
1815341477Svmaffione#ifdef WITH_EXTMEM
1816341477Svmaffione/* doubly linekd list of all existing external allocators */
1817341477Svmaffionestatic struct netmap_mem_ext *netmap_mem_ext_list = NULL;
1818341477SvmaffioneNM_MTX_T nm_mem_ext_list_lock;
1819341477Svmaffione#endif /* WITH_EXTMEM */
1820341477Svmaffione
1821257529Sluigiint
1822257529Sluiginetmap_mem_init(void)
1823241719Sluigi{
1824341477Svmaffione	NM_MTX_INIT(nm_mem_list_lock);
1825257529Sluigi	NMA_LOCK_INIT(&nm_mem);
1826285349Sluigi	netmap_mem_get(&nm_mem);
1827341477Svmaffione#ifdef WITH_EXTMEM
1828341477Svmaffione	NM_MTX_INIT(nm_mem_ext_list_lock);
1829341477Svmaffione#endif /* WITH_EXTMEM */
1830241719Sluigi	return (0);
1831241719Sluigi}
1832234228Sluigi
1833257529Sluigivoid
1834257529Sluiginetmap_mem_fini(void)
1835234228Sluigi{
1836285349Sluigi	netmap_mem_put(&nm_mem);
1837234228Sluigi}
1838234228Sluigi
1839241719Sluigistatic void
1840241719Sluiginetmap_free_rings(struct netmap_adapter *na)
1841241719Sluigi{
1842285349Sluigi	enum txrx t;
1843285349Sluigi
1844285349Sluigi	for_rx_tx(t) {
1845285349Sluigi		u_int i;
1846341477Svmaffione		for (i = 0; i < netmap_all_rings(na, t); i++) {
1847341477Svmaffione			struct netmap_kring *kring = NMR(na, t)[i];
1848285349Sluigi			struct netmap_ring *ring = kring->ring;
1849285349Sluigi
1850341477Svmaffione			if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
1851341477Svmaffione				if (netmap_verbose)
1852341477Svmaffione					D("NOT deleting ring %s (ring %p, users %d neekring %d)",
1853341477Svmaffione						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1854285349Sluigi				continue;
1855341477Svmaffione			}
1856341477Svmaffione			if (netmap_verbose)
1857341477Svmaffione				D("deleting ring %s", kring->name);
1858341477Svmaffione			if (!(kring->nr_kflags & NKR_FAKERING)) {
1859341477Svmaffione				ND("freeing bufs for %s", kring->name);
1860341477Svmaffione				netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1861341477Svmaffione			} else {
1862341477Svmaffione				ND("NOT freeing bufs for %s", kring->name);
1863341477Svmaffione			}
1864285349Sluigi			netmap_ring_free(na->nm_mem, ring);
1865285349Sluigi			kring->ring = NULL;
1866285349Sluigi		}
1867241719Sluigi	}
1868241719Sluigi}
1869234228Sluigi
1870259412Sluigi/* call with NMA_LOCK held *
1871257529Sluigi *
1872259412Sluigi * Allocate netmap rings and buffers for this card
1873259412Sluigi * The rings are contiguous, but have variable size.
1874261909Sluigi * The kring array must follow the layout described
1875261909Sluigi * in netmap_krings_create().
1876245835Sluigi */
1877285349Sluigistatic int
1878285349Sluiginetmap_mem2_rings_create(struct netmap_adapter *na)
1879234228Sluigi{
1880285349Sluigi	enum txrx t;
1881234228Sluigi
1882285349Sluigi	for_rx_tx(t) {
1883285349Sluigi		u_int i;
1884234228Sluigi
1885341477Svmaffione		for (i = 0; i < netmap_all_rings(na, t); i++) {
1886341477Svmaffione			struct netmap_kring *kring = NMR(na, t)[i];
1887285349Sluigi			struct netmap_ring *ring = kring->ring;
1888285349Sluigi			u_int len, ndesc;
1889285349Sluigi
1890341477Svmaffione			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
1891341477Svmaffione				/* uneeded, or already created by somebody else */
1892341477Svmaffione				if (netmap_verbose)
1893341477Svmaffione					D("NOT creating ring %s (ring %p, users %d neekring %d)",
1894341477Svmaffione						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1895341477Svmaffione				continue;
1896285349Sluigi			}
1897341477Svmaffione			if (netmap_verbose)
1898341477Svmaffione				D("creating %s", kring->name);
1899285349Sluigi			ndesc = kring->nkr_num_slots;
1900285349Sluigi			len = sizeof(struct netmap_ring) +
1901285349Sluigi				  ndesc * sizeof(struct netmap_slot);
1902285349Sluigi			ring = netmap_ring_malloc(na->nm_mem, len);
1903285349Sluigi			if (ring == NULL) {
1904285349Sluigi				D("Cannot allocate %s_ring", nm_txrx2str(t));
1905261909Sluigi				goto cleanup;
1906261909Sluigi			}
1907285349Sluigi			ND("txring at %p", ring);
1908285349Sluigi			kring->ring = ring;
1909285349Sluigi			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1910285349Sluigi			*(int64_t *)(uintptr_t)&ring->buf_ofs =
1911285349Sluigi			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1912285349Sluigi				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1913285349Sluigi				netmap_ring_offset(na->nm_mem, ring);
1914234228Sluigi
1915285349Sluigi			/* copy values from kring */
1916285349Sluigi			ring->head = kring->rhead;
1917285349Sluigi			ring->cur = kring->rcur;
1918285349Sluigi			ring->tail = kring->rtail;
1919341477Svmaffione			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
1920285349Sluigi				netmap_mem_bufsize(na->nm_mem);
1921285349Sluigi			ND("%s h %d c %d t %d", kring->name,
1922285349Sluigi				ring->head, ring->cur, ring->tail);
1923341477Svmaffione			ND("initializing slots for %s_ring", nm_txrx2str(t));
1924341477Svmaffione			if (!(kring->nr_kflags & NKR_FAKERING)) {
1925285349Sluigi				/* this is a real ring */
1926341477Svmaffione				ND("allocating buffers for %s", kring->name);
1927285349Sluigi				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1928285349Sluigi					D("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
1929285349Sluigi					goto cleanup;
1930285349Sluigi				}
1931285349Sluigi			} else {
1932285349Sluigi				/* this is a fake ring, set all indices to 0 */
1933341477Svmaffione				ND("NOT allocating buffers for %s", kring->name);
1934285349Sluigi				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
1935261909Sluigi			}
1936285349Sluigi		        /* ring info */
1937285349Sluigi		        *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
1938285349Sluigi		        *(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
1939241719Sluigi		}
1940234228Sluigi	}
1941259412Sluigi
1942259412Sluigi	return 0;
1943259412Sluigi
1944259412Sluigicleanup:
1945341477Svmaffione	/* we cannot actually cleanup here, since we don't own kring->users
1946341477Svmaffione	 * and kring->nr_klags & NKR_NEEDRING. The caller must decrement
1947341477Svmaffione	 * the first or zero-out the second, then call netmap_free_rings()
1948341477Svmaffione	 * to do the cleanup
1949341477Svmaffione	 */
1950259412Sluigi
1951259412Sluigi	return ENOMEM;
1952259412Sluigi}
1953259412Sluigi
1954285349Sluigistatic void
1955285349Sluiginetmap_mem2_rings_delete(struct netmap_adapter *na)
1956259412Sluigi{
1957259412Sluigi	/* last instance, release bufs and rings */
1958259412Sluigi	netmap_free_rings(na);
1959259412Sluigi}
1960259412Sluigi
1961259412Sluigi
1962259412Sluigi/* call with NMA_LOCK held */
1963259412Sluigi/*
1964259412Sluigi * Allocate the per-fd structure netmap_if.
1965259412Sluigi *
1966259412Sluigi * We assume that the configuration stored in na
1967259412Sluigi * (number of tx/rx rings and descs) does not change while
1968259412Sluigi * the interface is in netmap mode.
1969259412Sluigi */
1970285349Sluigistatic struct netmap_if *
1971341477Svmaffionenetmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
1972259412Sluigi{
1973259412Sluigi	struct netmap_if *nifp;
1974259412Sluigi	ssize_t base; /* handy for relative offsets between rings and nifp */
1975285349Sluigi	u_int i, len, n[NR_TXRX], ntot;
1976285349Sluigi	enum txrx t;
1977259412Sluigi
1978285349Sluigi	ntot = 0;
1979285349Sluigi	for_rx_tx(t) {
1980285349Sluigi		/* account for the (eventually fake) host rings */
1981341477Svmaffione		n[t] = netmap_all_rings(na, t);
1982285349Sluigi		ntot += n[t];
1983285349Sluigi	}
1984234228Sluigi	/*
1985259412Sluigi	 * the descriptor is followed inline by an array of offsets
1986259412Sluigi	 * to the tx and rx rings in the shared memory region.
1987259412Sluigi	 */
1988259412Sluigi
1989285349Sluigi	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
1990259412Sluigi	nifp = netmap_if_malloc(na->nm_mem, len);
1991259412Sluigi	if (nifp == NULL) {
1992259412Sluigi		NMA_UNLOCK(na->nm_mem);
1993259412Sluigi		return NULL;
1994259412Sluigi	}
1995259412Sluigi
1996259412Sluigi	/* initialize base fields -- override const */
1997259412Sluigi	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
1998259412Sluigi	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
1999270063Sluigi	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);
2000259412Sluigi
2001259412Sluigi	/*
2002234228Sluigi	 * fill the slots for the rx and tx rings. They contain the offset
2003234228Sluigi	 * between the ring and nifp, so the information is usable in
2004234228Sluigi	 * userspace to reach the ring from the nifp.
2005234228Sluigi	 */
2006257529Sluigi	base = netmap_if_offset(na->nm_mem, nifp);
2007285349Sluigi	for (i = 0; i < n[NR_TX]; i++) {
2008341477Svmaffione		/* XXX instead of ofs == 0 maybe use the offset of an error
2009341477Svmaffione		 * ring, like we do for buffers? */
2010341477Svmaffione		ssize_t ofs = 0;
2011341477Svmaffione
2012341477Svmaffione		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2013341477Svmaffione				&& i < priv->np_qlast[NR_TX]) {
2014341477Svmaffione			ofs = netmap_ring_offset(na->nm_mem,
2015341477Svmaffione						 na->tx_rings[i]->ring) - base;
2016341477Svmaffione		}
2017341477Svmaffione		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2018234228Sluigi	}
2019285349Sluigi	for (i = 0; i < n[NR_RX]; i++) {
2020341477Svmaffione		/* XXX instead of ofs == 0 maybe use the offset of an error
2021341477Svmaffione		 * ring, like we do for buffers? */
2022341477Svmaffione		ssize_t ofs = 0;
2023341477Svmaffione
2024341477Svmaffione		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2025341477Svmaffione				&& i < priv->np_qlast[NR_RX]) {
2026341477Svmaffione			ofs = netmap_ring_offset(na->nm_mem,
2027341477Svmaffione						 na->rx_rings[i]->ring) - base;
2028341477Svmaffione		}
2029341477Svmaffione		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2030234228Sluigi	}
2031257529Sluigi
2032234228Sluigi	return (nifp);
2033234228Sluigi}
2034234228Sluigi
2035285349Sluigistatic void
2036285349Sluiginetmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
2037257529Sluigi{
2038257529Sluigi	if (nifp == NULL)
2039257529Sluigi		/* nothing to do */
2040257529Sluigi		return;
2041261909Sluigi	if (nifp->ni_bufs_head)
2042261909Sluigi		netmap_extra_free(na, nifp->ni_bufs_head);
2043257529Sluigi	netmap_if_free(na->nm_mem, nifp);
2044257529Sluigi}
2045257529Sluigi
2046234228Sluigistatic void
2047341477Svmaffionenetmap_mem2_deref(struct netmap_mem_d *nmd)
2048234228Sluigi{
2049257529Sluigi
2050245835Sluigi	if (netmap_verbose)
2051285349Sluigi		D("active = %d", nmd->active);
2052257529Sluigi
2053234228Sluigi}
2054257529Sluigi
2055285349Sluigistruct netmap_mem_ops netmap_mem_global_ops = {
2056285349Sluigi	.nmd_get_lut = netmap_mem2_get_lut,
2057285349Sluigi	.nmd_get_info = netmap_mem2_get_info,
2058285349Sluigi	.nmd_ofstophys = netmap_mem2_ofstophys,
2059341477Svmaffione	.nmd_config = netmap_mem2_config,
2060341477Svmaffione	.nmd_finalize = netmap_mem2_finalize,
2061341477Svmaffione	.nmd_deref = netmap_mem2_deref,
2062341477Svmaffione	.nmd_delete = netmap_mem2_delete,
2063285349Sluigi	.nmd_if_offset = netmap_mem2_if_offset,
2064285349Sluigi	.nmd_if_new = netmap_mem2_if_new,
2065285349Sluigi	.nmd_if_delete = netmap_mem2_if_delete,
2066285349Sluigi	.nmd_rings_create = netmap_mem2_rings_create,
2067285349Sluigi	.nmd_rings_delete = netmap_mem2_rings_delete
2068285349Sluigi};
2069341477Svmaffione
2070341477Svmaffioneint
2071341477Svmaffionenetmap_mem_pools_info_get(struct nmreq_pools_info *req,
2072341477Svmaffione				struct netmap_mem_d *nmd)
2073341477Svmaffione{
2074341477Svmaffione	int ret;
2075341477Svmaffione
2076341477Svmaffione	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2077341477Svmaffione					&req->nr_mem_id);
2078341477Svmaffione	if (ret) {
2079341477Svmaffione		return ret;
2080341477Svmaffione	}
2081341477Svmaffione
2082341477Svmaffione	NMA_LOCK(nmd);
2083341477Svmaffione	req->nr_if_pool_offset = 0;
2084341477Svmaffione	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2085341477Svmaffione	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2086341477Svmaffione
2087341477Svmaffione	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2088341477Svmaffione	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2089341477Svmaffione	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2090341477Svmaffione
2091341477Svmaffione	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2092341477Svmaffione			     nmd->pools[NETMAP_RING_POOL].memtotal;
2093341477Svmaffione	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2094341477Svmaffione	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2095341477Svmaffione	NMA_UNLOCK(nmd);
2096341477Svmaffione
2097341477Svmaffione	return 0;
2098341477Svmaffione}
2099341477Svmaffione
2100341477Svmaffione#ifdef WITH_EXTMEM
2101341477Svmaffionestruct netmap_mem_ext {
2102341477Svmaffione	struct netmap_mem_d up;
2103341477Svmaffione
2104341477Svmaffione	struct nm_os_extmem *os;
2105341477Svmaffione	struct netmap_mem_ext *next, *prev;
2106341477Svmaffione};
2107341477Svmaffione
2108341477Svmaffione/* call with nm_mem_list_lock held */
2109341477Svmaffionestatic void
2110341477Svmaffionenetmap_mem_ext_register(struct netmap_mem_ext *e)
2111341477Svmaffione{
2112341477Svmaffione	NM_MTX_LOCK(nm_mem_ext_list_lock);
2113341477Svmaffione	if (netmap_mem_ext_list)
2114341477Svmaffione		netmap_mem_ext_list->prev = e;
2115341477Svmaffione	e->next = netmap_mem_ext_list;
2116341477Svmaffione	netmap_mem_ext_list = e;
2117341477Svmaffione	e->prev = NULL;
2118341477Svmaffione	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2119341477Svmaffione}
2120341477Svmaffione
2121341477Svmaffione/* call with nm_mem_list_lock held */
2122341477Svmaffionestatic void
2123341477Svmaffionenetmap_mem_ext_unregister(struct netmap_mem_ext *e)
2124341477Svmaffione{
2125341477Svmaffione	if (e->prev)
2126341477Svmaffione		e->prev->next = e->next;
2127341477Svmaffione	else
2128341477Svmaffione		netmap_mem_ext_list = e->next;
2129341477Svmaffione	if (e->next)
2130341477Svmaffione		e->next->prev = e->prev;
2131341477Svmaffione	e->prev = e->next = NULL;
2132341477Svmaffione}
2133341477Svmaffione
2134341477Svmaffionestatic struct netmap_mem_ext *
2135341477Svmaffionenetmap_mem_ext_search(struct nm_os_extmem *os)
2136341477Svmaffione{
2137341477Svmaffione	struct netmap_mem_ext *e;
2138341477Svmaffione
2139341477Svmaffione	NM_MTX_LOCK(nm_mem_ext_list_lock);
2140341477Svmaffione	for (e = netmap_mem_ext_list; e; e = e->next) {
2141341477Svmaffione		if (nm_os_extmem_isequal(e->os, os)) {
2142341477Svmaffione			netmap_mem_get(&e->up);
2143341477Svmaffione			break;
2144341477Svmaffione		}
2145341477Svmaffione	}
2146341477Svmaffione	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2147341477Svmaffione	return e;
2148341477Svmaffione}
2149341477Svmaffione
2150341477Svmaffione
2151341477Svmaffionestatic void
2152341477Svmaffionenetmap_mem_ext_delete(struct netmap_mem_d *d)
2153341477Svmaffione{
2154341477Svmaffione	int i;
2155341477Svmaffione	struct netmap_mem_ext *e =
2156341477Svmaffione		(struct netmap_mem_ext *)d;
2157341477Svmaffione
2158341477Svmaffione	netmap_mem_ext_unregister(e);
2159341477Svmaffione
2160341477Svmaffione	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2161341477Svmaffione		struct netmap_obj_pool *p = &d->pools[i];
2162341477Svmaffione
2163341477Svmaffione		if (p->lut) {
2164341477Svmaffione			nm_free_lut(p->lut, p->objtotal);
2165341477Svmaffione			p->lut = NULL;
2166341477Svmaffione		}
2167341477Svmaffione	}
2168341477Svmaffione	if (e->os)
2169341477Svmaffione		nm_os_extmem_delete(e->os);
2170341477Svmaffione	netmap_mem2_delete(d);
2171341477Svmaffione}
2172341477Svmaffione
2173341477Svmaffionestatic int
2174341477Svmaffionenetmap_mem_ext_config(struct netmap_mem_d *nmd)
2175341477Svmaffione{
2176341477Svmaffione	return 0;
2177341477Svmaffione}
2178341477Svmaffione
2179341477Svmaffionestruct netmap_mem_ops netmap_mem_ext_ops = {
2180285349Sluigi	.nmd_get_lut = netmap_mem2_get_lut,
2181285349Sluigi	.nmd_get_info = netmap_mem2_get_info,
2182285349Sluigi	.nmd_ofstophys = netmap_mem2_ofstophys,
2183341477Svmaffione	.nmd_config = netmap_mem_ext_config,
2184341477Svmaffione	.nmd_finalize = netmap_mem2_finalize,
2185341477Svmaffione	.nmd_deref = netmap_mem2_deref,
2186341477Svmaffione	.nmd_delete = netmap_mem_ext_delete,
2187285349Sluigi	.nmd_if_offset = netmap_mem2_if_offset,
2188285349Sluigi	.nmd_if_new = netmap_mem2_if_new,
2189285349Sluigi	.nmd_if_delete = netmap_mem2_if_delete,
2190285349Sluigi	.nmd_rings_create = netmap_mem2_rings_create,
2191285349Sluigi	.nmd_rings_delete = netmap_mem2_rings_delete
2192285349Sluigi};
2193341477Svmaffione
2194341477Svmaffionestruct netmap_mem_d *
2195341477Svmaffionenetmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
2196341477Svmaffione{
2197341477Svmaffione	int error = 0;
2198341477Svmaffione	int i, j;
2199341477Svmaffione	struct netmap_mem_ext *nme;
2200341477Svmaffione	char *clust;
2201341477Svmaffione	size_t off;
2202341477Svmaffione	struct nm_os_extmem *os = NULL;
2203341477Svmaffione	int nr_pages;
2204341477Svmaffione
2205341477Svmaffione	// XXX sanity checks
2206341477Svmaffione	if (pi->nr_if_pool_objtotal == 0)
2207341477Svmaffione		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
2208341477Svmaffione	if (pi->nr_if_pool_objsize == 0)
2209341477Svmaffione		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
2210341477Svmaffione	if (pi->nr_ring_pool_objtotal == 0)
2211341477Svmaffione		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
2212341477Svmaffione	if (pi->nr_ring_pool_objsize == 0)
2213341477Svmaffione		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
2214341477Svmaffione	if (pi->nr_buf_pool_objtotal == 0)
2215341477Svmaffione		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
2216341477Svmaffione	if (pi->nr_buf_pool_objsize == 0)
2217341477Svmaffione		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
2218341477Svmaffione	D("if %d %d ring %d %d buf %d %d",
2219341477Svmaffione			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
2220341477Svmaffione			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
2221341477Svmaffione			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
2222341477Svmaffione
2223341477Svmaffione	os = nm_os_extmem_create(usrptr, pi, &error);
2224341477Svmaffione	if (os == NULL) {
2225341477Svmaffione		D("os extmem creation failed");
2226341477Svmaffione		goto out;
2227341477Svmaffione	}
2228341477Svmaffione
2229341477Svmaffione	nme = netmap_mem_ext_search(os);
2230341477Svmaffione	if (nme) {
2231341477Svmaffione		nm_os_extmem_delete(os);
2232341477Svmaffione		return &nme->up;
2233341477Svmaffione	}
2234341477Svmaffione	D("not found, creating new");
2235341477Svmaffione
2236341477Svmaffione	nme = _netmap_mem_private_new(sizeof(*nme),
2237341477Svmaffione			(struct netmap_obj_params[]){
2238341477Svmaffione				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
2239341477Svmaffione				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
2240341477Svmaffione				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
2241341477Svmaffione			&netmap_mem_ext_ops,
2242341477Svmaffione			&error);
2243341477Svmaffione	if (nme == NULL)
2244341477Svmaffione		goto out_unmap;
2245341477Svmaffione
2246341477Svmaffione	nr_pages = nm_os_extmem_nr_pages(os);
2247341477Svmaffione
2248341477Svmaffione	/* from now on pages will be released by nme destructor;
2249341477Svmaffione	 * we let res = 0 to prevent release in out_unmap below
2250341477Svmaffione	 */
2251341477Svmaffione	nme->os = os;
2252341477Svmaffione	os = NULL; /* pass ownership */
2253341477Svmaffione
2254341477Svmaffione	clust = nm_os_extmem_nextpage(nme->os);
2255341477Svmaffione	off = 0;
2256341477Svmaffione	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2257341477Svmaffione		struct netmap_obj_pool *p = &nme->up.pools[i];
2258341477Svmaffione		struct netmap_obj_params *o = &nme->up.params[i];
2259341477Svmaffione
2260341477Svmaffione		p->_objsize = o->size;
2261341477Svmaffione		p->_clustsize = o->size;
2262341477Svmaffione		p->_clustentries = 1;
2263341477Svmaffione
2264341477Svmaffione		p->lut = nm_alloc_lut(o->num);
2265341477Svmaffione		if (p->lut == NULL) {
2266341477Svmaffione			error = ENOMEM;
2267341477Svmaffione			goto out_delete;
2268341477Svmaffione		}
2269341477Svmaffione
2270341477Svmaffione		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
2271341477Svmaffione		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
2272341477Svmaffione		if (p->invalid_bitmap == NULL) {
2273341477Svmaffione			error = ENOMEM;
2274341477Svmaffione			goto out_delete;
2275341477Svmaffione		}
2276341477Svmaffione
2277341477Svmaffione		if (nr_pages == 0) {
2278341477Svmaffione			p->objtotal = 0;
2279341477Svmaffione			p->memtotal = 0;
2280341477Svmaffione			p->objfree = 0;
2281341477Svmaffione			continue;
2282341477Svmaffione		}
2283341477Svmaffione
2284341477Svmaffione		for (j = 0; j < o->num && nr_pages > 0; j++) {
2285341477Svmaffione			size_t noff;
2286341477Svmaffione
2287341477Svmaffione			p->lut[j].vaddr = clust + off;
2288341477Svmaffione#if !defined(linux) && !defined(_WIN32)
2289341477Svmaffione			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
2290341477Svmaffione#endif
2291341477Svmaffione			ND("%s %d at %p", p->name, j, p->lut[j].vaddr);
2292341477Svmaffione			noff = off + p->_objsize;
2293341477Svmaffione			if (noff < PAGE_SIZE) {
2294341477Svmaffione				off = noff;
2295341477Svmaffione				continue;
2296341477Svmaffione			}
2297341477Svmaffione			ND("too big, recomputing offset...");
2298341477Svmaffione			while (noff >= PAGE_SIZE) {
2299341477Svmaffione				char *old_clust = clust;
2300341477Svmaffione				noff -= PAGE_SIZE;
2301341477Svmaffione				clust = nm_os_extmem_nextpage(nme->os);
2302341477Svmaffione				nr_pages--;
2303341477Svmaffione				ND("noff %zu page %p nr_pages %d", noff,
2304341477Svmaffione						page_to_virt(*pages), nr_pages);
2305341477Svmaffione				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
2306341477Svmaffione					(nr_pages == 0 ||
2307341477Svmaffione					 old_clust + PAGE_SIZE != clust))
2308341477Svmaffione				{
2309341477Svmaffione					/* out of space or non contiguous,
2310341477Svmaffione					 * drop this object
2311341477Svmaffione					 * */
2312341477Svmaffione					p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
2313341477Svmaffione					ND("non contiguous at off %zu, drop", noff);
2314341477Svmaffione				}
2315341477Svmaffione				if (nr_pages == 0)
2316341477Svmaffione					break;
2317341477Svmaffione			}
2318341477Svmaffione			off = noff;
2319341477Svmaffione		}
2320341477Svmaffione		p->objtotal = j;
2321341477Svmaffione		p->numclusters = p->objtotal;
2322341477Svmaffione		p->memtotal = j * p->_objsize;
2323341477Svmaffione		ND("%d memtotal %u", j, p->memtotal);
2324341477Svmaffione	}
2325341477Svmaffione
2326341477Svmaffione	netmap_mem_ext_register(nme);
2327341477Svmaffione
2328341477Svmaffione	return &nme->up;
2329341477Svmaffione
2330341477Svmaffioneout_delete:
2331341477Svmaffione	netmap_mem_put(&nme->up);
2332341477Svmaffioneout_unmap:
2333341477Svmaffione	if (os)
2334341477Svmaffione		nm_os_extmem_delete(os);
2335341477Svmaffioneout:
2336341477Svmaffione	if (perror)
2337341477Svmaffione		*perror = error;
2338341477Svmaffione	return NULL;
2339341477Svmaffione
2340341477Svmaffione}
2341341477Svmaffione#endif /* WITH_EXTMEM */
2342341477Svmaffione
2343341477Svmaffione
2344341477Svmaffione#ifdef WITH_PTNETMAP_GUEST
/* One passthrough interface registered with a ptnetmap guest allocator
 * (singly-linked list entry, head in struct netmap_mem_ptg::pt_ifs).
 */
struct mem_pt_if {
	struct mem_pt_if *next;		/* next entry in the list */
	struct ifnet *ifp;		/* the passthrough interface */
	unsigned int nifp_offset;	/* offset of this interface's netmap_if
					 * inside the shared memory region */
};
2350341477Svmaffione
/* Netmap allocator for ptnetmap guests.
 *
 * The guest does not manage netmap memory itself: it accesses the host
 * allocator's memory through a region exposed by the ptnetmap memory
 * device (mapped in netmap_mem_pt_guest_finalize()).
 */
struct netmap_mem_ptg {
	struct netmap_mem_d up;		/* common allocator fields; kept first
					 * so that netmap_mem_d pointers can be
					 * cast to netmap_mem_ptg */

	vm_paddr_t nm_paddr;            /* physical address in the guest */
	void *nm_addr;                  /* virtual address in the guest */
	struct netmap_lut buf_lut;      /* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;         /* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};
2362341477Svmaffione
2363341477Svmaffione/* Link a passthrough interface to a passthrough netmap allocator. */
2364341477Svmaffionestatic int
2365341477Svmaffionenetmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
2366341477Svmaffione			    unsigned int nifp_offset)
2367341477Svmaffione{
2368341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2369341477Svmaffione	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));
2370341477Svmaffione
2371341477Svmaffione	if (!ptif) {
2372341477Svmaffione		return ENOMEM;
2373341477Svmaffione	}
2374341477Svmaffione
2375341477Svmaffione	NMA_LOCK(nmd);
2376341477Svmaffione
2377341477Svmaffione	ptif->ifp = ifp;
2378341477Svmaffione	ptif->nifp_offset = nifp_offset;
2379341477Svmaffione
2380341477Svmaffione	if (ptnmd->pt_ifs) {
2381341477Svmaffione		ptif->next = ptnmd->pt_ifs;
2382341477Svmaffione	}
2383341477Svmaffione	ptnmd->pt_ifs = ptif;
2384341477Svmaffione
2385341477Svmaffione	NMA_UNLOCK(nmd);
2386341477Svmaffione
2387341477Svmaffione	D("added (ifp=%p,nifp_offset=%u)", ptif->ifp, ptif->nifp_offset);
2388341477Svmaffione
2389341477Svmaffione	return 0;
2390341477Svmaffione}
2391341477Svmaffione
2392341477Svmaffione/* Called with NMA_LOCK(nmd) held. */
2393341477Svmaffionestatic struct mem_pt_if *
2394341477Svmaffionenetmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
2395341477Svmaffione{
2396341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2397341477Svmaffione	struct mem_pt_if *curr;
2398341477Svmaffione
2399341477Svmaffione	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2400341477Svmaffione		if (curr->ifp == ifp) {
2401341477Svmaffione			return curr;
2402341477Svmaffione		}
2403341477Svmaffione	}
2404341477Svmaffione
2405341477Svmaffione	return NULL;
2406341477Svmaffione}
2407341477Svmaffione
2408341477Svmaffione/* Unlink a passthrough interface from a passthrough netmap allocator. */
2409341477Svmaffioneint
2410341477Svmaffionenetmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
2411341477Svmaffione{
2412341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2413341477Svmaffione	struct mem_pt_if *prev = NULL;
2414341477Svmaffione	struct mem_pt_if *curr;
2415341477Svmaffione	int ret = -1;
2416341477Svmaffione
2417341477Svmaffione	NMA_LOCK(nmd);
2418341477Svmaffione
2419341477Svmaffione	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2420341477Svmaffione		if (curr->ifp == ifp) {
2421341477Svmaffione			if (prev) {
2422341477Svmaffione				prev->next = curr->next;
2423341477Svmaffione			} else {
2424341477Svmaffione				ptnmd->pt_ifs = curr->next;
2425341477Svmaffione			}
2426341477Svmaffione			D("removed (ifp=%p,nifp_offset=%u)",
2427341477Svmaffione			  curr->ifp, curr->nifp_offset);
2428341477Svmaffione			nm_os_free(curr);
2429341477Svmaffione			ret = 0;
2430341477Svmaffione			break;
2431341477Svmaffione		}
2432341477Svmaffione		prev = curr;
2433341477Svmaffione	}
2434341477Svmaffione
2435341477Svmaffione	NMA_UNLOCK(nmd);
2436341477Svmaffione
2437341477Svmaffione	return ret;
2438341477Svmaffione}
2439341477Svmaffione
2440341477Svmaffionestatic int
2441341477Svmaffionenetmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
2442341477Svmaffione{
2443341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2444341477Svmaffione
2445341477Svmaffione	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2446341477Svmaffione		return EINVAL;
2447341477Svmaffione	}
2448341477Svmaffione
2449341477Svmaffione	*lut = ptnmd->buf_lut;
2450341477Svmaffione	return 0;
2451341477Svmaffione}
2452341477Svmaffione
2453341477Svmaffionestatic int
2454341477Svmaffionenetmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
2455341477Svmaffione			     u_int *memflags, uint16_t *id)
2456341477Svmaffione{
2457341477Svmaffione	int error = 0;
2458341477Svmaffione
2459341477Svmaffione	error = nmd->ops->nmd_config(nmd);
2460341477Svmaffione	if (error)
2461341477Svmaffione		goto out;
2462341477Svmaffione
2463341477Svmaffione	if (size)
2464341477Svmaffione		*size = nmd->nm_totalsize;
2465341477Svmaffione	if (memflags)
2466341477Svmaffione		*memflags = nmd->flags;
2467341477Svmaffione	if (id)
2468341477Svmaffione		*id = nmd->nm_id;
2469341477Svmaffione
2470341477Svmaffioneout:
2471341477Svmaffione
2472341477Svmaffione	return error;
2473341477Svmaffione}
2474341477Svmaffione
2475341477Svmaffionestatic vm_paddr_t
2476341477Svmaffionenetmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
2477341477Svmaffione{
2478341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2479341477Svmaffione	vm_paddr_t paddr;
2480341477Svmaffione	/* if the offset is valid, just return csb->base_addr + off */
2481341477Svmaffione	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
2482341477Svmaffione	ND("off %lx padr %lx", off, (unsigned long)paddr);
2483341477Svmaffione	return paddr;
2484341477Svmaffione}
2485341477Svmaffione
static int
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
{
	/* The guest allocator is fully configured at creation time and
	 * never changes afterwards, so there is nothing to recompute. */
	return 0;
}
2494341477Svmaffione
2495341477Svmaffionestatic int
2496341477Svmaffionenetmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
2497341477Svmaffione{
2498341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2499341477Svmaffione	uint64_t mem_size;
2500341477Svmaffione	uint32_t bufsize;
2501341477Svmaffione	uint32_t nbuffers;
2502341477Svmaffione	uint32_t poolofs;
2503341477Svmaffione	vm_paddr_t paddr;
2504341477Svmaffione	char *vaddr;
2505341477Svmaffione	int i;
2506341477Svmaffione	int error = 0;
2507341477Svmaffione
2508341477Svmaffione	if (nmd->flags & NETMAP_MEM_FINALIZED)
2509341477Svmaffione		goto out;
2510341477Svmaffione
2511341477Svmaffione	if (ptnmd->ptn_dev == NULL) {
2512341477Svmaffione		D("ptnetmap memdev not attached");
2513341477Svmaffione		error = ENOMEM;
2514341477Svmaffione		goto out;
2515341477Svmaffione	}
2516341477Svmaffione	/* Map memory through ptnetmap-memdev BAR. */
2517341477Svmaffione	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
2518341477Svmaffione				      &ptnmd->nm_addr, &mem_size);
2519341477Svmaffione	if (error)
2520341477Svmaffione		goto out;
2521341477Svmaffione
2522341477Svmaffione	/* Initialize the lut using the information contained in the
2523341477Svmaffione	 * ptnetmap memory device. */
2524341477Svmaffione	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2525341477Svmaffione					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
2526341477Svmaffione	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2527341477Svmaffione					 PTNET_MDEV_IO_BUF_POOL_OBJNUM);
2528341477Svmaffione
2529341477Svmaffione	/* allocate the lut */
2530341477Svmaffione	if (ptnmd->buf_lut.lut == NULL) {
2531341477Svmaffione		D("allocating lut");
2532341477Svmaffione		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
2533341477Svmaffione		if (ptnmd->buf_lut.lut == NULL) {
2534341477Svmaffione			D("lut allocation failed");
2535341477Svmaffione			return ENOMEM;
2536341477Svmaffione		}
2537341477Svmaffione	}
2538341477Svmaffione
2539341477Svmaffione	/* we have physically contiguous memory mapped through PCI BAR */
2540341477Svmaffione	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2541341477Svmaffione					 PTNET_MDEV_IO_BUF_POOL_OFS);
2542341477Svmaffione	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
2543341477Svmaffione	paddr = ptnmd->nm_paddr + poolofs;
2544341477Svmaffione
2545341477Svmaffione	for (i = 0; i < nbuffers; i++) {
2546341477Svmaffione		ptnmd->buf_lut.lut[i].vaddr = vaddr;
2547341477Svmaffione		vaddr += bufsize;
2548341477Svmaffione		paddr += bufsize;
2549341477Svmaffione	}
2550341477Svmaffione
2551341477Svmaffione	ptnmd->buf_lut.objtotal = nbuffers;
2552341477Svmaffione	ptnmd->buf_lut.objsize = bufsize;
2553341477Svmaffione	nmd->nm_totalsize = (unsigned int)mem_size;
2554341477Svmaffione
2555341477Svmaffione	/* Initialize these fields as are needed by
2556341477Svmaffione	 * netmap_mem_bufsize().
2557341477Svmaffione	 * XXX please improve this, why do we need this
2558341477Svmaffione	 * replication? maybe we nmd->pools[] should no be
2559341477Svmaffione	 * there for the guest allocator? */
2560341477Svmaffione	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2561341477Svmaffione	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
2562341477Svmaffione
2563341477Svmaffione	nmd->flags |= NETMAP_MEM_FINALIZED;
2564341477Svmaffioneout:
2565341477Svmaffione	return error;
2566341477Svmaffione}
2567341477Svmaffione
2568341477Svmaffionestatic void
2569341477Svmaffionenetmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
2570341477Svmaffione{
2571341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2572341477Svmaffione
2573341477Svmaffione	if (nmd->active == 1 &&
2574341477Svmaffione		(nmd->flags & NETMAP_MEM_FINALIZED)) {
2575341477Svmaffione	    nmd->flags  &= ~NETMAP_MEM_FINALIZED;
2576341477Svmaffione	    /* unmap ptnetmap-memdev memory */
2577341477Svmaffione	    if (ptnmd->ptn_dev) {
2578341477Svmaffione		nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
2579341477Svmaffione	    }
2580341477Svmaffione	    ptnmd->nm_addr = NULL;
2581341477Svmaffione	    ptnmd->nm_paddr = 0;
2582341477Svmaffione	}
2583341477Svmaffione}
2584341477Svmaffione
2585341477Svmaffionestatic ssize_t
2586341477Svmaffionenetmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
2587341477Svmaffione{
2588341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2589341477Svmaffione
2590341477Svmaffione	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
2591341477Svmaffione}
2592341477Svmaffione
/* Destructor for a ptnetmap guest allocator: destroy the lock and free
 * the structure.  NULL is accepted and ignored.
 */
static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		D("deleting %p", nmd);
	/* Deleting while references are still active indicates a
	 * refcounting bug; warn but proceed anyway. */
	if (nmd->active > 0)
		D("bug: deleting mem allocator with active=%d!", nmd->active);
	if (netmap_verbose)
		D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	nm_os_free(nmd);
}
2607341477Svmaffione
2608341477Svmaffionestatic struct netmap_if *
2609341477Svmaffionenetmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
2610341477Svmaffione{
2611341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
2612341477Svmaffione	struct mem_pt_if *ptif;
2613341477Svmaffione	struct netmap_if *nifp = NULL;
2614341477Svmaffione
2615341477Svmaffione	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2616341477Svmaffione	if (ptif == NULL) {
2617341477Svmaffione		D("Error: interface %p is not in passthrough", na->ifp);
2618341477Svmaffione		goto out;
2619341477Svmaffione	}
2620341477Svmaffione
2621341477Svmaffione	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
2622341477Svmaffione				    ptif->nifp_offset);
2623341477Svmaffioneout:
2624341477Svmaffione	return nifp;
2625341477Svmaffione}
2626341477Svmaffione
2627341477Svmaffionestatic void
2628341477Svmaffionenetmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
2629341477Svmaffione{
2630341477Svmaffione	struct mem_pt_if *ptif;
2631341477Svmaffione
2632341477Svmaffione	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2633341477Svmaffione	if (ptif == NULL) {
2634341477Svmaffione		D("Error: interface %p is not in passthrough", na->ifp);
2635341477Svmaffione	}
2636341477Svmaffione}
2637341477Svmaffione
2638341477Svmaffionestatic int
2639341477Svmaffionenetmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
2640341477Svmaffione{
2641341477Svmaffione	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
2642341477Svmaffione	struct mem_pt_if *ptif;
2643341477Svmaffione	struct netmap_if *nifp;
2644341477Svmaffione	int i, error = -1;
2645341477Svmaffione
2646341477Svmaffione	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2647341477Svmaffione	if (ptif == NULL) {
2648341477Svmaffione		D("Error: interface %p is not in passthrough", na->ifp);
2649341477Svmaffione		goto out;
2650341477Svmaffione	}
2651341477Svmaffione
2652341477Svmaffione
2653341477Svmaffione	/* point each kring to the corresponding backend ring */
2654341477Svmaffione	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
2655341477Svmaffione	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
2656341477Svmaffione		struct netmap_kring *kring = na->tx_rings[i];
2657341477Svmaffione		if (kring->ring)
2658341477Svmaffione			continue;
2659341477Svmaffione		kring->ring = (struct netmap_ring *)
2660341477Svmaffione			((char *)nifp + nifp->ring_ofs[i]);
2661341477Svmaffione	}
2662341477Svmaffione	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
2663341477Svmaffione		struct netmap_kring *kring = na->rx_rings[i];
2664341477Svmaffione		if (kring->ring)
2665341477Svmaffione			continue;
2666341477Svmaffione		kring->ring = (struct netmap_ring *)
2667341477Svmaffione			((char *)nifp +
2668341477Svmaffione			 nifp->ring_ofs[i + na->num_tx_rings + 1]);
2669341477Svmaffione	}
2670341477Svmaffione
2671341477Svmaffione	error = 0;
2672341477Svmaffioneout:
2673341477Svmaffione	return error;
2674341477Svmaffione}
2675341477Svmaffione
/* Intentionally a no-op: the rings live in the host-exported memory
 * region and the kring->ring pointers are simply re-derived on the next
 * rings_create.  (A previous #if 0'd implementation reset each
 * kring->ring to NULL; it was dead code and has been removed.)
 */
static void
netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na)
{
}
2692341477Svmaffione
/* Method table binding the generic netmap_mem_d interface to the
 * ptnetmap guest allocator implementation above. */
static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
};
2707341477Svmaffione
/* Called with nm_mem_list_lock held.
 *
 * Walk the circular list of allocators looking for a ptnetmap guest
 * allocator whose host id matches mem_id.  Guest allocators are
 * recognized by their deref method.  On a match, a reference is taken
 * and the allocator returned; otherwise NULL.
 */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
{
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;

	do {
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			mem = scan;
			mem->refcount++;
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
			break;
		}
		scan = scan->next;
	} while (scan != netmap_last_mem_d);

	return mem;
}
2729341477Svmaffione
2730341477Svmaffione/* Called with nm_mem_list_lock held. */
2731341477Svmaffionestatic struct netmap_mem_d *
2732341477Svmaffionenetmap_mem_pt_guest_create(nm_memid_t mem_id)
2733341477Svmaffione{
2734341477Svmaffione	struct netmap_mem_ptg *ptnmd;
2735341477Svmaffione	int err = 0;
2736341477Svmaffione
2737341477Svmaffione	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
2738341477Svmaffione	if (ptnmd == NULL) {
2739341477Svmaffione		err = ENOMEM;
2740341477Svmaffione		goto error;
2741341477Svmaffione	}
2742341477Svmaffione
2743341477Svmaffione	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
2744341477Svmaffione	ptnmd->host_mem_id = mem_id;
2745341477Svmaffione	ptnmd->pt_ifs = NULL;
2746341477Svmaffione
2747341477Svmaffione	/* Assign new id in the guest (We have the lock) */
2748341477Svmaffione	err = nm_mem_assign_id_locked(&ptnmd->up);
2749341477Svmaffione	if (err)
2750341477Svmaffione		goto error;
2751341477Svmaffione
2752341477Svmaffione	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
2753341477Svmaffione	ptnmd->up.flags |= NETMAP_MEM_IO;
2754341477Svmaffione
2755341477Svmaffione	NMA_LOCK_INIT(&ptnmd->up);
2756341477Svmaffione
2757341477Svmaffione	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
2758341477Svmaffione
2759341477Svmaffione
2760341477Svmaffione	return &ptnmd->up;
2761341477Svmaffioneerror:
2762341477Svmaffione	netmap_mem_pt_guest_delete(&ptnmd->up);
2763341477Svmaffione	return NULL;
2764341477Svmaffione}
2765341477Svmaffione
2766341477Svmaffione/*
2767341477Svmaffione * find host id in guest allocators and create guest allocator
2768341477Svmaffione * if it is not there
2769341477Svmaffione */
2770341477Svmaffionestatic struct netmap_mem_d *
2771341477Svmaffionenetmap_mem_pt_guest_get(nm_memid_t mem_id)
2772341477Svmaffione{
2773341477Svmaffione	struct netmap_mem_d *nmd;
2774341477Svmaffione
2775341477Svmaffione	NM_MTX_LOCK(nm_mem_list_lock);
2776341477Svmaffione	nmd = netmap_mem_pt_guest_find_memid(mem_id);
2777341477Svmaffione	if (nmd == NULL) {
2778341477Svmaffione		nmd = netmap_mem_pt_guest_create(mem_id);
2779341477Svmaffione	}
2780341477Svmaffione	NM_MTX_UNLOCK(nm_mem_list_lock);
2781341477Svmaffione
2782341477Svmaffione	return nmd;
2783341477Svmaffione}
2784341477Svmaffione
2785341477Svmaffione/*
2786341477Svmaffione * The guest allocator can be created by ptnetmap_memdev (during the device
2787341477Svmaffione * attach) or by ptnetmap device (ptnet), during the netmap_attach.
2788341477Svmaffione *
2789341477Svmaffione * The order is not important (we have different order in LINUX and FreeBSD).
2790341477Svmaffione * The first one, creates the device, and the second one simply attaches it.
2791341477Svmaffione */
2792341477Svmaffione
2793341477Svmaffione/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
2794341477Svmaffione * the guest */
2795341477Svmaffionestruct netmap_mem_d *
2796341477Svmaffionenetmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
2797341477Svmaffione{
2798341477Svmaffione	struct netmap_mem_d *nmd;
2799341477Svmaffione	struct netmap_mem_ptg *ptnmd;
2800341477Svmaffione
2801341477Svmaffione	nmd = netmap_mem_pt_guest_get(mem_id);
2802341477Svmaffione
2803341477Svmaffione	/* assign this device to the guest allocator */
2804341477Svmaffione	if (nmd) {
2805341477Svmaffione		ptnmd = (struct netmap_mem_ptg *)nmd;
2806341477Svmaffione		ptnmd->ptn_dev = ptn_dev;
2807341477Svmaffione	}
2808341477Svmaffione
2809341477Svmaffione	return nmd;
2810341477Svmaffione}
2811341477Svmaffione
2812341477Svmaffione/* Called when ptnet device is attaching */
2813341477Svmaffionestruct netmap_mem_d *
2814341477Svmaffionenetmap_mem_pt_guest_new(struct ifnet *ifp,
2815341477Svmaffione			unsigned int nifp_offset,
2816341477Svmaffione			unsigned int memid)
2817341477Svmaffione{
2818341477Svmaffione	struct netmap_mem_d *nmd;
2819341477Svmaffione
2820341477Svmaffione	if (ifp == NULL) {
2821341477Svmaffione		return NULL;
2822341477Svmaffione	}
2823341477Svmaffione
2824341477Svmaffione	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
2825341477Svmaffione
2826341477Svmaffione	if (nmd) {
2827341477Svmaffione		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
2828341477Svmaffione	}
2829341477Svmaffione
2830341477Svmaffione	return nmd;
2831341477Svmaffione}
2832341477Svmaffione
2833341477Svmaffione#endif /* WITH_PTNETMAP_GUEST */
2834