netmap_mem2.c revision 331722
1331722Seadler/*
2260368Sluigi * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
3234228Sluigi *
4234228Sluigi * Redistribution and use in source and binary forms, with or without
5234228Sluigi * modification, are permitted provided that the following conditions
6234228Sluigi * are met:
7234228Sluigi *   1. Redistributions of source code must retain the above copyright
8234228Sluigi *      notice, this list of conditions and the following disclaimer.
9234228Sluigi *   2. Redistributions in binary form must reproduce the above copyright
10234228Sluigi *      notice, this list of conditions and the following disclaimer in the
11259412Sluigi *      documentation and/or other materials provided with the distribution.
12234228Sluigi *
13234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16234228Sluigi * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23234228Sluigi * SUCH DAMAGE.
24234228Sluigi */
25234228Sluigi
26257529Sluigi#ifdef linux
27257529Sluigi#include "bsd_glue.h"
28257529Sluigi#endif /* linux */
29234228Sluigi
30257529Sluigi#ifdef __APPLE__
31257529Sluigi#include "osx_glue.h"
32257529Sluigi#endif /* __APPLE__ */
33234228Sluigi
34257529Sluigi#ifdef __FreeBSD__
35257529Sluigi#include <sys/cdefs.h> /* prerequisite */
36257529Sluigi__FBSDID("$FreeBSD: stable/11/sys/dev/netmap/netmap_mem2.c 331722 2018-03-29 02:50:57Z eadler $");
37234228Sluigi
38257529Sluigi#include <sys/types.h>
39257529Sluigi#include <sys/malloc.h>
40257529Sluigi#include <sys/proc.h>
41257529Sluigi#include <vm/vm.h>	/* vtophys */
42257529Sluigi#include <vm/pmap.h>	/* vtophys */
43257529Sluigi#include <sys/socket.h> /* sockaddrs */
44257529Sluigi#include <sys/selinfo.h>
45257529Sluigi#include <sys/sysctl.h>
46257529Sluigi#include <net/if.h>
47257529Sluigi#include <net/if_var.h>
48257529Sluigi#include <net/vnet.h>
49257529Sluigi#include <machine/bus.h>	/* bus_dmamap_* */
50257529Sluigi
51257529Sluigi#endif /* __FreeBSD__ */
52257529Sluigi
53257529Sluigi#include <net/netmap.h>
54257529Sluigi#include <dev/netmap/netmap_kern.h>
55257529Sluigi#include "netmap_mem2.h"
56257529Sluigi
/*
 * Hard upper bound on the number of netmap buffers preallocated by the
 * global allocator (sized for a large machine).  Parenthesized so the
 * expansion stays a single term inside larger expressions.
 */
#define NETMAP_BUF_MAX_NUM	(20*4096*2)	/* large machine */

#define NETMAP_POOL_MAX_NAMSZ	32	/* max pool name length, NUL included */
60270063Sluigi
61270063Sluigi
/* Indices of the three object pools every allocator is made of. */
enum {
	NETMAP_IF_POOL   = 0,	/* netmap_if descriptors */
	NETMAP_RING_POOL,	/* netmap_ring + slots */
	NETMAP_BUF_POOL,	/* packet buffers */
	NETMAP_POOLS_NR		/* number of pools */
};


/* Requested configuration for one object pool. */
struct netmap_obj_params {
	u_int size;	/* requested size of each object, in bytes */
	u_int num;	/* requested number of objects */
};
74285349Sluigi
/*
 * One pool of fixed-size objects (interfaces, rings or buffers).
 * Objects are carved out of contiguous clusters; a lookup table
 * maps object index to virtual/physical addresses and a bitmap
 * tracks which objects are free.
 */
struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	u_int objtotal;         /* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;          /* number of free objects. */

	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;       /* cluster size */
	u_int _clustentries;    /* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};
109270063Sluigi
/* The allocator mutex type: an alias of the OS-specific netmap mutex. */
#define NMA_LOCK_T		NM_MTX_T


/*
 * Virtual-function table of a memory allocator backend
 * (global vs. private).  The public netmap_mem_* wrappers
 * generated below dispatch through this table.
 */
struct netmap_mem_ops {
	void (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int  (*nmd_get_info)(struct netmap_mem_d *, u_int *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t  (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	/* adapter-centric operations, dispatched via na->nm_mem->ops */
	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int  (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};
130285349Sluigi
/* allocator identifier; 0 is reserved as an error value */
typedef uint16_t nm_memid_t;

/*
 * A netmap memory allocator: the three object pools plus bookkeeping.
 * All live allocators are linked in a circular list sorted by nm_id
 * (see nm_mem_assign_id()/nm_mem_release_id()).
 */
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;  /* protect the allocator */
	u_int nm_totalsize; /* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;		/* references held (see netmap_mem_get/put) */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;	/* iommu group id; -1 until a device is attached */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;	/* backend-specific methods */
};
153270063Sluigi
/*
 * The NMD_DEFCB* macros generate the public netmap_mem_<name>()
 * wrappers, each of which simply dispatches through nmd->ops.
 * The suffix (none/1/3) is the number of extra arguments.
 */
#define NMD_DEFCB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd) \
{ \
	return nmd->ops->nmd_##name(nmd); \
}

#define NMD_DEFCB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \
{ \
	return nmd->ops->nmd_##name(nmd, a1); \
}

#define NMD_DEFCB3(t0, name, t1, t2, t3) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \
{ \
	return nmd->ops->nmd_##name(nmd, a1, a2, a3); \
}

/*
 * NMD_DEFNACB* are the adapter-centric variants: they take a
 * netmap_adapter and dispatch through na->nm_mem->ops.
 */
#define NMD_DEFNACB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_adapter *na) \
{ \
	return na->nm_mem->ops->nmd_##name(na); \
}

#define NMD_DEFNACB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_adapter *na, t1 a1) \
{ \
	return na->nm_mem->ops->nmd_##name(na, a1); \
}

/* instantiate the dispatchers */
NMD_DEFCB1(void, get_lut, struct netmap_lut *);
NMD_DEFCB3(int, get_info, u_int *, u_int *, uint16_t *);
NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t);
static int netmap_mem_config(struct netmap_mem_d *);
NMD_DEFCB(int, config);
NMD_DEFCB1(ssize_t, if_offset, const void *);
NMD_DEFCB(void, delete);

NMD_DEFNACB(struct netmap_if *, if_new);
NMD_DEFNACB1(void, if_delete, struct netmap_if *);
NMD_DEFNACB(int, rings_create);
NMD_DEFNACB(void, rings_delete);
201285349Sluigi
/* forward declarations for helpers defined later in this file */
static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);

/* convenience wrappers around the per-allocator mutex */
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

/* trace refcount changes; a no-op unless NM_DEBUG_MEM_PUTGET is defined */
#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	printf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif
217285349Sluigi
/*
 * Take a reference on the allocator.  With NM_DEBUG_MEM_PUTGET the
 * netmap_mem_get() entry point is a macro (defined elsewhere) that
 * passes caller/line to the __ variant; otherwise func/line are
 * unused because NM_DBG_REFC expands to nothing.
 */
#ifdef NM_DEBUG_MEM_PUTGET
void __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
#else
void netmap_mem_get(struct netmap_mem_d *nmd)
#endif
{
	NMA_LOCK(nmd);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NMA_UNLOCK(nmd);
}
229270063Sluigi
/*
 * Drop a reference on the allocator; the last put destroys it.
 * netmap_mem_delete() is called outside the lock, after the
 * refcount has provably reached zero.
 */
#ifdef NM_DEBUG_MEM_PUTGET
void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
#else
void netmap_mem_put(struct netmap_mem_d *nmd)
#endif
{
	int last;
	NMA_LOCK(nmd);
	last = (--nmd->refcount == 0);
	NM_DBG_REFC(nmd, func, line);
	NMA_UNLOCK(nmd);
	if (last)
		netmap_mem_delete(nmd);
}
244270063Sluigi
245285349Sluigiint
246285349Sluiginetmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
247270063Sluigi{
248285349Sluigi	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
249285349Sluigi		return ENOMEM;
250285349Sluigi	} else {
251285349Sluigi		nmd->ops->nmd_finalize(nmd);
252285349Sluigi	}
253285349Sluigi
254285349Sluigi	if (!nmd->lasterr && na->pdev)
255285349Sluigi		netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
256285349Sluigi
257285349Sluigi	return nmd->lasterr;
258270063Sluigi}
259270063Sluigi
260285349Sluigivoid
261285349Sluiginetmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
262285349Sluigi{
263285349Sluigi	NMA_LOCK(nmd);
264285349Sluigi	netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
265285349Sluigi	NMA_UNLOCK(nmd);
266285349Sluigi	return nmd->ops->nmd_deref(nmd);
267285349Sluigi}
268234228Sluigi
269241719Sluigi
270285349Sluigi/* accessor functions */
271285349Sluigistatic void
272285349Sluiginetmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
273285349Sluigi{
274285349Sluigi	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
275285349Sluigi	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
276285349Sluigi	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
277285349Sluigi}
278285349Sluigi
/* Default pool configuration for the global allocator; tunable via
 * the dev.netmap.* sysctls declared below. */
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};
293241719Sluigi
/* Minimum pool configuration for private (per-port) allocators;
 * also tunable via the dev.netmap.priv_* sysctls. */
struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 1,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};
308241719Sluigi
309261909Sluigi
/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will have each its own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_id = 1,		/* ids start at 1; 0 means error */
	.nm_grp = -1,		/* no iommu group attached yet */

	/* circular list of allocators, initially just nm_mem itself */
	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops
};


/* tail of the circular allocator list; scan hint for id assignment */
struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
352261909Sluigi
/* blueprint for the private memory allocators: copied and then
 * customized (names, sizes) when a private allocator is created.
 * The "%s" in the pool names is replaced with the port name. */
extern struct netmap_mem_ops netmap_mem_private_ops; /* forward */
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name 	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_private_ops
};
384257529Sluigi
/* memory allocator related sysctls */

#define STRINGIFY(x) #x


/* For each pool, declare sysctls for the requested (RW) and current
 * (RD) size/number, plus the private-allocator minimums (RW). */
#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
410241719Sluigi
/*
 * Assign a fresh id to 'nmd' and insert it in the circular list of
 * allocators, which is kept sorted by nm_id.  Starting from the last
 * allocator created, scan forward looking for a gap in the id space.
 * Returns 0 on success, ENOMEM if all ids are in use.
 */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	NMA_LOCK(&nm_mem);

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			/* found a gap: insert nmd just before 'scan' */
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	NMA_UNLOCK(&nm_mem);
	return error;
}
441261909Sluigi
442261909Sluigistatic void
443261909Sluiginm_mem_release_id(struct netmap_mem_d *nmd)
444261909Sluigi{
445261909Sluigi	NMA_LOCK(&nm_mem);
446261909Sluigi
447261909Sluigi	nmd->prev->next = nmd->next;
448261909Sluigi	nmd->next->prev = nmd->prev;
449261909Sluigi
450261909Sluigi	if (netmap_last_mem_d == nmd)
451261909Sluigi		netmap_last_mem_d = nmd->prev;
452261909Sluigi
453261909Sluigi	nmd->prev = nmd->next = NULL;
454261909Sluigi
455261909Sluigi	NMA_UNLOCK(&nm_mem);
456261909Sluigi}
457261909Sluigi
458270063Sluigistatic int
459270063Sluiginm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
460270063Sluigi{
461270063Sluigi	int err = 0, id;
462270063Sluigi	id = nm_iommu_group_id(dev);
463270063Sluigi	if (netmap_verbose)
464270063Sluigi		D("iommu_group %d", id);
465261909Sluigi
466270063Sluigi	NMA_LOCK(nmd);
467270063Sluigi
468270063Sluigi	if (nmd->nm_grp < 0)
469270063Sluigi		nmd->nm_grp = id;
470270063Sluigi
471270063Sluigi	if (nmd->nm_grp != id)
472270063Sluigi		nmd->lasterr = err = ENOMEM;
473270063Sluigi
474270063Sluigi	NMA_UNLOCK(nmd);
475270063Sluigi	return err;
476270063Sluigi}
477270063Sluigi
/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 * Returns the physical address backing 'offset', or 0 on a bad offset.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;	/* save for the error message */
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	/* the pools are laid out back to back in the address space;
	 * the loop header rebases 'offset' to the start of each pool
	 * before moving on to the next one */
	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}
513234228Sluigi
/*
 * Report the total size, flags and id of the allocator.  Runs the
 * config step first so the numbers are up to date; if the allocator
 * is not yet finalized the size is computed from the configured
 * (not yet allocated) cluster counts.  Any out parameter may be NULL.
 */
static int
netmap_mem2_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
	nm_memid_t *id)
{
	int error = 0;
	NMA_LOCK(nmd);
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += (p->_numclusters * p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	NMA_UNLOCK(nmd);
	return error;
}
543257529Sluigi
544234228Sluigi/*
545234228Sluigi * we store objects by kernel address, need to find the offset
546234228Sluigi * within the pool to export the value to userspace.
547234228Sluigi * Algorithm: scan until we find the cluster, then add the
548234228Sluigi * actual offset in the cluster
549234228Sluigi */
550234242Sluigistatic ssize_t
551234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
552234228Sluigi{
553257529Sluigi	int i, k = p->_clustentries, n = p->objtotal;
554234228Sluigi	ssize_t ofs = 0;
555234228Sluigi
556234228Sluigi	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
557234228Sluigi		const char *base = p->lut[i].vaddr;
558234228Sluigi		ssize_t relofs = (const char *) vaddr - base;
559234228Sluigi
560249504Sluigi		if (relofs < 0 || relofs >= p->_clustsize)
561234228Sluigi			continue;
562234228Sluigi
563234228Sluigi		ofs = ofs + relofs;
564234228Sluigi		ND("%s: return offset %d (cluster %d) for pointer %p",
565234228Sluigi		    p->name, ofs, i, vaddr);
566234228Sluigi		return ofs;
567234228Sluigi	}
568234228Sluigi	D("address %p is not contained inside any cluster (%s)",
569234228Sluigi	    vaddr, p->name);
570234228Sluigi	return 0; /* An error occurred */
571234228Sluigi}
572234228Sluigi
/* Helper functions which convert virtual addresses to offsets.
 * The three pools are contiguous, so ring and buffer offsets are
 * rebased past the preceding pools' memtotal. */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
    ((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +		\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
585234228Sluigi
586234228Sluigi
587285349Sluigistatic ssize_t
588285349Sluiginetmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
589257529Sluigi{
590257529Sluigi	ssize_t v;
591257529Sluigi	NMA_LOCK(nmd);
592257529Sluigi	v = netmap_if_offset(nmd, addr);
593257529Sluigi	NMA_UNLOCK(nmd);
594257529Sluigi	return v;
595257529Sluigi}
596257529Sluigi
/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 *
 * Allocate one object of at most 'len' bytes from pool 'p'.
 * '*start' (if non-NULL) is the bitmap word to start scanning from,
 * and is updated on return; '*index' (if non-NULL) receives the
 * index of the allocated object.  Returns the object's virtual
 * address, or NULL if the request is too large or the pool is empty.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->free, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot: find the lowest set bit in this word */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		/* 32 objects per bitmap word */
		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}
645234228Sluigi
646234228Sluigi
647234228Sluigi/*
648261909Sluigi * free by index, not by address.
649261909Sluigi * XXX should we also cleanup the content ?
650234228Sluigi */
651261909Sluigistatic int
652234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
653234228Sluigi{
654261909Sluigi	uint32_t *ptr, mask;
655261909Sluigi
656234228Sluigi	if (j >= p->objtotal) {
657234228Sluigi		D("invalid index %u, max %u", j, p->objtotal);
658261909Sluigi		return 1;
659234228Sluigi	}
660261909Sluigi	ptr = &p->bitmap[j / 32];
661261909Sluigi	mask = (1 << (j % 32));
662261909Sluigi	if (*ptr & mask) {
663261909Sluigi		D("ouch, double free on buffer %d", j);
664261909Sluigi		return 1;
665261909Sluigi	} else {
666261909Sluigi		*ptr |= mask;
667261909Sluigi		p->objfree++;
668261909Sluigi		return 0;
669261909Sluigi	}
670234228Sluigi}
671234228Sluigi
/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	/* i counts clusters, j counts objects: each cluster holds
	 * _clustentries objects */
	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* Given address, is out of the scope of the current cluster.*/
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		/* convert the in-cluster offset to an object index */
		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}
697234228Sluigi
/* size in bytes of one buffer in this allocator's buffer pool */
#define netmap_mem_bufsize(n)	\
	((n)->pools[NETMAP_BUF_POOL]._objsize)

/* allocate/free helpers for the IF and RING pools (free is by address) */
#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
/* buffer allocation takes a scan-position hint and returns the index */
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 // XXX unused
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif
714234228Sluigi
/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 *
 * The list is threaded through the buffers themselves: the first
 * word of each buffer stores the index of the previously allocated
 * one.  '*head' receives the index of the list head (0 = empty).
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		/* netmap_buf_malloc stores the new index in *head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			D("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		RD(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}
744261909Sluigi
745261909Sluigistatic void
746261909Sluiginetmap_extra_free(struct netmap_adapter *na, uint32_t head)
747261909Sluigi{
748285349Sluigi        struct lut_entry *lut = na->na_lut.lut;
749261909Sluigi	struct netmap_mem_d *nmd = na->nm_mem;
750261909Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
751261909Sluigi	uint32_t i, cur, *buf;
752261909Sluigi
753261909Sluigi	D("freeing the extra list");
754261909Sluigi	for (i = 0; head >=2 && head < p->objtotal; i++) {
755261909Sluigi		cur = head;
756261909Sluigi		buf = lut[head].vaddr;
757261909Sluigi		head = *buf;
758261909Sluigi		*buf = 0;
759261909Sluigi		if (netmap_obj_free(p, cur))
760261909Sluigi			break;
761261909Sluigi	}
762261909Sluigi	if (head != 0)
763261909Sluigi		D("breaking with head %d", head);
764261909Sluigi	D("freed %d buffers", i);
765261909Sluigi}
766261909Sluigi
767261909Sluigi
768241719Sluigi/* Return nonzero on error */
769241719Sluigistatic int
770259412Sluiginetmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
771234228Sluigi{
772257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
773257529Sluigi	u_int i = 0;	/* slot counter */
774241719Sluigi	uint32_t pos = 0;	/* slot in p->bitmap */
775241719Sluigi	uint32_t index = 0;	/* buffer index */
776234228Sluigi
777234228Sluigi	for (i = 0; i < n; i++) {
778257529Sluigi		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
779234228Sluigi		if (vaddr == NULL) {
780259412Sluigi			D("no more buffers after %d of %d", i, n);
781234228Sluigi			goto cleanup;
782234228Sluigi		}
783241719Sluigi		slot[i].buf_idx = index;
784234228Sluigi		slot[i].len = p->_objsize;
785259412Sluigi		slot[i].flags = 0;
786234228Sluigi	}
787234228Sluigi
788241719Sluigi	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
789241719Sluigi	return (0);
790234228Sluigi
791234228Sluigicleanup:
792241643Semaste	while (i > 0) {
793241643Semaste		i--;
794241719Sluigi		netmap_obj_free(p, slot[i].buf_idx);
795234228Sluigi	}
796241719Sluigi	bzero(slot, n * sizeof(slot[0]));
797241719Sluigi	return (ENOMEM);
798234228Sluigi}
799234228Sluigi
800261909Sluigistatic void
801261909Sluiginetmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
802261909Sluigi{
803261909Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
804261909Sluigi	u_int i;
805234228Sluigi
806261909Sluigi	for (i = 0; i < n; i++) {
807261909Sluigi		slot[i].buf_idx = index;
808261909Sluigi		slot[i].len = p->_objsize;
809261909Sluigi		slot[i].flags = 0;
810261909Sluigi	}
811261909Sluigi}
812261909Sluigi
813261909Sluigi
814234228Sluigistatic void
815259412Sluiginetmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
816234228Sluigi{
817257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
818241719Sluigi
819234228Sluigi	if (i < 2 || i >= p->objtotal) {
820234228Sluigi		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
821234228Sluigi		return;
822234228Sluigi	}
823241719Sluigi	netmap_obj_free(p, i);
824234228Sluigi}
825234228Sluigi
826261909Sluigi
827234228Sluigistatic void
828261909Sluiginetmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
829261909Sluigi{
830261909Sluigi	u_int i;
831261909Sluigi
832261909Sluigi	for (i = 0; i < n; i++) {
833261909Sluigi		if (slot[i].buf_idx > 2)
834261909Sluigi			netmap_free_buf(nmd, slot[i].buf_idx);
835261909Sluigi	}
836261909Sluigi}
837261909Sluigi
838261909Sluigistatic void
839241719Sluiginetmap_reset_obj_allocator(struct netmap_obj_pool *p)
840234228Sluigi{
841257529Sluigi
842234228Sluigi	if (p == NULL)
843234228Sluigi		return;
844234228Sluigi	if (p->bitmap)
845234228Sluigi		free(p->bitmap, M_NETMAP);
846241719Sluigi	p->bitmap = NULL;
847234228Sluigi	if (p->lut) {
848257529Sluigi		u_int i;
849257529Sluigi		size_t sz = p->_clustsize;
850257529Sluigi
851282978Spkelsey		/*
852282978Spkelsey		 * Free each cluster allocated in
853282978Spkelsey		 * netmap_finalize_obj_allocator().  The cluster start
854282978Spkelsey		 * addresses are stored at multiples of p->_clusterentries
855282978Spkelsey		 * in the lut.
856282978Spkelsey		 */
857257529Sluigi		for (i = 0; i < p->objtotal; i += p->_clustentries) {
858234228Sluigi			if (p->lut[i].vaddr)
859257529Sluigi				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
860234228Sluigi		}
861234228Sluigi		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
862241719Sluigi#ifdef linux
863241719Sluigi		vfree(p->lut);
864241719Sluigi#else
865234228Sluigi		free(p->lut, M_NETMAP);
866241719Sluigi#endif
867234228Sluigi	}
868241719Sluigi	p->lut = NULL;
869257529Sluigi	p->objtotal = 0;
870257529Sluigi	p->memtotal = 0;
871257529Sluigi	p->numclusters = 0;
872257529Sluigi	p->objfree = 0;
873234228Sluigi}
874234228Sluigi
875234228Sluigi/*
876241719Sluigi * Free all resources related to an allocator.
877241719Sluigi */
878241719Sluigistatic void
879241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p)
880241719Sluigi{
881241719Sluigi	if (p == NULL)
882241719Sluigi		return;
883241719Sluigi	netmap_reset_obj_allocator(p);
884241719Sluigi}
885241719Sluigi
886241719Sluigi/*
887234228Sluigi * We receive a request for objtotal objects, of size objsize each.
888234228Sluigi * Internally we may round up both numbers, as we allocate objects
889234228Sluigi * in small clusters multiple of the page size.
890257529Sluigi * We need to keep track of objtotal and clustentries,
891234228Sluigi * as they are needed when freeing memory.
892234228Sluigi *
893234228Sluigi * XXX note -- userspace needs the buffers to be contiguous,
894234228Sluigi *	so we cannot afford gaps at the end of a cluster.
895234228Sluigi */
896241719Sluigi
897241719Sluigi
898241719Sluigi/* call with NMA_LOCK held */
899241719Sluigistatic int
900241719Sluiginetmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
901234228Sluigi{
902257529Sluigi	int i;
903234228Sluigi	u_int clustsize;	/* the cluster size, multiple of page size */
904234228Sluigi	u_int clustentries;	/* how many objects per entry */
905234228Sluigi
906257529Sluigi	/* we store the current request, so we can
907257529Sluigi	 * detect configuration changes later */
908257529Sluigi	p->r_objtotal = objtotal;
909257529Sluigi	p->r_objsize = objsize;
910257529Sluigi
911270063Sluigi#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
912260368Sluigi#define LINE_ROUND	NM_CACHE_ALIGN	// 64
913234228Sluigi	if (objsize >= MAX_CLUSTSIZE) {
914234228Sluigi		/* we could do it but there is no point */
915234228Sluigi		D("unsupported allocation for %d bytes", objsize);
916257529Sluigi		return EINVAL;
917234228Sluigi	}
918234228Sluigi	/* make sure objsize is a multiple of LINE_ROUND */
919234228Sluigi	i = (objsize & (LINE_ROUND - 1));
920234228Sluigi	if (i) {
921234228Sluigi		D("XXX aligning object by %d bytes", LINE_ROUND - i);
922234228Sluigi		objsize += LINE_ROUND - i;
923234228Sluigi	}
924241719Sluigi	if (objsize < p->objminsize || objsize > p->objmaxsize) {
925250184Sluigi		D("requested objsize %d out of range [%d, %d]",
926241719Sluigi			objsize, p->objminsize, p->objmaxsize);
927257529Sluigi		return EINVAL;
928241719Sluigi	}
929241719Sluigi	if (objtotal < p->nummin || objtotal > p->nummax) {
930250184Sluigi		D("requested objtotal %d out of range [%d, %d]",
931241719Sluigi			objtotal, p->nummin, p->nummax);
932257529Sluigi		return EINVAL;
933241719Sluigi	}
934234228Sluigi	/*
935234228Sluigi	 * Compute number of objects using a brute-force approach:
936234228Sluigi	 * given a max cluster size,
937234228Sluigi	 * we try to fill it with objects keeping track of the
938234228Sluigi	 * wasted space to the next page boundary.
939234228Sluigi	 */
940234228Sluigi	for (clustentries = 0, i = 1;; i++) {
941234228Sluigi		u_int delta, used = i * objsize;
942234228Sluigi		if (used > MAX_CLUSTSIZE)
943234228Sluigi			break;
944234228Sluigi		delta = used % PAGE_SIZE;
945234228Sluigi		if (delta == 0) { // exact solution
946234228Sluigi			clustentries = i;
947234228Sluigi			break;
948234228Sluigi		}
949234228Sluigi	}
950270063Sluigi	/* exact solution not found */
951270063Sluigi	if (clustentries == 0) {
952270063Sluigi		D("unsupported allocation for %d bytes", objsize);
953270063Sluigi		return EINVAL;
954270063Sluigi	}
955270063Sluigi	/* compute clustsize */
956234228Sluigi	clustsize = clustentries * objsize;
957245835Sluigi	if (netmap_verbose)
958245835Sluigi		D("objsize %d clustsize %d objects %d",
959245835Sluigi			objsize, clustsize, clustentries);
960234228Sluigi
961234228Sluigi	/*
962234228Sluigi	 * The number of clusters is n = ceil(objtotal/clustentries)
963234228Sluigi	 * objtotal' = n * clustentries
964234228Sluigi	 */
965257529Sluigi	p->_clustentries = clustentries;
966234228Sluigi	p->_clustsize = clustsize;
967257529Sluigi	p->_numclusters = (objtotal + clustentries - 1) / clustentries;
968257529Sluigi
969257529Sluigi	/* actual values (may be larger than requested) */
970234228Sluigi	p->_objsize = objsize;
971257529Sluigi	p->_objtotal = p->_numclusters * clustentries;
972234228Sluigi
973241719Sluigi	return 0;
974241719Sluigi}
975241719Sluigi
976241719Sluigi
977241719Sluigi/* call with NMA_LOCK held */
978241719Sluigistatic int
979241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p)
980241719Sluigi{
981257529Sluigi	int i; /* must be signed */
982257529Sluigi	size_t n;
983241719Sluigi
984257529Sluigi	/* optimistically assume we have enough memory */
985257529Sluigi	p->numclusters = p->_numclusters;
986257529Sluigi	p->objtotal = p->_objtotal;
987257529Sluigi
988241719Sluigi	n = sizeof(struct lut_entry) * p->objtotal;
989241719Sluigi#ifdef linux
990241719Sluigi	p->lut = vmalloc(n);
991241719Sluigi#else
992241750Semaste	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
993241719Sluigi#endif
994234228Sluigi	if (p->lut == NULL) {
995257529Sluigi		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
996234228Sluigi		goto clean;
997234228Sluigi	}
998234228Sluigi
999234228Sluigi	/* Allocate the bitmap */
1000234228Sluigi	n = (p->objtotal + 31) / 32;
1001241750Semaste	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
1002234228Sluigi	if (p->bitmap == NULL) {
1003257529Sluigi		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
1004241719Sluigi		    p->name);
1005234228Sluigi		goto clean;
1006234228Sluigi	}
1007241719Sluigi	p->bitmap_slots = n;
1008234228Sluigi
1009234228Sluigi	/*
1010234228Sluigi	 * Allocate clusters, init pointers and bitmap
1011234228Sluigi	 */
1012257529Sluigi
1013257529Sluigi	n = p->_clustsize;
1014257529Sluigi	for (i = 0; i < (int)p->objtotal;) {
1015257529Sluigi		int lim = i + p->_clustentries;
1016234228Sluigi		char *clust;
1017234228Sluigi
1018257529Sluigi		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
1019257529Sluigi		    (size_t)0, -1UL, PAGE_SIZE, 0);
1020234228Sluigi		if (clust == NULL) {
1021234228Sluigi			/*
1022234228Sluigi			 * If we get here, there is a severe memory shortage,
1023234228Sluigi			 * so halve the allocated memory to reclaim some.
1024234228Sluigi			 */
1025234228Sluigi			D("Unable to create cluster at %d for '%s' allocator",
1026241719Sluigi			    i, p->name);
1027257529Sluigi			if (i < 2) /* nothing to halve */
1028257529Sluigi				goto out;
1029234228Sluigi			lim = i / 2;
1030241719Sluigi			for (i--; i >= lim; i--) {
1031234228Sluigi				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
1032257529Sluigi				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
1033234228Sluigi					contigfree(p->lut[i].vaddr,
1034257529Sluigi						n, M_NETMAP);
1035282978Spkelsey				p->lut[i].vaddr = NULL;
1036234228Sluigi			}
1037257529Sluigi		out:
1038234228Sluigi			p->objtotal = i;
1039257529Sluigi			/* we may have stopped in the middle of a cluster */
1040257529Sluigi			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
1041234228Sluigi			break;
1042234228Sluigi		}
1043282978Spkelsey		/*
1044282978Spkelsey		 * Set bitmap and lut state for all buffers in the current
1045282978Spkelsey		 * cluster.
1046282978Spkelsey		 *
1047282978Spkelsey		 * [i, lim) is the set of buffer indexes that cover the
1048282978Spkelsey		 * current cluster.
1049282978Spkelsey		 *
1050282978Spkelsey		 * 'clust' is really the address of the current buffer in
1051282978Spkelsey		 * the current cluster as we index through it with a stride
1052282978Spkelsey		 * of p->_objsize.
1053282978Spkelsey		 */
1054241719Sluigi		for (; i < lim; i++, clust += p->_objsize) {
1055234228Sluigi			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
1056234228Sluigi			p->lut[i].vaddr = clust;
1057234228Sluigi			p->lut[i].paddr = vtophys(clust);
1058234228Sluigi		}
1059234228Sluigi	}
1060257529Sluigi	p->objfree = p->objtotal;
1061257529Sluigi	p->memtotal = p->numclusters * p->_clustsize;
1062257529Sluigi	if (p->objfree == 0)
1063257529Sluigi		goto clean;
1064245835Sluigi	if (netmap_verbose)
1065245835Sluigi		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
1066257529Sluigi		    p->numclusters, p->_clustsize >> 10,
1067257529Sluigi		    p->memtotal >> 10, p->name);
1068234228Sluigi
1069241719Sluigi	return 0;
1070234228Sluigi
1071234228Sluigiclean:
1072241719Sluigi	netmap_reset_obj_allocator(p);
1073241719Sluigi	return ENOMEM;
1074234228Sluigi}
1075234228Sluigi
1076241719Sluigi/* call with lock held */
1077234228Sluigistatic int
1078257529Sluiginetmap_memory_config_changed(struct netmap_mem_d *nmd)
1079234228Sluigi{
1080241719Sluigi	int i;
1081234228Sluigi
1082241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1083257529Sluigi		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
1084257529Sluigi		    nmd->pools[i].r_objtotal != netmap_params[i].num)
1085241719Sluigi		    return 1;
1086241719Sluigi	}
1087241719Sluigi	return 0;
1088241719Sluigi}
1089234228Sluigi
1090257529Sluigistatic void
1091257529Sluiginetmap_mem_reset_all(struct netmap_mem_d *nmd)
1092257529Sluigi{
1093257529Sluigi	int i;
1094261909Sluigi
1095261909Sluigi	if (netmap_verbose)
1096261909Sluigi		D("resetting %p", nmd);
1097257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1098257529Sluigi		netmap_reset_obj_allocator(&nmd->pools[i]);
1099257529Sluigi	}
1100257529Sluigi	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
1101257529Sluigi}
1102234228Sluigi
1103257529Sluigistatic int
1104270063Sluiginetmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
1105270063Sluigi{
1106270063Sluigi	int i, lim = p->_objtotal;
1107270063Sluigi
1108270063Sluigi	if (na->pdev == NULL)
1109270063Sluigi		return 0;
1110270063Sluigi
1111270063Sluigi#ifdef __FreeBSD__
1112270063Sluigi	(void)i;
1113270063Sluigi	(void)lim;
1114270063Sluigi	D("unsupported on FreeBSD");
1115270063Sluigi#else /* linux */
1116270063Sluigi	for (i = 2; i < lim; i++) {
1117270063Sluigi		netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr);
1118270063Sluigi	}
1119270063Sluigi#endif /* linux */
1120270063Sluigi
1121270063Sluigi	return 0;
1122270063Sluigi}
1123270063Sluigi
1124270063Sluigistatic int
1125270063Sluiginetmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
1126270063Sluigi{
1127270063Sluigi#ifdef __FreeBSD__
1128270063Sluigi	D("unsupported on FreeBSD");
1129270063Sluigi#else /* linux */
1130270063Sluigi	int i, lim = p->_objtotal;
1131270063Sluigi
1132270063Sluigi	if (na->pdev == NULL)
1133270063Sluigi		return 0;
1134270063Sluigi
1135270063Sluigi	for (i = 2; i < lim; i++) {
1136270063Sluigi		netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr,
1137270063Sluigi				p->lut[i].vaddr);
1138270063Sluigi	}
1139270063Sluigi#endif /* linux */
1140270063Sluigi
1141270063Sluigi	return 0;
1142270063Sluigi}
1143270063Sluigi
1144270063Sluigistatic int
1145257529Sluiginetmap_mem_finalize_all(struct netmap_mem_d *nmd)
1146257529Sluigi{
1147257529Sluigi	int i;
1148257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED)
1149257529Sluigi		return 0;
1150257529Sluigi	nmd->lasterr = 0;
1151257529Sluigi	nmd->nm_totalsize = 0;
1152257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1153257529Sluigi		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
1154257529Sluigi		if (nmd->lasterr)
1155257529Sluigi			goto error;
1156257529Sluigi		nmd->nm_totalsize += nmd->pools[i].memtotal;
1157257529Sluigi	}
1158257529Sluigi	/* buffers 0 and 1 are reserved */
1159257529Sluigi	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
1160257529Sluigi	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
1161257529Sluigi	nmd->flags |= NETMAP_MEM_FINALIZED;
1162257529Sluigi
1163261909Sluigi	if (netmap_verbose)
1164261909Sluigi		D("interfaces %d KB, rings %d KB, buffers %d MB",
1165261909Sluigi		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1166261909Sluigi		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1167261909Sluigi		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1168257529Sluigi
1169261909Sluigi	if (netmap_verbose)
1170261909Sluigi		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1171257529Sluigi
1172257529Sluigi
1173257529Sluigi	return 0;
1174257529Sluigierror:
1175257529Sluigi	netmap_mem_reset_all(nmd);
1176257529Sluigi	return nmd->lasterr;
1177257529Sluigi}
1178257529Sluigi
1179257529Sluigi
1180257529Sluigi
1181285349Sluigistatic void
1182257529Sluiginetmap_mem_private_delete(struct netmap_mem_d *nmd)
1183257529Sluigi{
1184257529Sluigi	if (nmd == NULL)
1185257529Sluigi		return;
1186261909Sluigi	if (netmap_verbose)
1187261909Sluigi		D("deleting %p", nmd);
1188285349Sluigi	if (nmd->active > 0)
1189285349Sluigi		D("bug: deleting mem allocator with active=%d!", nmd->active);
1190261909Sluigi	nm_mem_release_id(nmd);
1191261909Sluigi	if (netmap_verbose)
1192261909Sluigi		D("done deleting %p", nmd);
1193257529Sluigi	NMA_LOCK_DESTROY(nmd);
1194257529Sluigi	free(nmd, M_DEVBUF);
1195257529Sluigi}
1196257529Sluigi
1197257529Sluigistatic int
1198257529Sluiginetmap_mem_private_config(struct netmap_mem_d *nmd)
1199257529Sluigi{
1200257529Sluigi	/* nothing to do, we are configured on creation
1201257529Sluigi 	 * and configuration never changes thereafter
1202257529Sluigi 	 */
1203257529Sluigi	return 0;
1204257529Sluigi}
1205257529Sluigi
1206257529Sluigistatic int
1207257529Sluiginetmap_mem_private_finalize(struct netmap_mem_d *nmd)
1208257529Sluigi{
1209257529Sluigi	int err;
1210285349Sluigi	NMA_LOCK(nmd);
1211285349Sluigi	nmd->active++;
1212257529Sluigi	err = netmap_mem_finalize_all(nmd);
1213285349Sluigi	NMA_UNLOCK(nmd);
1214257529Sluigi	return err;
1215257529Sluigi
1216257529Sluigi}
1217257529Sluigi
1218259412Sluigistatic void
1219259412Sluiginetmap_mem_private_deref(struct netmap_mem_d *nmd)
1220257529Sluigi{
1221285349Sluigi	NMA_LOCK(nmd);
1222285349Sluigi	if (--nmd->active <= 0)
1223257529Sluigi		netmap_mem_reset_all(nmd);
1224285349Sluigi	NMA_UNLOCK(nmd);
1225257529Sluigi}
1226257529Sluigi
1227261909Sluigi
1228261909Sluigi/*
1229261909Sluigi * allocator for private memory
1230261909Sluigi */
1231257529Sluigistruct netmap_mem_d *
1232261909Sluiginetmap_mem_private_new(const char *name, u_int txr, u_int txd,
1233261909Sluigi	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
1234257529Sluigi{
1235257529Sluigi	struct netmap_mem_d *d = NULL;
1236257529Sluigi	struct netmap_obj_params p[NETMAP_POOLS_NR];
1237261909Sluigi	int i, err;
1238261909Sluigi	u_int v, maxd;
1239257529Sluigi
1240257529Sluigi	d = malloc(sizeof(struct netmap_mem_d),
1241257529Sluigi			M_DEVBUF, M_NOWAIT | M_ZERO);
1242261909Sluigi	if (d == NULL) {
1243261909Sluigi		err = ENOMEM;
1244261909Sluigi		goto error;
1245261909Sluigi	}
1246257529Sluigi
1247257529Sluigi	*d = nm_blueprint;
1248257529Sluigi
1249261909Sluigi	err = nm_mem_assign_id(d);
1250261909Sluigi	if (err)
1251261909Sluigi		goto error;
1252261909Sluigi
1253261909Sluigi	/* account for the fake host rings */
1254257529Sluigi	txr++;
1255257529Sluigi	rxr++;
1256261909Sluigi
1257261909Sluigi	/* copy the min values */
1258261909Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1259261909Sluigi		p[i] = netmap_min_priv_params[i];
1260261909Sluigi	}
1261261909Sluigi
1262261909Sluigi	/* possibly increase them to fit user request */
1263261909Sluigi	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
1264261909Sluigi	if (p[NETMAP_IF_POOL].size < v)
1265261909Sluigi		p[NETMAP_IF_POOL].size = v;
1266261909Sluigi	v = 2 + 4 * npipes;
1267261909Sluigi	if (p[NETMAP_IF_POOL].num < v)
1268261909Sluigi		p[NETMAP_IF_POOL].num = v;
1269257529Sluigi	maxd = (txd > rxd) ? txd : rxd;
1270261909Sluigi	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
1271261909Sluigi	if (p[NETMAP_RING_POOL].size < v)
1272261909Sluigi		p[NETMAP_RING_POOL].size = v;
1273261909Sluigi	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
1274261909Sluigi         * and two rx rings (again, 1 normal and 1 fake host)
1275261909Sluigi         */
1276261909Sluigi	v = txr + rxr + 8 * npipes;
1277261909Sluigi	if (p[NETMAP_RING_POOL].num < v)
1278261909Sluigi		p[NETMAP_RING_POOL].num = v;
1279261909Sluigi	/* for each pipe we only need the buffers for the 4 "real" rings.
1280267128Sluigi         * On the other end, the pipe ring dimension may be different from
1281261909Sluigi         * the parent port ring dimension. As a compromise, we allocate twice the
1282261909Sluigi         * space actually needed if the pipe rings were the same size as the parent rings
1283261909Sluigi         */
1284261909Sluigi	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1285261909Sluigi		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1286261909Sluigi	if (p[NETMAP_BUF_POOL].num < v)
1287261909Sluigi		p[NETMAP_BUF_POOL].num = v;
1288257529Sluigi
1289261909Sluigi	if (netmap_verbose)
1290261909Sluigi		D("req if %d*%d ring %d*%d buf %d*%d",
1291257529Sluigi			p[NETMAP_IF_POOL].num,
1292257529Sluigi			p[NETMAP_IF_POOL].size,
1293257529Sluigi			p[NETMAP_RING_POOL].num,
1294257529Sluigi			p[NETMAP_RING_POOL].size,
1295257529Sluigi			p[NETMAP_BUF_POOL].num,
1296257529Sluigi			p[NETMAP_BUF_POOL].size);
1297257529Sluigi
1298257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1299257529Sluigi		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1300257529Sluigi				nm_blueprint.pools[i].name,
1301257529Sluigi				name);
1302261909Sluigi		err = netmap_config_obj_allocator(&d->pools[i],
1303261909Sluigi				p[i].num, p[i].size);
1304261909Sluigi		if (err)
1305257529Sluigi			goto error;
1306257529Sluigi	}
1307257529Sluigi
1308257529Sluigi	d->flags &= ~NETMAP_MEM_FINALIZED;
1309257529Sluigi
1310257529Sluigi	NMA_LOCK_INIT(d);
1311257529Sluigi
1312257529Sluigi	return d;
1313257529Sluigierror:
1314257529Sluigi	netmap_mem_private_delete(d);
1315261909Sluigi	if (perr)
1316261909Sluigi		*perr = err;
1317257529Sluigi	return NULL;
1318257529Sluigi}
1319257529Sluigi
1320257529Sluigi
1321241719Sluigi/* call with lock held */
1322241719Sluigistatic int
1323257529Sluiginetmap_mem_global_config(struct netmap_mem_d *nmd)
1324241719Sluigi{
1325241719Sluigi	int i;
1326234228Sluigi
1327285349Sluigi	if (nmd->active)
1328257529Sluigi		/* already in use, we cannot change the configuration */
1329241719Sluigi		goto out;
1330234228Sluigi
1331257529Sluigi	if (!netmap_memory_config_changed(nmd))
1332257529Sluigi		goto out;
1333257529Sluigi
1334285349Sluigi	ND("reconfiguring");
1335241719Sluigi
1336257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1337241719Sluigi		/* reset previous allocation */
1338241719Sluigi		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1339257529Sluigi			netmap_reset_obj_allocator(&nmd->pools[i]);
1340250184Sluigi		}
1341257529Sluigi		nmd->flags &= ~NETMAP_MEM_FINALIZED;
1342259412Sluigi	}
1343241719Sluigi
1344241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1345257529Sluigi		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1346241719Sluigi				netmap_params[i].num, netmap_params[i].size);
1347257529Sluigi		if (nmd->lasterr)
1348241719Sluigi			goto out;
1349241719Sluigi	}
1350241719Sluigi
1351241719Sluigiout:
1352241719Sluigi
1353257529Sluigi	return nmd->lasterr;
1354241719Sluigi}
1355241719Sluigi
1356241719Sluigistatic int
1357257529Sluiginetmap_mem_global_finalize(struct netmap_mem_d *nmd)
1358241719Sluigi{
1359257529Sluigi	int err;
1360282978Spkelsey
1361241719Sluigi	/* update configuration if changed */
1362257529Sluigi	if (netmap_mem_global_config(nmd))
1363241719Sluigi		goto out;
1364241719Sluigi
1365285349Sluigi	nmd->active++;
1366257529Sluigi
1367257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1368241719Sluigi		/* may happen if config is not changed */
1369241719Sluigi		ND("nothing to do");
1370241719Sluigi		goto out;
1371241719Sluigi	}
1372241719Sluigi
1373257529Sluigi	if (netmap_mem_finalize_all(nmd))
1374257529Sluigi		goto out;
1375241719Sluigi
1376257529Sluigi	nmd->lasterr = 0;
1377241719Sluigi
1378241719Sluigiout:
1379257529Sluigi	if (nmd->lasterr)
1380285349Sluigi		nmd->active--;
1381257529Sluigi	err = nmd->lasterr;
1382241719Sluigi
1383257529Sluigi	return err;
1384241719Sluigi
1385234228Sluigi}
1386234228Sluigi
1387285349Sluigistatic void
1388285349Sluiginetmap_mem_global_delete(struct netmap_mem_d *nmd)
1389285349Sluigi{
1390285349Sluigi	int i;
1391285349Sluigi
1392285349Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1393285349Sluigi	    netmap_destroy_obj_allocator(&nm_mem.pools[i]);
1394285349Sluigi	}
1395285349Sluigi
1396285349Sluigi	NMA_LOCK_DESTROY(&nm_mem);
1397285349Sluigi}
1398285349Sluigi
1399257529Sluigiint
1400257529Sluiginetmap_mem_init(void)
1401241719Sluigi{
1402257529Sluigi	NMA_LOCK_INIT(&nm_mem);
1403285349Sluigi	netmap_mem_get(&nm_mem);
1404241719Sluigi	return (0);
1405241719Sluigi}
1406234228Sluigi
1407257529Sluigivoid
1408257529Sluiginetmap_mem_fini(void)
1409234228Sluigi{
1410285349Sluigi	netmap_mem_put(&nm_mem);
1411234228Sluigi}
1412234228Sluigi
1413241719Sluigistatic void
1414241719Sluiginetmap_free_rings(struct netmap_adapter *na)
1415241719Sluigi{
1416285349Sluigi	enum txrx t;
1417285349Sluigi
1418285349Sluigi	for_rx_tx(t) {
1419285349Sluigi		u_int i;
1420285349Sluigi		for (i = 0; i < netmap_real_rings(na, t); i++) {
1421285349Sluigi			struct netmap_kring *kring = &NMR(na, t)[i];
1422285349Sluigi			struct netmap_ring *ring = kring->ring;
1423285349Sluigi
1424285349Sluigi			if (ring == NULL)
1425285349Sluigi				continue;
1426285349Sluigi			netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1427285349Sluigi			netmap_ring_free(na->nm_mem, ring);
1428285349Sluigi			kring->ring = NULL;
1429285349Sluigi		}
1430241719Sluigi	}
1431241719Sluigi}
1432234228Sluigi
1433259412Sluigi/* call with NMA_LOCK held *
1434257529Sluigi *
1435259412Sluigi * Allocate netmap rings and buffers for this card
1436259412Sluigi * The rings are contiguous, but have variable size.
1437261909Sluigi * The kring array must follow the layout described
1438261909Sluigi * in netmap_krings_create().
1439245835Sluigi */
1440285349Sluigistatic int
1441285349Sluiginetmap_mem2_rings_create(struct netmap_adapter *na)
1442234228Sluigi{
1443285349Sluigi	enum txrx t;
1444234228Sluigi
1445257529Sluigi	NMA_LOCK(na->nm_mem);
1446257529Sluigi
1447285349Sluigi	for_rx_tx(t) {
1448285349Sluigi		u_int i;
1449234228Sluigi
1450285349Sluigi		for (i = 0; i <= nma_get_nrings(na, t); i++) {
1451285349Sluigi			struct netmap_kring *kring = &NMR(na, t)[i];
1452285349Sluigi			struct netmap_ring *ring = kring->ring;
1453285349Sluigi			u_int len, ndesc;
1454285349Sluigi
1455285349Sluigi			if (ring) {
1456285349Sluigi				ND("%s already created", kring->name);
1457285349Sluigi				continue; /* already created by somebody else */
1458285349Sluigi			}
1459285349Sluigi			ndesc = kring->nkr_num_slots;
1460285349Sluigi			len = sizeof(struct netmap_ring) +
1461285349Sluigi				  ndesc * sizeof(struct netmap_slot);
1462285349Sluigi			ring = netmap_ring_malloc(na->nm_mem, len);
1463285349Sluigi			if (ring == NULL) {
1464285349Sluigi				D("Cannot allocate %s_ring", nm_txrx2str(t));
1465261909Sluigi				goto cleanup;
1466261909Sluigi			}
1467285349Sluigi			ND("txring at %p", ring);
1468285349Sluigi			kring->ring = ring;
1469285349Sluigi			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1470285349Sluigi			*(int64_t *)(uintptr_t)&ring->buf_ofs =
1471285349Sluigi			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1472285349Sluigi				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1473285349Sluigi				netmap_ring_offset(na->nm_mem, ring);
1474234228Sluigi
1475285349Sluigi			/* copy values from kring */
1476285349Sluigi			ring->head = kring->rhead;
1477285349Sluigi			ring->cur = kring->rcur;
1478285349Sluigi			ring->tail = kring->rtail;
1479285349Sluigi			*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
1480285349Sluigi				netmap_mem_bufsize(na->nm_mem);
1481285349Sluigi			ND("%s h %d c %d t %d", kring->name,
1482285349Sluigi				ring->head, ring->cur, ring->tail);
1483285349Sluigi			ND("initializing slots for %s_ring", nm_txrx2str(txrx));
1484285349Sluigi			if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) {
1485285349Sluigi				/* this is a real ring */
1486285349Sluigi				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1487285349Sluigi					D("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
1488285349Sluigi					goto cleanup;
1489285349Sluigi				}
1490285349Sluigi			} else {
1491285349Sluigi				/* this is a fake ring, set all indices to 0 */
1492285349Sluigi				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
1493261909Sluigi			}
1494285349Sluigi		        /* ring info */
1495285349Sluigi		        *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
1496285349Sluigi		        *(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
1497241719Sluigi		}
1498234228Sluigi	}
1499259412Sluigi
1500259412Sluigi	NMA_UNLOCK(na->nm_mem);
1501259412Sluigi
1502259412Sluigi	return 0;
1503259412Sluigi
1504259412Sluigicleanup:
1505259412Sluigi	netmap_free_rings(na);
1506259412Sluigi
1507259412Sluigi	NMA_UNLOCK(na->nm_mem);
1508259412Sluigi
1509259412Sluigi	return ENOMEM;
1510259412Sluigi}
1511259412Sluigi
1512285349Sluigistatic void
1513285349Sluiginetmap_mem2_rings_delete(struct netmap_adapter *na)
1514259412Sluigi{
1515259412Sluigi	/* last instance, release bufs and rings */
1516259412Sluigi	NMA_LOCK(na->nm_mem);
1517259412Sluigi
1518259412Sluigi	netmap_free_rings(na);
1519259412Sluigi
1520259412Sluigi	NMA_UNLOCK(na->nm_mem);
1521259412Sluigi}
1522259412Sluigi
1523259412Sluigi
1524259412Sluigi/* call with NMA_LOCK held */
1525259412Sluigi/*
1526259412Sluigi * Allocate the per-fd structure netmap_if.
1527259412Sluigi *
1528259412Sluigi * We assume that the configuration stored in na
1529259412Sluigi * (number of tx/rx rings and descs) does not change while
1530259412Sluigi * the interface is in netmap mode.
1531259412Sluigi */
1532285349Sluigistatic struct netmap_if *
1533285349Sluiginetmap_mem2_if_new(struct netmap_adapter *na)
1534259412Sluigi{
1535259412Sluigi	struct netmap_if *nifp;
1536259412Sluigi	ssize_t base; /* handy for relative offsets between rings and nifp */
1537285349Sluigi	u_int i, len, n[NR_TXRX], ntot;
1538285349Sluigi	enum txrx t;
1539259412Sluigi
1540285349Sluigi	ntot = 0;
1541285349Sluigi	for_rx_tx(t) {
1542285349Sluigi		/* account for the (eventually fake) host rings */
1543285349Sluigi		n[t] = nma_get_nrings(na, t) + 1;
1544285349Sluigi		ntot += n[t];
1545285349Sluigi	}
1546234228Sluigi	/*
1547259412Sluigi	 * the descriptor is followed inline by an array of offsets
1548259412Sluigi	 * to the tx and rx rings in the shared memory region.
1549259412Sluigi	 */
1550259412Sluigi
1551259412Sluigi	NMA_LOCK(na->nm_mem);
1552259412Sluigi
1553285349Sluigi	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
1554259412Sluigi	nifp = netmap_if_malloc(na->nm_mem, len);
1555259412Sluigi	if (nifp == NULL) {
1556259412Sluigi		NMA_UNLOCK(na->nm_mem);
1557259412Sluigi		return NULL;
1558259412Sluigi	}
1559259412Sluigi
1560259412Sluigi	/* initialize base fields -- override const */
1561259412Sluigi	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
1562259412Sluigi	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
1563270063Sluigi	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);
1564259412Sluigi
1565259412Sluigi	/*
1566234228Sluigi	 * fill the slots for the rx and tx rings. They contain the offset
1567234228Sluigi	 * between the ring and nifp, so the information is usable in
1568234228Sluigi	 * userspace to reach the ring from the nifp.
1569234228Sluigi	 */
1570257529Sluigi	base = netmap_if_offset(na->nm_mem, nifp);
1571285349Sluigi	for (i = 0; i < n[NR_TX]; i++) {
1572234228Sluigi		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
1573257529Sluigi			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
1574234228Sluigi	}
1575285349Sluigi	for (i = 0; i < n[NR_RX]; i++) {
1576285349Sluigi		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] =
1577257529Sluigi			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
1578234228Sluigi	}
1579257529Sluigi
1580257529Sluigi	NMA_UNLOCK(na->nm_mem);
1581257529Sluigi
1582234228Sluigi	return (nifp);
1583234228Sluigi}
1584234228Sluigi
1585285349Sluigistatic void
1586285349Sluiginetmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
1587257529Sluigi{
1588257529Sluigi	if (nifp == NULL)
1589257529Sluigi		/* nothing to do */
1590257529Sluigi		return;
1591257529Sluigi	NMA_LOCK(na->nm_mem);
1592261909Sluigi	if (nifp->ni_bufs_head)
1593261909Sluigi		netmap_extra_free(na, nifp->ni_bufs_head);
1594257529Sluigi	netmap_if_free(na->nm_mem, nifp);
1595257529Sluigi
1596257529Sluigi	NMA_UNLOCK(na->nm_mem);
1597257529Sluigi}
1598257529Sluigi
1599234228Sluigistatic void
1600257529Sluiginetmap_mem_global_deref(struct netmap_mem_d *nmd)
1601234228Sluigi{
1602257529Sluigi
1603285349Sluigi	nmd->active--;
1604285349Sluigi	if (!nmd->active)
1605270063Sluigi		nmd->nm_grp = -1;
1606245835Sluigi	if (netmap_verbose)
1607285349Sluigi		D("active = %d", nmd->active);
1608257529Sluigi
1609234228Sluigi}
1610257529Sluigi
1611285349Sluigistruct netmap_mem_ops netmap_mem_global_ops = {
1612285349Sluigi	.nmd_get_lut = netmap_mem2_get_lut,
1613285349Sluigi	.nmd_get_info = netmap_mem2_get_info,
1614285349Sluigi	.nmd_ofstophys = netmap_mem2_ofstophys,
1615285349Sluigi	.nmd_config = netmap_mem_global_config,
1616285349Sluigi	.nmd_finalize = netmap_mem_global_finalize,
1617285349Sluigi	.nmd_deref = netmap_mem_global_deref,
1618285349Sluigi	.nmd_delete = netmap_mem_global_delete,
1619285349Sluigi	.nmd_if_offset = netmap_mem2_if_offset,
1620285349Sluigi	.nmd_if_new = netmap_mem2_if_new,
1621285349Sluigi	.nmd_if_delete = netmap_mem2_if_delete,
1622285349Sluigi	.nmd_rings_create = netmap_mem2_rings_create,
1623285349Sluigi	.nmd_rings_delete = netmap_mem2_rings_delete
1624285349Sluigi};
1625285349Sluigistruct netmap_mem_ops netmap_mem_private_ops = {
1626285349Sluigi	.nmd_get_lut = netmap_mem2_get_lut,
1627285349Sluigi	.nmd_get_info = netmap_mem2_get_info,
1628285349Sluigi	.nmd_ofstophys = netmap_mem2_ofstophys,
1629285349Sluigi	.nmd_config = netmap_mem_private_config,
1630285349Sluigi	.nmd_finalize = netmap_mem_private_finalize,
1631285349Sluigi	.nmd_deref = netmap_mem_private_deref,
1632285349Sluigi	.nmd_if_offset = netmap_mem2_if_offset,
1633285349Sluigi	.nmd_delete = netmap_mem_private_delete,
1634285349Sluigi	.nmd_if_new = netmap_mem2_if_new,
1635285349Sluigi	.nmd_if_delete = netmap_mem2_if_delete,
1636285349Sluigi	.nmd_rings_create = netmap_mem2_rings_create,
1637285349Sluigi	.nmd_rings_delete = netmap_mem2_rings_delete
1638285349Sluigi};
1639