1234228Sluigi/*
2262151Sluigi * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
3234228Sluigi *
4234228Sluigi * Redistribution and use in source and binary forms, with or without
5234228Sluigi * modification, are permitted provided that the following conditions
6234228Sluigi * are met:
7234228Sluigi *   1. Redistributions of source code must retain the above copyright
8234228Sluigi *      notice, this list of conditions and the following disclaimer.
9234228Sluigi *   2. Redistributions in binary form must reproduce the above copyright
10234228Sluigi *      notice, this list of conditions and the following disclaimer in the
11262151Sluigi *      documentation and/or other materials provided with the distribution.
12234228Sluigi *
13234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16234228Sluigi * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23234228Sluigi * SUCH DAMAGE.
24234228Sluigi */
25234228Sluigi
26262151Sluigi#ifdef linux
27262151Sluigi#include "bsd_glue.h"
28262151Sluigi#endif /* linux */
29234228Sluigi
30262151Sluigi#ifdef __APPLE__
31262151Sluigi#include "osx_glue.h"
32262151Sluigi#endif /* __APPLE__ */
33234228Sluigi
34262151Sluigi#ifdef __FreeBSD__
35262151Sluigi#include <sys/cdefs.h> /* prerequisite */
36262151Sluigi__FBSDID("$FreeBSD$");
37234228Sluigi
38262151Sluigi#include <sys/types.h>
39262151Sluigi#include <sys/malloc.h>
40262151Sluigi#include <sys/proc.h>
41262151Sluigi#include <vm/vm.h>	/* vtophys */
42262151Sluigi#include <vm/pmap.h>	/* vtophys */
43262151Sluigi#include <sys/socket.h> /* sockaddrs */
44262151Sluigi#include <sys/selinfo.h>
45262151Sluigi#include <sys/sysctl.h>
46262151Sluigi#include <net/if.h>
47262151Sluigi#include <net/if_var.h>
48262151Sluigi#include <net/vnet.h>
49262151Sluigi#include <machine/bus.h>	/* bus_dmamap_* */
50262151Sluigi
51262151Sluigi#endif /* __FreeBSD__ */
52262151Sluigi
53262151Sluigi#include <net/netmap.h>
54262151Sluigi#include <dev/netmap/netmap_kern.h>
55262151Sluigi#include "netmap_mem2.h"
56262151Sluigi
/*
 * Upper bound on the number of buffers in the global pool:
 * 20*4096*2 = 163840, sized for a large machine.
 * The expansion is parenthesized so the macro stays correct when
 * used inside a larger expression (the original bare 20*4096*2
 * would bind wrongly next to / or unary operators).
 */
#define NETMAP_BUF_MAX_NUM	(20*4096*2)	/* large machine */

/* maximum length (including the terminating NUL) of a pool name */
#define NETMAP_POOL_MAX_NAMSZ	32
61270252Sluigi
/*
 * Indices of the three object pools inside a struct netmap_mem_d.
 * The order matters: the pools are exported to userspace back to
 * back in this order (see the netmap_*_offset macros below).
 */
enum {
	NETMAP_IF_POOL = 0,	/* netmap_if descriptors */
	NETMAP_RING_POOL = 1,	/* netmap rings */
	NETMAP_BUF_POOL = 2,	/* packet buffers */
	NETMAP_POOLS_NR = 3	/* number of pools */
};
68270252Sluigi
69270252Sluigi
/*
 * Requested configuration for one object pool: how many objects
 * and how large each should be.  Filled from the sysctl-tunable
 * tables netmap_params[] / netmap_min_priv_params[] below.
 */
struct netmap_obj_params {
	u_int size;	/* requested size of each object */
	u_int num;	/* requested number of objects */
};
/*
 * A pool of equally-sized objects, backed by physically contiguous
 * clusters.  Free objects are tracked in a bitmap, one bit per
 * object (1 == free).
 */
struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	u_int objtotal;         /* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;          /* number of free objects. */

	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;       /* cluster size */
	u_int _clustentries;    /* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values (kept to detect configuration changes) */
	u_int r_objtotal;
	u_int r_objsize;
};
108270252Sluigi
/* lock type protecting an allocator: semaphore on Linux, mutex on BSD */
#ifdef linux
// XXX a mtx would suffice here 20130415 lr
#define NMA_LOCK_T		struct semaphore
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#endif /* linux */

/* per-allocator methods; global and private allocators provide
 * different implementations (see nm_mem and nm_blueprint below) */
typedef int (*netmap_mem_config_t)(struct netmap_mem_d*);
typedef int (*netmap_mem_finalize_t)(struct netmap_mem_d*);
typedef void (*netmap_mem_deref_t)(struct netmap_mem_d*);

/* allocator identifier; 0 is reserved as an error value */
typedef uint16_t nm_memid_t;
121270252Sluigi
/*
 * One memory allocator instance: its lock, the three object pools
 * (if/ring/buf), the allocator-specific methods, and linkage in
 * the global circular list of allocators.
 */
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;  /* protect the allocator */
	u_int nm_totalsize; /* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	/* allocator methods (global vs private implementations) */
	netmap_mem_config_t   config;
	netmap_mem_finalize_t finalize;
	netmap_mem_deref_t    deref;

	nm_memid_t nm_id;	/* allocator identifier (0 is reserved) */
	int nm_grp;	/* iommu group id; -1 means not yet assigned */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;
};
143270252Sluigi
144270252Sluigi/* accessor functions */
145270252Sluigistruct lut_entry*
146270252Sluiginetmap_mem_get_lut(struct netmap_mem_d *nmd)
147270252Sluigi{
148270252Sluigi	return nmd->pools[NETMAP_BUF_POOL].lut;
149270252Sluigi}
150270252Sluigi
151270252Sluigiu_int
152270252Sluiginetmap_mem_get_buftotal(struct netmap_mem_d *nmd)
153270252Sluigi{
154270252Sluigi	return nmd->pools[NETMAP_BUF_POOL].objtotal;
155270252Sluigi}
156270252Sluigi
157270252Sluigisize_t
158270252Sluiginetmap_mem_get_bufsize(struct netmap_mem_d *nmd)
159270252Sluigi{
160270252Sluigi	return nmd->pools[NETMAP_BUF_POOL]._objsize;
161270252Sluigi}
162270252Sluigi
/* lock/unlock an allocator: down/up on a semaphore under Linux,
 * a plain sleep mutex on FreeBSD */
#ifdef linux
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */
174234228Sluigi
175241719Sluigi
/* requested sizes/numbers for the global allocator's pools,
 * tunable through the sysctls declared below */
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};
190241719Sluigi
/* default (minimum) sizes/numbers for private allocators,
 * also tunable through sysctls */
struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 1,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};
205234228Sluigi
206241719Sluigi
/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will have each its own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,

	.nm_id = 1,	/* id 0 is reserved as an error value */
	.nm_grp = -1,	/* no iommu group assigned yet */

	/* circular list, initially containing only this node */
	.prev = &nm_mem,
	.next = &nm_mem,
};
249241719Sluigi
250262151Sluigi
/* last node of the circular allocator list; new allocators are
 * inserted after it (see nm_mem_assign_id) */
struct netmap_mem_d *netmap_last_mem_d = &nm_mem;

/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			/* pool names are formats: %s is the interface name */
			.name 	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name 	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};
287262151Sluigi
/* memory allocator related sysctls */

#define STRINGIFY(x) #x


/*
 * Expand to six sysctl entries for pool `id`/`name`:
 *  - name_size / name_num: requested size/number for the global
 *    allocator (read-write);
 *  - name_curr_size / name_curr_num: current values (read-only);
 *  - priv_name_size / priv_name_num: defaults used when creating
 *    private allocators (read-write).
 */
#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
313241719Sluigi
314262151Sluigistatic int
315262151Sluiginm_mem_assign_id(struct netmap_mem_d *nmd)
316262151Sluigi{
317262151Sluigi	nm_memid_t id;
318262151Sluigi	struct netmap_mem_d *scan = netmap_last_mem_d;
319262151Sluigi	int error = ENOMEM;
320262151Sluigi
321262151Sluigi	NMA_LOCK(&nm_mem);
322262151Sluigi
323262151Sluigi	do {
324262151Sluigi		/* we rely on unsigned wrap around */
325262151Sluigi		id = scan->nm_id + 1;
326262151Sluigi		if (id == 0) /* reserve 0 as error value */
327262151Sluigi			id = 1;
328262151Sluigi		scan = scan->next;
329262151Sluigi		if (id != scan->nm_id) {
330262151Sluigi			nmd->nm_id = id;
331262151Sluigi			nmd->prev = scan->prev;
332262151Sluigi			nmd->next = scan;
333262151Sluigi			scan->prev->next = nmd;
334262151Sluigi			scan->prev = nmd;
335262151Sluigi			netmap_last_mem_d = nmd;
336262151Sluigi			error = 0;
337262151Sluigi			break;
338262151Sluigi		}
339262151Sluigi	} while (scan != netmap_last_mem_d);
340262151Sluigi
341262151Sluigi	NMA_UNLOCK(&nm_mem);
342262151Sluigi	return error;
343262151Sluigi}
344262151Sluigi
345262151Sluigistatic void
346262151Sluiginm_mem_release_id(struct netmap_mem_d *nmd)
347262151Sluigi{
348262151Sluigi	NMA_LOCK(&nm_mem);
349262151Sluigi
350262151Sluigi	nmd->prev->next = nmd->next;
351262151Sluigi	nmd->next->prev = nmd->prev;
352262151Sluigi
353262151Sluigi	if (netmap_last_mem_d == nmd)
354262151Sluigi		netmap_last_mem_d = nmd->prev;
355262151Sluigi
356262151Sluigi	nmd->prev = nmd->next = NULL;
357262151Sluigi
358262151Sluigi	NMA_UNLOCK(&nm_mem);
359262151Sluigi}
360262151Sluigi
361270252Sluigistatic int
362270252Sluiginm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
363270252Sluigi{
364270252Sluigi	int err = 0, id;
365270252Sluigi	id = nm_iommu_group_id(dev);
366270252Sluigi	if (netmap_verbose)
367270252Sluigi		D("iommu_group %d", id);
368262151Sluigi
369270252Sluigi	NMA_LOCK(nmd);
370270252Sluigi
371270252Sluigi	if (nmd->nm_grp < 0)
372270252Sluigi		nmd->nm_grp = id;
373270252Sluigi
374270252Sluigi	if (nmd->nm_grp != id)
375270252Sluigi		nmd->lasterr = err = ENOMEM;
376270252Sluigi
377270252Sluigi	NMA_UNLOCK(nmd);
378270252Sluigi	return err;
379270252Sluigi}
380270252Sluigi
/*
 * Convert an offset in the exported memory region into a physical
 * address.  The three pools are laid out back to back (if, ring,
 * buf): first find the pool containing the offset, then locate the
 * object through the pool's lookup table.
 * Returns 0 if the offset is out of range.
 */
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;	/* original value, for the error report */
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	/* walk the pools, rebasing the offset at each step */
	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}
416234228Sluigi
417262151Sluigiint
418262151Sluiginetmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
419262151Sluigi	nm_memid_t *id)
420262151Sluigi{
421262151Sluigi	int error = 0;
422262151Sluigi	NMA_LOCK(nmd);
423262151Sluigi	error = nmd->config(nmd);
424262151Sluigi	if (error)
425262151Sluigi		goto out;
426270252Sluigi	if (size) {
427270252Sluigi		if (nmd->flags & NETMAP_MEM_FINALIZED) {
428270252Sluigi			*size = nmd->nm_totalsize;
429270252Sluigi		} else {
430270252Sluigi			int i;
431270252Sluigi			*size = 0;
432270252Sluigi			for (i = 0; i < NETMAP_POOLS_NR; i++) {
433270252Sluigi				struct netmap_obj_pool *p = nmd->pools + i;
434270252Sluigi				*size += (p->_numclusters * p->_clustsize);
435270252Sluigi			}
436262151Sluigi		}
437262151Sluigi	}
438270252Sluigi	if (memflags)
439270252Sluigi		*memflags = nmd->flags;
440270252Sluigi	if (id)
441270252Sluigi		*id = nmd->nm_id;
442262151Sluigiout:
443262151Sluigi	NMA_UNLOCK(nmd);
444262151Sluigi	return error;
445262151Sluigi}
446262151Sluigi
447234228Sluigi/*
448234228Sluigi * we store objects by kernel address, need to find the offset
449234228Sluigi * within the pool to export the value to userspace.
450234228Sluigi * Algorithm: scan until we find the cluster, then add the
451234228Sluigi * actual offset in the cluster
452234228Sluigi */
453234242Sluigistatic ssize_t
454234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
455234228Sluigi{
456262151Sluigi	int i, k = p->_clustentries, n = p->objtotal;
457234228Sluigi	ssize_t ofs = 0;
458234228Sluigi
459234228Sluigi	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
460234228Sluigi		const char *base = p->lut[i].vaddr;
461234228Sluigi		ssize_t relofs = (const char *) vaddr - base;
462234228Sluigi
463249504Sluigi		if (relofs < 0 || relofs >= p->_clustsize)
464234228Sluigi			continue;
465234228Sluigi
466234228Sluigi		ofs = ofs + relofs;
467234228Sluigi		ND("%s: return offset %d (cluster %d) for pointer %p",
468234228Sluigi		    p->name, ofs, i, vaddr);
469234228Sluigi		return ofs;
470234228Sluigi	}
471234228Sluigi	D("address %p is not contained inside any cluster (%s)",
472234228Sluigi	    vaddr, p->name);
473234228Sluigi	return 0; /* An error occurred */
474234228Sluigi}
475234228Sluigi
/* Helper functions which convert virtual addresses to offsets */

/* if descriptors live in the first exported pool */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

/* rings come after the whole if pool */
#define netmap_ring_offset(n, v)				\
    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

/* buffers come after the if and ring pools */
#define netmap_buf_offset(n, v)					\
    ((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +		\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
488234228Sluigi
489234228Sluigi
490262151Sluigissize_t
491262151Sluiginetmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
492262151Sluigi{
493262151Sluigi	ssize_t v;
494262151Sluigi	NMA_LOCK(nmd);
495262151Sluigi	v = netmap_if_offset(nmd, addr);
496262151Sluigi	NMA_UNLOCK(nmd);
497262151Sluigi	return v;
498262151Sluigi}
499262151Sluigi
/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 *
 * Allocate one object of at least `len` bytes from pool p.
 * If `start` is non-NULL it is used as the initial bitmap word to
 * scan, and is updated with the last word scanned; if `index` is
 * non-NULL it receives the index of the allocated object.
 * Returns the object's virtual address, or NULL on failure.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;	/* resume the scan from the hint */

	/* termination is guaranteed by p->free, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);

	if (start)
		*start = i;	/* hint for the next allocation */
	return vaddr;
}
548234228Sluigi
549234228Sluigi
550234228Sluigi/*
551262151Sluigi * free by index, not by address.
552262151Sluigi * XXX should we also cleanup the content ?
553234228Sluigi */
554262151Sluigistatic int
555234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
556234228Sluigi{
557262151Sluigi	uint32_t *ptr, mask;
558262151Sluigi
559234228Sluigi	if (j >= p->objtotal) {
560234228Sluigi		D("invalid index %u, max %u", j, p->objtotal);
561262151Sluigi		return 1;
562234228Sluigi	}
563262151Sluigi	ptr = &p->bitmap[j / 32];
564262151Sluigi	mask = (1 << (j % 32));
565262151Sluigi	if (*ptr & mask) {
566262151Sluigi		D("ouch, double free on buffer %d", j);
567262151Sluigi		return 1;
568262151Sluigi	} else {
569262151Sluigi		*ptr |= mask;
570262151Sluigi		p->objfree++;
571262151Sluigi		return 0;
572262151Sluigi	}
573234228Sluigi}
574234228Sluigi
/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	/* j tracks the index of the first object of cluster i,
	 * and is later reused as the index of the freed object */
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* Given address, is out of the scope of the current cluster.*/
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		/* index = cluster base index + offset within cluster */
		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}
600234228Sluigi
/* current size of a buffer object in allocator n */
#define netmap_mem_bufsize(n)	\
	((n)->pools[NETMAP_BUF_POOL]._objsize)

/* per-pool wrappers around netmap_obj_malloc/netmap_obj_free_va */
#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
610234228Sluigi
611234228Sluigi
612262151Sluigi#if 0 // XXX unused
613234228Sluigi/* Return the index associated to the given packet buffer */
614262151Sluigi#define netmap_buf_index(n, v)						\
615262151Sluigi    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
616262151Sluigi#endif
617234228Sluigi
618262151Sluigi/*
619262151Sluigi * allocate extra buffers in a linked list.
620262151Sluigi * returns the actual number.
621262151Sluigi */
622262151Sluigiuint32_t
623262151Sluiginetmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
624262151Sluigi{
625262151Sluigi	struct netmap_mem_d *nmd = na->nm_mem;
626262151Sluigi	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
627234228Sluigi
628262151Sluigi	NMA_LOCK(nmd);
629262151Sluigi
630262151Sluigi	*head = 0;	/* default, 'null' index ie empty list */
631262151Sluigi	for (i = 0 ; i < n; i++) {
632262151Sluigi		uint32_t cur = *head;	/* save current head */
633262151Sluigi		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
634262151Sluigi		if (p == NULL) {
635262151Sluigi			D("no more buffers after %d of %d", i, n);
636262151Sluigi			*head = cur; /* restore */
637262151Sluigi			break;
638262151Sluigi		}
639262151Sluigi		RD(5, "allocate buffer %d -> %d", *head, cur);
640262151Sluigi		*p = cur; /* link to previous head */
641262151Sluigi	}
642262151Sluigi
643262151Sluigi	NMA_UNLOCK(nmd);
644262151Sluigi
645262151Sluigi	return i;
646262151Sluigi}
647262151Sluigi
648262151Sluigistatic void
649262151Sluiginetmap_extra_free(struct netmap_adapter *na, uint32_t head)
650262151Sluigi{
651262151Sluigi        struct lut_entry *lut = na->na_lut;
652262151Sluigi	struct netmap_mem_d *nmd = na->nm_mem;
653262151Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
654262151Sluigi	uint32_t i, cur, *buf;
655262151Sluigi
656262151Sluigi	D("freeing the extra list");
657262151Sluigi	for (i = 0; head >=2 && head < p->objtotal; i++) {
658262151Sluigi		cur = head;
659262151Sluigi		buf = lut[head].vaddr;
660262151Sluigi		head = *buf;
661262151Sluigi		*buf = 0;
662262151Sluigi		if (netmap_obj_free(p, cur))
663262151Sluigi			break;
664262151Sluigi	}
665262151Sluigi	if (head != 0)
666262151Sluigi		D("breaking with head %d", head);
667262151Sluigi	D("freed %d buffers", i);
668262151Sluigi}
669262151Sluigi
670262151Sluigi
671241719Sluigi/* Return nonzero on error */
672241719Sluigistatic int
673262151Sluiginetmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
674234228Sluigi{
675262151Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
676262151Sluigi	u_int i = 0;	/* slot counter */
677241719Sluigi	uint32_t pos = 0;	/* slot in p->bitmap */
678241719Sluigi	uint32_t index = 0;	/* buffer index */
679234228Sluigi
680234228Sluigi	for (i = 0; i < n; i++) {
681262151Sluigi		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
682234228Sluigi		if (vaddr == NULL) {
683262151Sluigi			D("no more buffers after %d of %d", i, n);
684234228Sluigi			goto cleanup;
685234228Sluigi		}
686241719Sluigi		slot[i].buf_idx = index;
687234228Sluigi		slot[i].len = p->_objsize;
688262151Sluigi		slot[i].flags = 0;
689234228Sluigi	}
690234228Sluigi
691241719Sluigi	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
692241719Sluigi	return (0);
693234228Sluigi
694234228Sluigicleanup:
695241643Semaste	while (i > 0) {
696241643Semaste		i--;
697241719Sluigi		netmap_obj_free(p, slot[i].buf_idx);
698234228Sluigi	}
699241719Sluigi	bzero(slot, n * sizeof(slot[0]));
700241719Sluigi	return (ENOMEM);
701234228Sluigi}
702234228Sluigi
703262151Sluigistatic void
704262151Sluiginetmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
705262151Sluigi{
706262151Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
707262151Sluigi	u_int i;
708234228Sluigi
709262151Sluigi	for (i = 0; i < n; i++) {
710262151Sluigi		slot[i].buf_idx = index;
711262151Sluigi		slot[i].len = p->_objsize;
712262151Sluigi		slot[i].flags = 0;
713262151Sluigi	}
714262151Sluigi}
715262151Sluigi
716262151Sluigi
717234228Sluigistatic void
718262151Sluiginetmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
719234228Sluigi{
720262151Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
721241719Sluigi
722234228Sluigi	if (i < 2 || i >= p->objtotal) {
723234228Sluigi		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
724234228Sluigi		return;
725234228Sluigi	}
726241719Sluigi	netmap_obj_free(p, i);
727234228Sluigi}
728234228Sluigi
729262151Sluigi
730234228Sluigistatic void
731262151Sluiginetmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
732262151Sluigi{
733262151Sluigi	u_int i;
734262151Sluigi
735262151Sluigi	for (i = 0; i < n; i++) {
736262151Sluigi		if (slot[i].buf_idx > 2)
737262151Sluigi			netmap_free_buf(nmd, slot[i].buf_idx);
738262151Sluigi	}
739262151Sluigi}
740262151Sluigi
/*
 * Release all memory owned by pool p (clusters, lookup table and
 * bitmap) and mark the pool as empty.  The configuration fields
 * (_objtotal, _objsize, ...) are preserved, so the pool can be
 * finalized again later.
 */
static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		/* one contiguous allocation per cluster: free through
		 * the first lut entry of each cluster */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);	/* lut is vmalloc'ed on Linux */
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}
771234228Sluigi
772234228Sluigi/*
773241719Sluigi * Free all resources related to an allocator.
774241719Sluigi */
775241719Sluigistatic void
776241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p)
777241719Sluigi{
778241719Sluigi	if (p == NULL)
779241719Sluigi		return;
780241719Sluigi	netmap_reset_obj_allocator(p);
781241719Sluigi}
782241719Sluigi
783241719Sluigi/*
784234228Sluigi * We receive a request for objtotal objects, of size objsize each.
785234228Sluigi * Internally we may round up both numbers, as we allocate objects
786234228Sluigi * in small clusters multiple of the page size.
787262151Sluigi * We need to keep track of objtotal and clustentries,
788234228Sluigi * as they are needed when freeing memory.
789234228Sluigi *
790234228Sluigi * XXX note -- userspace needs the buffers to be contiguous,
791234228Sluigi *	so we cannot afford gaps at the end of a cluster.
792234228Sluigi */
793241719Sluigi
794241719Sluigi
795241719Sluigi/* call with NMA_LOCK held */
796241719Sluigistatic int
797241719Sluiginetmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
798234228Sluigi{
799262151Sluigi	int i;
800234228Sluigi	u_int clustsize;	/* the cluster size, multiple of page size */
801234228Sluigi	u_int clustentries;	/* how many objects per entry */
802234228Sluigi
803262151Sluigi	/* we store the current request, so we can
804262151Sluigi	 * detect configuration changes later */
805262151Sluigi	p->r_objtotal = objtotal;
806262151Sluigi	p->r_objsize = objsize;
807262151Sluigi
808270252Sluigi#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
809262151Sluigi#define LINE_ROUND	NM_CACHE_ALIGN	// 64
810234228Sluigi	if (objsize >= MAX_CLUSTSIZE) {
811234228Sluigi		/* we could do it but there is no point */
812234228Sluigi		D("unsupported allocation for %d bytes", objsize);
813262151Sluigi		return EINVAL;
814234228Sluigi	}
815234228Sluigi	/* make sure objsize is a multiple of LINE_ROUND */
816234228Sluigi	i = (objsize & (LINE_ROUND - 1));
817234228Sluigi	if (i) {
818234228Sluigi		D("XXX aligning object by %d bytes", LINE_ROUND - i);
819234228Sluigi		objsize += LINE_ROUND - i;
820234228Sluigi	}
821241719Sluigi	if (objsize < p->objminsize || objsize > p->objmaxsize) {
822250184Sluigi		D("requested objsize %d out of range [%d, %d]",
823241719Sluigi			objsize, p->objminsize, p->objmaxsize);
824262151Sluigi		return EINVAL;
825241719Sluigi	}
826241719Sluigi	if (objtotal < p->nummin || objtotal > p->nummax) {
827250184Sluigi		D("requested objtotal %d out of range [%d, %d]",
828241719Sluigi			objtotal, p->nummin, p->nummax);
829262151Sluigi		return EINVAL;
830241719Sluigi	}
831234228Sluigi	/*
832234228Sluigi	 * Compute number of objects using a brute-force approach:
833234228Sluigi	 * given a max cluster size,
834234228Sluigi	 * we try to fill it with objects keeping track of the
835234228Sluigi	 * wasted space to the next page boundary.
836234228Sluigi	 */
837234228Sluigi	for (clustentries = 0, i = 1;; i++) {
838234228Sluigi		u_int delta, used = i * objsize;
839234228Sluigi		if (used > MAX_CLUSTSIZE)
840234228Sluigi			break;
841234228Sluigi		delta = used % PAGE_SIZE;
842234228Sluigi		if (delta == 0) { // exact solution
843234228Sluigi			clustentries = i;
844234228Sluigi			break;
845234228Sluigi		}
846234228Sluigi	}
847270252Sluigi	/* exact solution not found */
848270252Sluigi	if (clustentries == 0) {
849270252Sluigi		D("unsupported allocation for %d bytes", objsize);
850270252Sluigi		return EINVAL;
851270252Sluigi	}
852270252Sluigi	/* compute clustsize */
853234228Sluigi	clustsize = clustentries * objsize;
854245835Sluigi	if (netmap_verbose)
855245835Sluigi		D("objsize %d clustsize %d objects %d",
856245835Sluigi			objsize, clustsize, clustentries);
857234228Sluigi
858234228Sluigi	/*
859234228Sluigi	 * The number of clusters is n = ceil(objtotal/clustentries)
860234228Sluigi	 * objtotal' = n * clustentries
861234228Sluigi	 */
862262151Sluigi	p->_clustentries = clustentries;
863234228Sluigi	p->_clustsize = clustsize;
864262151Sluigi	p->_numclusters = (objtotal + clustentries - 1) / clustentries;
865262151Sluigi
866262151Sluigi	/* actual values (may be larger than requested) */
867234228Sluigi	p->_objsize = objsize;
868262151Sluigi	p->_objtotal = p->_numclusters * clustentries;
869234228Sluigi
870241719Sluigi	return 0;
871241719Sluigi}
872241719Sluigi
873241719Sluigi
874241719Sluigi/* call with NMA_LOCK held */
875241719Sluigistatic int
876241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p)
877241719Sluigi{
878262151Sluigi	int i; /* must be signed */
879262151Sluigi	size_t n;
880241719Sluigi
881262151Sluigi	/* optimistically assume we have enough memory */
882262151Sluigi	p->numclusters = p->_numclusters;
883262151Sluigi	p->objtotal = p->_objtotal;
884262151Sluigi
885241719Sluigi	n = sizeof(struct lut_entry) * p->objtotal;
886241719Sluigi#ifdef linux
887241719Sluigi	p->lut = vmalloc(n);
888241719Sluigi#else
889241750Semaste	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
890241719Sluigi#endif
891234228Sluigi	if (p->lut == NULL) {
892262151Sluigi		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
893234228Sluigi		goto clean;
894234228Sluigi	}
895234228Sluigi
896234228Sluigi	/* Allocate the bitmap */
897234228Sluigi	n = (p->objtotal + 31) / 32;
898241750Semaste	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
899234228Sluigi	if (p->bitmap == NULL) {
900262151Sluigi		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
901241719Sluigi		    p->name);
902234228Sluigi		goto clean;
903234228Sluigi	}
904241719Sluigi	p->bitmap_slots = n;
905234228Sluigi
906234228Sluigi	/*
907234228Sluigi	 * Allocate clusters, init pointers and bitmap
908234228Sluigi	 */
909262151Sluigi
910262151Sluigi	n = p->_clustsize;
911262151Sluigi	for (i = 0; i < (int)p->objtotal;) {
912262151Sluigi		int lim = i + p->_clustentries;
913234228Sluigi		char *clust;
914234228Sluigi
915262151Sluigi		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
916262151Sluigi		    (size_t)0, -1UL, PAGE_SIZE, 0);
917234228Sluigi		if (clust == NULL) {
918234228Sluigi			/*
919234228Sluigi			 * If we get here, there is a severe memory shortage,
920234228Sluigi			 * so halve the allocated memory to reclaim some.
921234228Sluigi			 */
922234228Sluigi			D("Unable to create cluster at %d for '%s' allocator",
923241719Sluigi			    i, p->name);
924262151Sluigi			if (i < 2) /* nothing to halve */
925262151Sluigi				goto out;
926234228Sluigi			lim = i / 2;
927241719Sluigi			for (i--; i >= lim; i--) {
928234228Sluigi				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
929262151Sluigi				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
930234228Sluigi					contigfree(p->lut[i].vaddr,
931262151Sluigi						n, M_NETMAP);
932234228Sluigi			}
933262151Sluigi		out:
934234228Sluigi			p->objtotal = i;
935262151Sluigi			/* we may have stopped in the middle of a cluster */
936262151Sluigi			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
937234228Sluigi			break;
938234228Sluigi		}
939241719Sluigi		for (; i < lim; i++, clust += p->_objsize) {
940234228Sluigi			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
941234228Sluigi			p->lut[i].vaddr = clust;
942234228Sluigi			p->lut[i].paddr = vtophys(clust);
943234228Sluigi		}
944234228Sluigi	}
945262151Sluigi	p->objfree = p->objtotal;
946262151Sluigi	p->memtotal = p->numclusters * p->_clustsize;
947262151Sluigi	if (p->objfree == 0)
948262151Sluigi		goto clean;
949245835Sluigi	if (netmap_verbose)
950245835Sluigi		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
951262151Sluigi		    p->numclusters, p->_clustsize >> 10,
952262151Sluigi		    p->memtotal >> 10, p->name);
953234228Sluigi
954241719Sluigi	return 0;
955234228Sluigi
956234228Sluigiclean:
957241719Sluigi	netmap_reset_obj_allocator(p);
958241719Sluigi	return ENOMEM;
959234228Sluigi}
960234228Sluigi
961241719Sluigi/* call with lock held */
962234228Sluigistatic int
963262151Sluiginetmap_memory_config_changed(struct netmap_mem_d *nmd)
964234228Sluigi{
965241719Sluigi	int i;
966234228Sluigi
967241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
968262151Sluigi		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
969262151Sluigi		    nmd->pools[i].r_objtotal != netmap_params[i].num)
970241719Sluigi		    return 1;
971241719Sluigi	}
972241719Sluigi	return 0;
973241719Sluigi}
974234228Sluigi
975262151Sluigistatic void
976262151Sluiginetmap_mem_reset_all(struct netmap_mem_d *nmd)
977262151Sluigi{
978262151Sluigi	int i;
979234228Sluigi
980262151Sluigi	if (netmap_verbose)
981262151Sluigi		D("resetting %p", nmd);
982262151Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
983262151Sluigi		netmap_reset_obj_allocator(&nmd->pools[i]);
984262151Sluigi	}
985262151Sluigi	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
986262151Sluigi}
987262151Sluigi
988262151Sluigistatic int
989270252Sluiginetmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
990270252Sluigi{
991270252Sluigi	int i, lim = p->_objtotal;
992270252Sluigi
993270252Sluigi	if (na->pdev == NULL)
994270252Sluigi		return 0;
995270252Sluigi
996270252Sluigi#ifdef __FreeBSD__
997270252Sluigi	(void)i;
998270252Sluigi	(void)lim;
999270252Sluigi	D("unsupported on FreeBSD");
1000270252Sluigi#else /* linux */
1001270252Sluigi	for (i = 2; i < lim; i++) {
1002270252Sluigi		netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr);
1003270252Sluigi	}
1004270252Sluigi#endif /* linux */
1005270252Sluigi
1006270252Sluigi	return 0;
1007270252Sluigi}
1008270252Sluigi
1009270252Sluigistatic int
1010270252Sluiginetmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
1011270252Sluigi{
1012270252Sluigi#ifdef __FreeBSD__
1013270252Sluigi	D("unsupported on FreeBSD");
1014270252Sluigi#else /* linux */
1015270252Sluigi	int i, lim = p->_objtotal;
1016270252Sluigi
1017270252Sluigi	if (na->pdev == NULL)
1018270252Sluigi		return 0;
1019270252Sluigi
1020270252Sluigi	for (i = 2; i < lim; i++) {
1021270252Sluigi		netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr,
1022270252Sluigi				p->lut[i].vaddr);
1023270252Sluigi	}
1024270252Sluigi#endif /* linux */
1025270252Sluigi
1026270252Sluigi	return 0;
1027270252Sluigi}
1028270252Sluigi
1029270252Sluigistatic int
1030262151Sluiginetmap_mem_finalize_all(struct netmap_mem_d *nmd)
1031262151Sluigi{
1032262151Sluigi	int i;
1033262151Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED)
1034262151Sluigi		return 0;
1035262151Sluigi	nmd->lasterr = 0;
1036262151Sluigi	nmd->nm_totalsize = 0;
1037262151Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1038262151Sluigi		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
1039262151Sluigi		if (nmd->lasterr)
1040262151Sluigi			goto error;
1041262151Sluigi		nmd->nm_totalsize += nmd->pools[i].memtotal;
1042262151Sluigi	}
1043262151Sluigi	/* buffers 0 and 1 are reserved */
1044262151Sluigi	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
1045262151Sluigi	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
1046262151Sluigi	nmd->flags |= NETMAP_MEM_FINALIZED;
1047262151Sluigi
1048262151Sluigi	if (netmap_verbose)
1049262151Sluigi		D("interfaces %d KB, rings %d KB, buffers %d MB",
1050262151Sluigi		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1051262151Sluigi		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1052262151Sluigi		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1053262151Sluigi
1054262151Sluigi	if (netmap_verbose)
1055262151Sluigi		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1056262151Sluigi
1057262151Sluigi
1058262151Sluigi	return 0;
1059262151Sluigierror:
1060262151Sluigi	netmap_mem_reset_all(nmd);
1061262151Sluigi	return nmd->lasterr;
1062262151Sluigi}
1063262151Sluigi
1064262151Sluigi
1065262151Sluigi
1066262151Sluigivoid
1067262151Sluiginetmap_mem_private_delete(struct netmap_mem_d *nmd)
1068262151Sluigi{
1069262151Sluigi	if (nmd == NULL)
1070262151Sluigi		return;
1071262151Sluigi	if (netmap_verbose)
1072262151Sluigi		D("deleting %p", nmd);
1073262151Sluigi	if (nmd->refcount > 0)
1074262151Sluigi		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
1075262151Sluigi	nm_mem_release_id(nmd);
1076262151Sluigi	if (netmap_verbose)
1077262151Sluigi		D("done deleting %p", nmd);
1078262151Sluigi	NMA_LOCK_DESTROY(nmd);
1079262151Sluigi	free(nmd, M_DEVBUF);
1080262151Sluigi}
1081262151Sluigi
1082262151Sluigistatic int
1083262151Sluiginetmap_mem_private_config(struct netmap_mem_d *nmd)
1084262151Sluigi{
1085262151Sluigi	/* nothing to do, we are configured on creation
1086262151Sluigi 	 * and configuration never changes thereafter
1087262151Sluigi 	 */
1088262151Sluigi	return 0;
1089262151Sluigi}
1090262151Sluigi
1091262151Sluigistatic int
1092262151Sluiginetmap_mem_private_finalize(struct netmap_mem_d *nmd)
1093262151Sluigi{
1094262151Sluigi	int err;
1095262151Sluigi	NMA_LOCK(nmd);
1096262151Sluigi	nmd->refcount++;
1097262151Sluigi	err = netmap_mem_finalize_all(nmd);
1098262151Sluigi	NMA_UNLOCK(nmd);
1099262151Sluigi	return err;
1100262151Sluigi
1101262151Sluigi}
1102262151Sluigi
1103262151Sluigistatic void
1104262151Sluiginetmap_mem_private_deref(struct netmap_mem_d *nmd)
1105262151Sluigi{
1106262151Sluigi	NMA_LOCK(nmd);
1107262151Sluigi	if (--nmd->refcount <= 0)
1108262151Sluigi		netmap_mem_reset_all(nmd);
1109262151Sluigi	NMA_UNLOCK(nmd);
1110262151Sluigi}
1111262151Sluigi
1112262151Sluigi
1113262151Sluigi/*
1114262151Sluigi * allocator for private memory
1115262151Sluigi */
1116262151Sluigistruct netmap_mem_d *
1117262151Sluiginetmap_mem_private_new(const char *name, u_int txr, u_int txd,
1118262151Sluigi	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
1119262151Sluigi{
1120262151Sluigi	struct netmap_mem_d *d = NULL;
1121262151Sluigi	struct netmap_obj_params p[NETMAP_POOLS_NR];
1122262151Sluigi	int i, err;
1123262151Sluigi	u_int v, maxd;
1124262151Sluigi
1125262151Sluigi	d = malloc(sizeof(struct netmap_mem_d),
1126262151Sluigi			M_DEVBUF, M_NOWAIT | M_ZERO);
1127262151Sluigi	if (d == NULL) {
1128262151Sluigi		err = ENOMEM;
1129262151Sluigi		goto error;
1130262151Sluigi	}
1131262151Sluigi
1132262151Sluigi	*d = nm_blueprint;
1133262151Sluigi
1134262151Sluigi	err = nm_mem_assign_id(d);
1135262151Sluigi	if (err)
1136262151Sluigi		goto error;
1137262151Sluigi
1138262151Sluigi	/* account for the fake host rings */
1139262151Sluigi	txr++;
1140262151Sluigi	rxr++;
1141262151Sluigi
1142262151Sluigi	/* copy the min values */
1143262151Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1144262151Sluigi		p[i] = netmap_min_priv_params[i];
1145262151Sluigi	}
1146262151Sluigi
1147262151Sluigi	/* possibly increase them to fit user request */
1148262151Sluigi	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
1149262151Sluigi	if (p[NETMAP_IF_POOL].size < v)
1150262151Sluigi		p[NETMAP_IF_POOL].size = v;
1151262151Sluigi	v = 2 + 4 * npipes;
1152262151Sluigi	if (p[NETMAP_IF_POOL].num < v)
1153262151Sluigi		p[NETMAP_IF_POOL].num = v;
1154262151Sluigi	maxd = (txd > rxd) ? txd : rxd;
1155262151Sluigi	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
1156262151Sluigi	if (p[NETMAP_RING_POOL].size < v)
1157262151Sluigi		p[NETMAP_RING_POOL].size = v;
1158262151Sluigi	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
1159262151Sluigi         * and two rx rings (again, 1 normal and 1 fake host)
1160262151Sluigi         */
1161262151Sluigi	v = txr + rxr + 8 * npipes;
1162262151Sluigi	if (p[NETMAP_RING_POOL].num < v)
1163262151Sluigi		p[NETMAP_RING_POOL].num = v;
1164262151Sluigi	/* for each pipe we only need the buffers for the 4 "real" rings.
1165267282Sluigi         * On the other end, the pipe ring dimension may be different from
1166262151Sluigi         * the parent port ring dimension. As a compromise, we allocate twice the
1167262151Sluigi         * space actually needed if the pipe rings were the same size as the parent rings
1168262151Sluigi         */
1169262151Sluigi	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1170262151Sluigi		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1171262151Sluigi	if (p[NETMAP_BUF_POOL].num < v)
1172262151Sluigi		p[NETMAP_BUF_POOL].num = v;
1173262151Sluigi
1174262151Sluigi	if (netmap_verbose)
1175262151Sluigi		D("req if %d*%d ring %d*%d buf %d*%d",
1176262151Sluigi			p[NETMAP_IF_POOL].num,
1177262151Sluigi			p[NETMAP_IF_POOL].size,
1178262151Sluigi			p[NETMAP_RING_POOL].num,
1179262151Sluigi			p[NETMAP_RING_POOL].size,
1180262151Sluigi			p[NETMAP_BUF_POOL].num,
1181262151Sluigi			p[NETMAP_BUF_POOL].size);
1182262151Sluigi
1183262151Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1184262151Sluigi		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1185262151Sluigi				nm_blueprint.pools[i].name,
1186262151Sluigi				name);
1187262151Sluigi		err = netmap_config_obj_allocator(&d->pools[i],
1188262151Sluigi				p[i].num, p[i].size);
1189262151Sluigi		if (err)
1190262151Sluigi			goto error;
1191262151Sluigi	}
1192262151Sluigi
1193262151Sluigi	d->flags &= ~NETMAP_MEM_FINALIZED;
1194262151Sluigi
1195262151Sluigi	NMA_LOCK_INIT(d);
1196262151Sluigi
1197262151Sluigi	return d;
1198262151Sluigierror:
1199262151Sluigi	netmap_mem_private_delete(d);
1200262151Sluigi	if (perr)
1201262151Sluigi		*perr = err;
1202262151Sluigi	return NULL;
1203262151Sluigi}
1204262151Sluigi
1205262151Sluigi
1206241719Sluigi/* call with lock held */
1207241719Sluigistatic int
1208262151Sluiginetmap_mem_global_config(struct netmap_mem_d *nmd)
1209241719Sluigi{
1210241719Sluigi	int i;
1211234228Sluigi
1212262151Sluigi	if (nmd->refcount)
1213262151Sluigi		/* already in use, we cannot change the configuration */
1214241719Sluigi		goto out;
1215234228Sluigi
1216262151Sluigi	if (!netmap_memory_config_changed(nmd))
1217262151Sluigi		goto out;
1218262151Sluigi
1219241719Sluigi	D("reconfiguring");
1220241719Sluigi
1221262151Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1222241719Sluigi		/* reset previous allocation */
1223241719Sluigi		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1224262151Sluigi			netmap_reset_obj_allocator(&nmd->pools[i]);
1225250184Sluigi		}
1226262151Sluigi		nmd->flags &= ~NETMAP_MEM_FINALIZED;
1227262151Sluigi	}
1228241719Sluigi
1229241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1230262151Sluigi		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1231241719Sluigi				netmap_params[i].num, netmap_params[i].size);
1232262151Sluigi		if (nmd->lasterr)
1233241719Sluigi			goto out;
1234241719Sluigi	}
1235241719Sluigi
1236241719Sluigiout:
1237241719Sluigi
1238262151Sluigi	return nmd->lasterr;
1239241719Sluigi}
1240241719Sluigi
1241241719Sluigistatic int
1242262151Sluiginetmap_mem_global_finalize(struct netmap_mem_d *nmd)
1243241719Sluigi{
1244262151Sluigi	int err;
1245241719Sluigi
1246262151Sluigi	NMA_LOCK(nmd);
1247241719Sluigi
1248262151Sluigi
1249241719Sluigi	/* update configuration if changed */
1250262151Sluigi	if (netmap_mem_global_config(nmd))
1251241719Sluigi		goto out;
1252241719Sluigi
1253262151Sluigi	nmd->refcount++;
1254262151Sluigi
1255262151Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1256241719Sluigi		/* may happen if config is not changed */
1257241719Sluigi		ND("nothing to do");
1258241719Sluigi		goto out;
1259241719Sluigi	}
1260241719Sluigi
1261262151Sluigi	if (netmap_mem_finalize_all(nmd))
1262262151Sluigi		goto out;
1263241719Sluigi
1264262151Sluigi	nmd->lasterr = 0;
1265241719Sluigi
1266241719Sluigiout:
1267262151Sluigi	if (nmd->lasterr)
1268262151Sluigi		nmd->refcount--;
1269262151Sluigi	err = nmd->lasterr;
1270241719Sluigi
1271262151Sluigi	NMA_UNLOCK(nmd);
1272241719Sluigi
1273262151Sluigi	return err;
1274241719Sluigi
1275234228Sluigi}
1276234228Sluigi
1277262151Sluigiint
1278262151Sluiginetmap_mem_init(void)
1279241719Sluigi{
1280262151Sluigi	NMA_LOCK_INIT(&nm_mem);
1281241719Sluigi	return (0);
1282241719Sluigi}
1283234228Sluigi
1284262151Sluigivoid
1285262151Sluiginetmap_mem_fini(void)
1286234228Sluigi{
1287241719Sluigi	int i;
1288241719Sluigi
1289241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1290241719Sluigi	    netmap_destroy_obj_allocator(&nm_mem.pools[i]);
1291241719Sluigi	}
1292262151Sluigi	NMA_LOCK_DESTROY(&nm_mem);
1293234228Sluigi}
1294234228Sluigi
1295241719Sluigistatic void
1296241719Sluiginetmap_free_rings(struct netmap_adapter *na)
1297241719Sluigi{
1298262151Sluigi	struct netmap_kring *kring;
1299262151Sluigi	struct netmap_ring *ring;
1300245835Sluigi	if (!na->tx_rings)
1301245835Sluigi		return;
1302262151Sluigi	for (kring = na->tx_rings; kring != na->rx_rings; kring++) {
1303262151Sluigi		ring = kring->ring;
1304262151Sluigi		if (ring == NULL)
1305262151Sluigi			continue;
1306262151Sluigi		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1307262151Sluigi		netmap_ring_free(na->nm_mem, ring);
1308262151Sluigi		kring->ring = NULL;
1309241719Sluigi	}
1310262151Sluigi	for (/* cont'd from above */; kring != na->tailroom; kring++) {
1311262151Sluigi		ring = kring->ring;
1312262151Sluigi		if (ring == NULL)
1313262151Sluigi			continue;
1314262151Sluigi		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1315262151Sluigi		netmap_ring_free(na->nm_mem, ring);
1316262151Sluigi		kring->ring = NULL;
1317241719Sluigi	}
1318241719Sluigi}
1319234228Sluigi
1320262151Sluigi/* call with NMA_LOCK held *
1321262151Sluigi *
1322262151Sluigi * Allocate netmap rings and buffers for this card
1323262151Sluigi * The rings are contiguous, but have variable size.
1324262151Sluigi * The kring array must follow the layout described
1325262151Sluigi * in netmap_krings_create().
1326245835Sluigi */
1327262151Sluigiint
1328262151Sluiginetmap_mem_rings_create(struct netmap_adapter *na)
1329234228Sluigi{
1330234228Sluigi	struct netmap_ring *ring;
1331262151Sluigi	u_int len, ndesc;
1332234228Sluigi	struct netmap_kring *kring;
1333262151Sluigi	u_int i;
1334234228Sluigi
1335262151Sluigi	NMA_LOCK(na->nm_mem);
1336234228Sluigi
1337262151Sluigi        /* transmit rings */
1338262151Sluigi	for (i =0, kring = na->tx_rings; kring != na->rx_rings; kring++, i++) {
1339262151Sluigi		if (kring->ring) {
1340262151Sluigi			ND("%s %ld already created", kring->name, kring - na->tx_rings);
1341262151Sluigi			continue; /* already created by somebody else */
1342262151Sluigi		}
1343262151Sluigi		ndesc = kring->nkr_num_slots;
1344234228Sluigi		len = sizeof(struct netmap_ring) +
1345234228Sluigi			  ndesc * sizeof(struct netmap_slot);
1346262151Sluigi		ring = netmap_ring_malloc(na->nm_mem, len);
1347234228Sluigi		if (ring == NULL) {
1348262151Sluigi			D("Cannot allocate tx_ring");
1349234228Sluigi			goto cleanup;
1350234228Sluigi		}
1351262151Sluigi		ND("txring at %p", ring);
1352234228Sluigi		kring->ring = ring;
1353262151Sluigi		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1354262151Sluigi		*(int64_t *)(uintptr_t)&ring->buf_ofs =
1355262151Sluigi		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1356262151Sluigi			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1357262151Sluigi			netmap_ring_offset(na->nm_mem, ring);
1358234228Sluigi
1359262151Sluigi		/* copy values from kring */
1360262151Sluigi		ring->head = kring->rhead;
1361262151Sluigi		ring->cur = kring->rcur;
1362262151Sluigi		ring->tail = kring->rtail;
1363262151Sluigi		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
1364270252Sluigi			netmap_mem_bufsize(na->nm_mem);
1365262151Sluigi		ND("%s h %d c %d t %d", kring->name,
1366262151Sluigi			ring->head, ring->cur, ring->tail);
1367262151Sluigi		ND("initializing slots for txring");
1368262151Sluigi		if (i != na->num_tx_rings || (na->na_flags & NAF_HOST_RINGS)) {
1369262151Sluigi			/* this is a real ring */
1370262151Sluigi			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1371262151Sluigi				D("Cannot allocate buffers for tx_ring");
1372262151Sluigi				goto cleanup;
1373262151Sluigi			}
1374262151Sluigi		} else {
1375262151Sluigi			/* this is a fake tx ring, set all indices to 0 */
1376262151Sluigi			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
1377241719Sluigi		}
1378234228Sluigi	}
1379234228Sluigi
1380262151Sluigi	/* receive rings */
1381262151Sluigi	for ( i = 0 /* kring cont'd from above */ ; kring != na->tailroom; kring++, i++) {
1382262151Sluigi		if (kring->ring) {
1383262151Sluigi			ND("%s %ld already created", kring->name, kring - na->rx_rings);
1384262151Sluigi			continue; /* already created by somebody else */
1385262151Sluigi		}
1386262151Sluigi		ndesc = kring->nkr_num_slots;
1387234228Sluigi		len = sizeof(struct netmap_ring) +
1388234228Sluigi			  ndesc * sizeof(struct netmap_slot);
1389262151Sluigi		ring = netmap_ring_malloc(na->nm_mem, len);
1390234228Sluigi		if (ring == NULL) {
1391262151Sluigi			D("Cannot allocate rx_ring");
1392234228Sluigi			goto cleanup;
1393234228Sluigi		}
1394262151Sluigi		ND("rxring at %p", ring);
1395234228Sluigi		kring->ring = ring;
1396262151Sluigi		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1397262151Sluigi		*(int64_t *)(uintptr_t)&ring->buf_ofs =
1398262151Sluigi		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1399262151Sluigi		        na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1400262151Sluigi			netmap_ring_offset(na->nm_mem, ring);
1401234228Sluigi
1402262151Sluigi		/* copy values from kring */
1403262151Sluigi		ring->head = kring->rhead;
1404262151Sluigi		ring->cur = kring->rcur;
1405262151Sluigi		ring->tail = kring->rtail;
1406262151Sluigi		*(int *)(uintptr_t)&ring->nr_buf_size =
1407270252Sluigi			netmap_mem_bufsize(na->nm_mem);
1408262151Sluigi		ND("%s h %d c %d t %d", kring->name,
1409262151Sluigi			ring->head, ring->cur, ring->tail);
1410262151Sluigi		ND("initializing slots for rxring %p", ring);
1411262151Sluigi		if (i != na->num_rx_rings || (na->na_flags & NAF_HOST_RINGS)) {
1412262151Sluigi			/* this is a real ring */
1413262151Sluigi			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1414262151Sluigi				D("Cannot allocate buffers for rx_ring");
1415262151Sluigi				goto cleanup;
1416262151Sluigi			}
1417262151Sluigi		} else {
1418262151Sluigi			/* this is a fake rx ring, set all indices to 1 */
1419262151Sluigi			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 1);
1420241719Sluigi		}
1421234228Sluigi	}
1422262151Sluigi
1423262151Sluigi	NMA_UNLOCK(na->nm_mem);
1424262151Sluigi
1425262151Sluigi	return 0;
1426262151Sluigi
1427262151Sluigicleanup:
1428262151Sluigi	netmap_free_rings(na);
1429262151Sluigi
1430262151Sluigi	NMA_UNLOCK(na->nm_mem);
1431262151Sluigi
1432262151Sluigi	return ENOMEM;
1433262151Sluigi}
1434262151Sluigi
1435262151Sluigivoid
1436262151Sluiginetmap_mem_rings_delete(struct netmap_adapter *na)
1437262151Sluigi{
1438262151Sluigi	/* last instance, release bufs and rings */
1439262151Sluigi	NMA_LOCK(na->nm_mem);
1440262151Sluigi
1441262151Sluigi	netmap_free_rings(na);
1442262151Sluigi
1443262151Sluigi	NMA_UNLOCK(na->nm_mem);
1444262151Sluigi}
1445262151Sluigi
1446262151Sluigi
1447262151Sluigi/* call with NMA_LOCK held */
1448262151Sluigi/*
1449262151Sluigi * Allocate the per-fd structure netmap_if.
1450262151Sluigi *
1451262151Sluigi * We assume that the configuration stored in na
1452262151Sluigi * (number of tx/rx rings and descs) does not change while
1453262151Sluigi * the interface is in netmap mode.
1454262151Sluigi */
1455262151Sluigistruct netmap_if *
1456270252Sluiginetmap_mem_if_new(struct netmap_adapter *na)
1457262151Sluigi{
1458262151Sluigi	struct netmap_if *nifp;
1459262151Sluigi	ssize_t base; /* handy for relative offsets between rings and nifp */
1460262151Sluigi	u_int i, len, ntx, nrx;
1461262151Sluigi
1462262151Sluigi	/* account for the (eventually fake) host rings */
1463262151Sluigi	ntx = na->num_tx_rings + 1;
1464262151Sluigi	nrx = na->num_rx_rings + 1;
1465234228Sluigi	/*
1466262151Sluigi	 * the descriptor is followed inline by an array of offsets
1467262151Sluigi	 * to the tx and rx rings in the shared memory region.
1468262151Sluigi	 */
1469262151Sluigi
1470262151Sluigi	NMA_LOCK(na->nm_mem);
1471262151Sluigi
1472262151Sluigi	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
1473262151Sluigi	nifp = netmap_if_malloc(na->nm_mem, len);
1474262151Sluigi	if (nifp == NULL) {
1475262151Sluigi		NMA_UNLOCK(na->nm_mem);
1476262151Sluigi		return NULL;
1477262151Sluigi	}
1478262151Sluigi
1479262151Sluigi	/* initialize base fields -- override const */
1480262151Sluigi	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
1481262151Sluigi	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
1482270252Sluigi	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);
1483262151Sluigi
1484262151Sluigi	/*
1485234228Sluigi	 * fill the slots for the rx and tx rings. They contain the offset
1486234228Sluigi	 * between the ring and nifp, so the information is usable in
1487234228Sluigi	 * userspace to reach the ring from the nifp.
1488234228Sluigi	 */
1489262151Sluigi	base = netmap_if_offset(na->nm_mem, nifp);
1490234228Sluigi	for (i = 0; i < ntx; i++) {
1491234228Sluigi		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
1492262151Sluigi			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
1493234228Sluigi	}
1494234228Sluigi	for (i = 0; i < nrx; i++) {
1495234228Sluigi		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
1496262151Sluigi			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
1497234228Sluigi	}
1498262151Sluigi
1499262151Sluigi	NMA_UNLOCK(na->nm_mem);
1500262151Sluigi
1501234228Sluigi	return (nifp);
1502234228Sluigi}
1503234228Sluigi
1504262151Sluigivoid
1505262151Sluiginetmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
1506262151Sluigi{
1507262151Sluigi	if (nifp == NULL)
1508262151Sluigi		/* nothing to do */
1509262151Sluigi		return;
1510262151Sluigi	NMA_LOCK(na->nm_mem);
1511262151Sluigi	if (nifp->ni_bufs_head)
1512262151Sluigi		netmap_extra_free(na, nifp->ni_bufs_head);
1513262151Sluigi	netmap_if_free(na->nm_mem, nifp);
1514262151Sluigi
1515262151Sluigi	NMA_UNLOCK(na->nm_mem);
1516262151Sluigi}
1517262151Sluigi
1518234228Sluigistatic void
1519262151Sluiginetmap_mem_global_deref(struct netmap_mem_d *nmd)
1520234228Sluigi{
1521262151Sluigi	NMA_LOCK(nmd);
1522262151Sluigi
1523262151Sluigi	nmd->refcount--;
1524270252Sluigi	if (!nmd->refcount)
1525270252Sluigi		nmd->nm_grp = -1;
1526245835Sluigi	if (netmap_verbose)
1527262151Sluigi		D("refcount = %d", nmd->refcount);
1528262151Sluigi
1529262151Sluigi	NMA_UNLOCK(nmd);
1530234228Sluigi}
1531262151Sluigi
1532262151Sluigiint
1533270252Sluiginetmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1534262151Sluigi{
1535270252Sluigi	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
1536270252Sluigi		return ENOMEM;
1537270252Sluigi	} else {
1538270252Sluigi		nmd->finalize(nmd);
1539270252Sluigi	}
1540270252Sluigi
1541270252Sluigi	if (!nmd->lasterr && na->pdev)
1542270252Sluigi		netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
1543270252Sluigi
1544270252Sluigi	return nmd->lasterr;
1545262151Sluigi}
1546262151Sluigi
1547262151Sluigivoid
1548270252Sluiginetmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1549262151Sluigi{
1550270252Sluigi	NMA_LOCK(nmd);
1551270252Sluigi	netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
1552270252Sluigi	NMA_UNLOCK(nmd);
1553262151Sluigi	return nmd->deref(nmd);
1554262151Sluigi}
1555