netmap_mem2.c revision 260368
1234228Sluigi/*
2260368Sluigi * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
3234228Sluigi *
4234228Sluigi * Redistribution and use in source and binary forms, with or without
5234228Sluigi * modification, are permitted provided that the following conditions
6234228Sluigi * are met:
7234228Sluigi *   1. Redistributions of source code must retain the above copyright
8234228Sluigi *      notice, this list of conditions and the following disclaimer.
9234228Sluigi *   2. Redistributions in binary form must reproduce the above copyright
10234228Sluigi *      notice, this list of conditions and the following disclaimer in the
11259412Sluigi *      documentation and/or other materials provided with the distribution.
12234228Sluigi *
13234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16234228Sluigi * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23234228Sluigi * SUCH DAMAGE.
24234228Sluigi */
25234228Sluigi
26257529Sluigi#ifdef linux
27257529Sluigi#include "bsd_glue.h"
28257529Sluigi#endif /* linux */
29234228Sluigi
30257529Sluigi#ifdef __APPLE__
31257529Sluigi#include "osx_glue.h"
32257529Sluigi#endif /* __APPLE__ */
33234228Sluigi
34257529Sluigi#ifdef __FreeBSD__
35257529Sluigi#include <sys/cdefs.h> /* prerequisite */
36257529Sluigi__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap_mem2.c 260368 2014-01-06 12:53:15Z luigi $");
37234228Sluigi
38257529Sluigi#include <sys/types.h>
39257529Sluigi#include <sys/malloc.h>
40257529Sluigi#include <sys/proc.h>
41257529Sluigi#include <vm/vm.h>	/* vtophys */
42257529Sluigi#include <vm/pmap.h>	/* vtophys */
43257529Sluigi#include <sys/socket.h> /* sockaddrs */
44257529Sluigi#include <sys/selinfo.h>
45257529Sluigi#include <sys/sysctl.h>
46257529Sluigi#include <net/if.h>
47257529Sluigi#include <net/if_var.h>
48257529Sluigi#include <net/vnet.h>
49257529Sluigi#include <machine/bus.h>	/* bus_dmamap_* */
50257529Sluigi
51257529Sluigi#endif /* __FreeBSD__ */
52257529Sluigi
53257529Sluigi#include <net/netmap.h>
54257529Sluigi#include <dev/netmap/netmap_kern.h>
55257529Sluigi#include "netmap_mem2.h"
56257529Sluigi
/*
 * Lock primitives protecting a struct netmap_mem_d (its nm_mtx field):
 * a counting semaphore on Linux, a regular mutex on FreeBSD.
 * Note the FreeBSD NMA_LOCK_DESTROY really destroys the mutex, so the
 * lock must have been initialized before it may be destroyed.
 */
#ifdef linux
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */
68234228Sluigi
69241719Sluigi
/*
 * Requested object size and count for each pool of the global
 * allocator.  Tunable at runtime through the dev.netmap sysctls
 * declared below (DECLARE_SYSCTLS); new values take effect the next
 * time the allocator is (re)configured.
 */
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};
84241719Sluigi
85241719Sluigi
86249659Sluigi/*
87249659Sluigi * nm_mem is the memory allocator used for all physical interfaces
88249659Sluigi * running in netmap mode.
89249659Sluigi * Virtual (VALE) ports will have each its own allocator.
90249659Sluigi */
/* callbacks implementing the config/finalize/deref protocol for nm_mem */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	/* per-pool hard bounds on the sysctl-tunable requests:
	 * objminsize/objmaxsize bound the object size,
	 * nummin/nummax bound the object count. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,
};
122241719Sluigi
123257529Sluigi
124249659Sluigi// XXX logically belongs to nm_mem
125234228Sluigistruct lut_entry *netmap_buffer_lut;	/* exported */
126234228Sluigi
/* blueprint for the private memory allocators: copied by
 * netmap_mem_private_new() and then customized per interface.
 * The pool names are printf formats taking the interface name. */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 10,
		},
		[NETMAP_RING_POOL] = {
			.name 	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};
161257529Sluigi
162241719Sluigi/* memory allocator related sysctls */
163234228Sluigi
#define STRINGIFY(x) #x


/*
 * For each pool, export four integers under dev.netmap:
 *   <name>_size / <name>_num           requested values (read-write)
 *   <name>_curr_size / <name>_curr_num values currently in use (read-only)
 * The read-write knobs feed netmap_params[]; the read-only ones mirror
 * the corresponding nm_mem pool fields.
 */
#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
181241719Sluigi
182234228Sluigi/*
183249659Sluigi * First, find the allocator that contains the requested offset,
184249659Sluigi * then locate the cluster through a lookup table.
185234228Sluigi */
186257529Sluigivm_paddr_t
187257529Sluiginetmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
188234228Sluigi{
189234228Sluigi	int i;
190257529Sluigi	vm_ooffset_t o = offset;
191257529Sluigi	vm_paddr_t pa;
192257529Sluigi	struct netmap_obj_pool *p;
193234228Sluigi
194257529Sluigi	NMA_LOCK(nmd);
195257529Sluigi	p = nmd->pools;
196257529Sluigi
197257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
198257529Sluigi		if (offset >= p[i].memtotal)
199234228Sluigi			continue;
200249659Sluigi		// now lookup the cluster's address
201257529Sluigi		pa = p[i].lut[offset / p[i]._objsize].paddr +
202241719Sluigi			offset % p[i]._objsize;
203257529Sluigi		NMA_UNLOCK(nmd);
204257529Sluigi		return pa;
205234228Sluigi	}
206241719Sluigi	/* this is only in case of errors */
207234290Sluigi	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
208257529Sluigi		p[NETMAP_IF_POOL].memtotal,
209257529Sluigi		p[NETMAP_IF_POOL].memtotal
210257529Sluigi			+ p[NETMAP_RING_POOL].memtotal,
211257529Sluigi		p[NETMAP_IF_POOL].memtotal
212257529Sluigi			+ p[NETMAP_RING_POOL].memtotal
213257529Sluigi			+ p[NETMAP_BUF_POOL].memtotal);
214257529Sluigi	NMA_UNLOCK(nmd);
215234228Sluigi	return 0;	// XXX bad address
216234228Sluigi}
217234228Sluigi
218257529Sluigiint
219257529Sluiginetmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags)
220257529Sluigi{
221257529Sluigi	int error = 0;
222257529Sluigi	NMA_LOCK(nmd);
223257529Sluigi	error = nmd->config(nmd);
224257529Sluigi	if (error)
225257529Sluigi		goto out;
226257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
227257529Sluigi		*size = nmd->nm_totalsize;
228257529Sluigi	} else {
229257529Sluigi		int i;
230257529Sluigi		*size = 0;
231257529Sluigi		for (i = 0; i < NETMAP_POOLS_NR; i++) {
232257529Sluigi			struct netmap_obj_pool *p = nmd->pools + i;
233257529Sluigi			*size += (p->_numclusters * p->_clustsize);
234257529Sluigi		}
235257529Sluigi	}
236257529Sluigi	*memflags = nmd->flags;
237257529Sluigiout:
238257529Sluigi	NMA_UNLOCK(nmd);
239257529Sluigi	return error;
240257529Sluigi}
241257529Sluigi
242234228Sluigi/*
243234228Sluigi * we store objects by kernel address, need to find the offset
244234228Sluigi * within the pool to export the value to userspace.
245234228Sluigi * Algorithm: scan until we find the cluster, then add the
246234228Sluigi * actual offset in the cluster
247234228Sluigi */
248234242Sluigistatic ssize_t
249234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
250234228Sluigi{
251257529Sluigi	int i, k = p->_clustentries, n = p->objtotal;
252234228Sluigi	ssize_t ofs = 0;
253234228Sluigi
254234228Sluigi	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
255234228Sluigi		const char *base = p->lut[i].vaddr;
256234228Sluigi		ssize_t relofs = (const char *) vaddr - base;
257234228Sluigi
258249504Sluigi		if (relofs < 0 || relofs >= p->_clustsize)
259234228Sluigi			continue;
260234228Sluigi
261234228Sluigi		ofs = ofs + relofs;
262234228Sluigi		ND("%s: return offset %d (cluster %d) for pointer %p",
263234228Sluigi		    p->name, ofs, i, vaddr);
264234228Sluigi		return ofs;
265234228Sluigi	}
266234228Sluigi	D("address %p is not contained inside any cluster (%s)",
267234228Sluigi	    vaddr, p->name);
268234228Sluigi	return 0; /* An error occurred */
269234228Sluigi}
270234228Sluigi
271234228Sluigi/* Helper functions which convert virtual addresses to offsets */
272257529Sluigi#define netmap_if_offset(n, v)					\
273257529Sluigi	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
274234228Sluigi
275257529Sluigi#define netmap_ring_offset(n, v)				\
276257529Sluigi    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
277257529Sluigi	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
278234228Sluigi
279257529Sluigi#define netmap_buf_offset(n, v)					\
280257529Sluigi    ((n)->pools[NETMAP_IF_POOL].memtotal +			\
281257529Sluigi	(n)->pools[NETMAP_RING_POOL].memtotal +		\
282257529Sluigi	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
283234228Sluigi
284234228Sluigi
285257529Sluigissize_t
286257529Sluiginetmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
287257529Sluigi{
288257529Sluigi	ssize_t v;
289257529Sluigi	NMA_LOCK(nmd);
290257529Sluigi	v = netmap_if_offset(nmd, addr);
291257529Sluigi	NMA_UNLOCK(nmd);
292257529Sluigi	return v;
293257529Sluigi}
294257529Sluigi
295241719Sluigi/*
296241719Sluigi * report the index, and use start position as a hint,
297241719Sluigi * otherwise buffer allocation becomes terribly expensive.
298241719Sluigi */
299234228Sluigistatic void *
300257529Sluiginetmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
301234228Sluigi{
302234228Sluigi	uint32_t i = 0;			/* index in the bitmap */
303234228Sluigi	uint32_t mask, j;		/* slot counter */
304234228Sluigi	void *vaddr = NULL;
305234228Sluigi
306234228Sluigi	if (len > p->_objsize) {
307234228Sluigi		D("%s request size %d too large", p->name, len);
308234228Sluigi		// XXX cannot reduce the size
309234228Sluigi		return NULL;
310234228Sluigi	}
311234228Sluigi
312234228Sluigi	if (p->objfree == 0) {
313259412Sluigi		D("no more %s objects", p->name);
314234228Sluigi		return NULL;
315234228Sluigi	}
316241719Sluigi	if (start)
317241719Sluigi		i = *start;
318234228Sluigi
319241719Sluigi	/* termination is guaranteed by p->free, but better check bounds on i */
320241719Sluigi	while (vaddr == NULL && i < p->bitmap_slots)  {
321234228Sluigi		uint32_t cur = p->bitmap[i];
322234228Sluigi		if (cur == 0) { /* bitmask is fully used */
323234228Sluigi			i++;
324234228Sluigi			continue;
325234228Sluigi		}
326234228Sluigi		/* locate a slot */
327234228Sluigi		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
328234228Sluigi			;
329234228Sluigi
330234228Sluigi		p->bitmap[i] &= ~mask; /* mark object as in use */
331234228Sluigi		p->objfree--;
332234228Sluigi
333234228Sluigi		vaddr = p->lut[i * 32 + j].vaddr;
334241719Sluigi		if (index)
335241719Sluigi			*index = i * 32 + j;
336234228Sluigi	}
337234228Sluigi	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);
338234228Sluigi
339241719Sluigi	if (start)
340241719Sluigi		*start = i;
341234228Sluigi	return vaddr;
342234228Sluigi}
343234228Sluigi
344234228Sluigi
345234228Sluigi/*
346249659Sluigi * free by index, not by address. This is slow, but is only used
347249659Sluigi * for a small number of objects (rings, nifp)
348234228Sluigi */
349234228Sluigistatic void
350234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
351234228Sluigi{
352234228Sluigi	if (j >= p->objtotal) {
353234228Sluigi		D("invalid index %u, max %u", j, p->objtotal);
354234228Sluigi		return;
355234228Sluigi	}
356234228Sluigi	p->bitmap[j / 32] |= (1 << (j % 32));
357234228Sluigi	p->objfree++;
358234228Sluigi	return;
359234228Sluigi}
360234228Sluigi
361234228Sluigistatic void
362234228Sluiginetmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
363234228Sluigi{
364257529Sluigi	u_int i, j, n = p->numclusters;
365234228Sluigi
366257529Sluigi	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
367257529Sluigi		void *base = p->lut[i * p->_clustentries].vaddr;
368234228Sluigi		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
369234228Sluigi
370234228Sluigi		/* Given address, is out of the scope of the current cluster.*/
371250441Sluigi		if (vaddr < base || relofs >= p->_clustsize)
372234228Sluigi			continue;
373234228Sluigi
374234228Sluigi		j = j + relofs / p->_objsize;
375257529Sluigi		/* KASSERT(j != 0, ("Cannot free object 0")); */
376234228Sluigi		netmap_obj_free(p, j);
377234228Sluigi		return;
378234228Sluigi	}
379245835Sluigi	D("address %p is not contained inside any cluster (%s)",
380234228Sluigi	    vaddr, p->name);
381234228Sluigi}
382234228Sluigi
/* Typed wrappers around the generic object allocator, one set per pool.
 * if/ring allocations pass no hint; buffer allocation takes a position
 * hint (_pos) and returns the buffer index through _index. */
#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
394234228Sluigi
395234228Sluigi
396241719Sluigi/* Return nonzero on error */
397241719Sluigistatic int
398259412Sluiginetmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
399234228Sluigi{
400257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
401257529Sluigi	u_int i = 0;	/* slot counter */
402241719Sluigi	uint32_t pos = 0;	/* slot in p->bitmap */
403241719Sluigi	uint32_t index = 0;	/* buffer index */
404234228Sluigi
405234228Sluigi	for (i = 0; i < n; i++) {
406257529Sluigi		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
407234228Sluigi		if (vaddr == NULL) {
408259412Sluigi			D("no more buffers after %d of %d", i, n);
409234228Sluigi			goto cleanup;
410234228Sluigi		}
411241719Sluigi		slot[i].buf_idx = index;
412234228Sluigi		slot[i].len = p->_objsize;
413259412Sluigi		slot[i].flags = 0;
414234228Sluigi	}
415234228Sluigi
416241719Sluigi	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
417241719Sluigi	return (0);
418234228Sluigi
419234228Sluigicleanup:
420241643Semaste	while (i > 0) {
421241643Semaste		i--;
422241719Sluigi		netmap_obj_free(p, slot[i].buf_idx);
423234228Sluigi	}
424241719Sluigi	bzero(slot, n * sizeof(slot[0]));
425241719Sluigi	return (ENOMEM);
426234228Sluigi}
427234228Sluigi
428234228Sluigi
429234228Sluigistatic void
430259412Sluiginetmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
431234228Sluigi{
432257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
433241719Sluigi
434234228Sluigi	if (i < 2 || i >= p->objtotal) {
435234228Sluigi		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
436234228Sluigi		return;
437234228Sluigi	}
438241719Sluigi	netmap_obj_free(p, i);
439234228Sluigi}
440234228Sluigi
441234228Sluigistatic void
442241719Sluiginetmap_reset_obj_allocator(struct netmap_obj_pool *p)
443234228Sluigi{
444257529Sluigi
445234228Sluigi	if (p == NULL)
446234228Sluigi		return;
447234228Sluigi	if (p->bitmap)
448234228Sluigi		free(p->bitmap, M_NETMAP);
449241719Sluigi	p->bitmap = NULL;
450234228Sluigi	if (p->lut) {
451257529Sluigi		u_int i;
452257529Sluigi		size_t sz = p->_clustsize;
453257529Sluigi
454257529Sluigi		for (i = 0; i < p->objtotal; i += p->_clustentries) {
455234228Sluigi			if (p->lut[i].vaddr)
456257529Sluigi				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
457234228Sluigi		}
458234228Sluigi		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
459241719Sluigi#ifdef linux
460241719Sluigi		vfree(p->lut);
461241719Sluigi#else
462234228Sluigi		free(p->lut, M_NETMAP);
463241719Sluigi#endif
464234228Sluigi	}
465241719Sluigi	p->lut = NULL;
466257529Sluigi	p->objtotal = 0;
467257529Sluigi	p->memtotal = 0;
468257529Sluigi	p->numclusters = 0;
469257529Sluigi	p->objfree = 0;
470234228Sluigi}
471234228Sluigi
472234228Sluigi/*
473241719Sluigi * Free all resources related to an allocator.
474241719Sluigi */
475241719Sluigistatic void
476241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p)
477241719Sluigi{
478241719Sluigi	if (p == NULL)
479241719Sluigi		return;
480241719Sluigi	netmap_reset_obj_allocator(p);
481241719Sluigi}
482241719Sluigi
/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per entry */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/* enforce the per-pool limits declared in the allocator */
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		/* keep the candidate that wastes the least space
		 * to the next page boundary */
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i =  (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}
573241719Sluigi
574241719Sluigi
/* call with NMA_LOCK held */
/*
 * Actually allocate the memory for pool p according to the
 * configuration stored by netmap_config_obj_allocator(): lookup
 * table, free-slot bitmap and the backing clusters.  On a cluster
 * allocation failure, the allocation is halved (freeing what is
 * rolled back) and the pool keeps the reduced size; on any other
 * failure everything is released and ENOMEM is returned.
 */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap: one bit per object, packed in 32-bit words */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */

	n = p->_clustsize;	/* reuse n as the per-cluster size */
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			/* roll back: clear the bitmap bits and free one
			 * cluster per _clustentries objects */
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/* success: mark the objects free and fill the lut entries */
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}
661234228Sluigi
662241719Sluigi/* call with lock held */
663234228Sluigistatic int
664257529Sluiginetmap_memory_config_changed(struct netmap_mem_d *nmd)
665234228Sluigi{
666241719Sluigi	int i;
667234228Sluigi
668241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
669257529Sluigi		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
670257529Sluigi		    nmd->pools[i].r_objtotal != netmap_params[i].num)
671241719Sluigi		    return 1;
672241719Sluigi	}
673241719Sluigi	return 0;
674241719Sluigi}
675234228Sluigi
676257529Sluigistatic void
677257529Sluiginetmap_mem_reset_all(struct netmap_mem_d *nmd)
678257529Sluigi{
679257529Sluigi	int i;
680257529Sluigi	D("resetting %p", nmd);
681257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
682257529Sluigi		netmap_reset_obj_allocator(&nmd->pools[i]);
683257529Sluigi	}
684257529Sluigi	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
685257529Sluigi}
686234228Sluigi
/*
 * Finalize (i.e. actually allocate memory for) all pools of nmd.
 * Idempotent: returns 0 immediately if already finalized.  On any
 * failure all pools are rolled back via netmap_mem_reset_all() and
 * the error is returned (also kept in nmd->lasterr).
 */
static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved */
	/* ~3 clears bits 0 and 1 in the first bitmap word, marking the
	 * two reserved buffers busy while leaving the rest free */
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}
719257529Sluigi
720257529Sluigi
721257529Sluigi
/*
 * Destroy a private allocator previously created with
 * netmap_mem_private_new().  The refcount is expected to be zero;
 * a positive value indicates a caller bug and is only logged.
 */
void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	D("done deleting %p", nmd);
	/* NB: assumes NMA_LOCK_INIT was done on nmd (see mtx_destroy) */
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}
734257529Sluigi
/* config callback for private allocators: a no-op, since the pools
 * are configured once in netmap_mem_private_new() and the
 * configuration never changes thereafter. */
static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	(void)nmd;	/* unused: configuration is fixed at creation */
	return 0;
}
743257529Sluigi
744257529Sluigistatic int
745257529Sluiginetmap_mem_private_finalize(struct netmap_mem_d *nmd)
746257529Sluigi{
747257529Sluigi	int err;
748257529Sluigi	NMA_LOCK(nmd);
749257529Sluigi	nmd->refcount++;
750257529Sluigi	err = netmap_mem_finalize_all(nmd);
751257529Sluigi	NMA_UNLOCK(nmd);
752257529Sluigi	return err;
753257529Sluigi
754257529Sluigi}
755257529Sluigi
756259412Sluigistatic void
757259412Sluiginetmap_mem_private_deref(struct netmap_mem_d *nmd)
758257529Sluigi{
759257529Sluigi	NMA_LOCK(nmd);
760257529Sluigi	if (--nmd->refcount <= 0)
761257529Sluigi		netmap_mem_reset_all(nmd);
762257529Sluigi	NMA_UNLOCK(nmd);
763257529Sluigi}
764257529Sluigi
765257529Sluigistruct netmap_mem_d *
766257529Sluiginetmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd)
767257529Sluigi{
768257529Sluigi	struct netmap_mem_d *d = NULL;
769257529Sluigi	struct netmap_obj_params p[NETMAP_POOLS_NR];
770257529Sluigi	int i;
771257529Sluigi	u_int maxd;
772257529Sluigi
773257529Sluigi	d = malloc(sizeof(struct netmap_mem_d),
774257529Sluigi			M_DEVBUF, M_NOWAIT | M_ZERO);
775257529Sluigi	if (d == NULL)
776257529Sluigi		return NULL;
777257529Sluigi
778257529Sluigi	*d = nm_blueprint;
779257529Sluigi
780257529Sluigi	/* XXX the rest of the code assumes the stack rings are alwasy present */
781257529Sluigi	txr++;
782257529Sluigi	rxr++;
783257529Sluigi	p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) +
784257529Sluigi		sizeof(ssize_t) * (txr + rxr);
785257529Sluigi	p[NETMAP_IF_POOL].num = 2;
786257529Sluigi	maxd = (txd > rxd) ? txd : rxd;
787257529Sluigi	p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) +
788257529Sluigi		sizeof(struct netmap_slot) * maxd;
789257529Sluigi	p[NETMAP_RING_POOL].num = txr + rxr;
790257529Sluigi	p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */
791257529Sluigi	p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2);
792257529Sluigi
793257529Sluigi	D("req if %d*%d ring %d*%d buf %d*%d",
794257529Sluigi			p[NETMAP_IF_POOL].num,
795257529Sluigi			p[NETMAP_IF_POOL].size,
796257529Sluigi			p[NETMAP_RING_POOL].num,
797257529Sluigi			p[NETMAP_RING_POOL].size,
798257529Sluigi			p[NETMAP_BUF_POOL].num,
799257529Sluigi			p[NETMAP_BUF_POOL].size);
800257529Sluigi
801257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
802257529Sluigi		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
803257529Sluigi				nm_blueprint.pools[i].name,
804257529Sluigi				name);
805257529Sluigi		if (netmap_config_obj_allocator(&d->pools[i],
806257529Sluigi				p[i].num, p[i].size))
807257529Sluigi			goto error;
808257529Sluigi	}
809257529Sluigi
810257529Sluigi	d->flags &= ~NETMAP_MEM_FINALIZED;
811257529Sluigi
812257529Sluigi	NMA_LOCK_INIT(d);
813257529Sluigi
814257529Sluigi	return d;
815257529Sluigierror:
816257529Sluigi	netmap_mem_private_delete(d);
817257529Sluigi	return NULL;
818257529Sluigi}
819257529Sluigi
820257529Sluigi
/* call with lock held */
/* Reconfigure the global allocator's pools from netmap_params.
 * Does nothing if the allocator is in use (refcount != 0) or the
 * requested parameters have not changed.  Returns nmd->lasterr
 * (0 on success or nothing-to-do).
 */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->refcount)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	D("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	/* apply the new num/size pair to every pool; stop at the
	 * first failure, leaving the error in nmd->lasterr */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}
855241719Sluigi
856241719Sluigistatic int
857257529Sluiginetmap_mem_global_finalize(struct netmap_mem_d *nmd)
858241719Sluigi{
859257529Sluigi	int err;
860241719Sluigi
861257529Sluigi	NMA_LOCK(nmd);
862241719Sluigi
863257529Sluigi
864241719Sluigi	/* update configuration if changed */
865257529Sluigi	if (netmap_mem_global_config(nmd))
866241719Sluigi		goto out;
867241719Sluigi
868257529Sluigi	nmd->refcount++;
869257529Sluigi
870257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
871241719Sluigi		/* may happen if config is not changed */
872241719Sluigi		ND("nothing to do");
873241719Sluigi		goto out;
874241719Sluigi	}
875241719Sluigi
876257529Sluigi	if (netmap_mem_finalize_all(nmd))
877257529Sluigi		goto out;
878241719Sluigi
879241719Sluigi	/* backward compatibility */
880257529Sluigi	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
881257529Sluigi	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;
882241719Sluigi
883257529Sluigi	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
884257529Sluigi	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;
885241719Sluigi
886257529Sluigi	nmd->lasterr = 0;
887241719Sluigi
888241719Sluigiout:
889257529Sluigi	if (nmd->lasterr)
890257529Sluigi		nmd->refcount--;
891257529Sluigi	err = nmd->lasterr;
892241719Sluigi
893257529Sluigi	NMA_UNLOCK(nmd);
894241719Sluigi
895257529Sluigi	return err;
896241719Sluigi
897234228Sluigi}
898234228Sluigi
899257529Sluigiint
900257529Sluiginetmap_mem_init(void)
901241719Sluigi{
902257529Sluigi	NMA_LOCK_INIT(&nm_mem);
903241719Sluigi	return (0);
904241719Sluigi}
905234228Sluigi
906257529Sluigivoid
907257529Sluiginetmap_mem_fini(void)
908234228Sluigi{
909241719Sluigi	int i;
910241719Sluigi
911241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
912241719Sluigi	    netmap_destroy_obj_allocator(&nm_mem.pools[i]);
913241719Sluigi	}
914257529Sluigi	NMA_LOCK_DESTROY(&nm_mem);
915234228Sluigi}
916234228Sluigi
917241719Sluigistatic void
918241719Sluiginetmap_free_rings(struct netmap_adapter *na)
919241719Sluigi{
920257529Sluigi	u_int i;
921245835Sluigi	if (!na->tx_rings)
922245835Sluigi		return;
923241719Sluigi	for (i = 0; i < na->num_tx_rings + 1; i++) {
924257529Sluigi		if (na->tx_rings[i].ring) {
925257529Sluigi			netmap_ring_free(na->nm_mem, na->tx_rings[i].ring);
926257529Sluigi			na->tx_rings[i].ring = NULL;
927257529Sluigi		}
928241719Sluigi	}
929241719Sluigi	for (i = 0; i < na->num_rx_rings + 1; i++) {
930257529Sluigi		if (na->rx_rings[i].ring) {
931257529Sluigi			netmap_ring_free(na->nm_mem, na->rx_rings[i].ring);
932257529Sluigi			na->rx_rings[i].ring = NULL;
933257529Sluigi		}
934241719Sluigi	}
935241719Sluigi}
936234228Sluigi
937259412Sluigi/* call with NMA_LOCK held *
938257529Sluigi *
939259412Sluigi * Allocate netmap rings and buffers for this card
940259412Sluigi * The rings are contiguous, but have variable size.
941245835Sluigi */
942259412Sluigiint
943259412Sluiginetmap_mem_rings_create(struct netmap_adapter *na)
944234228Sluigi{
945234228Sluigi	struct netmap_ring *ring;
946259412Sluigi	u_int len, ndesc;
947234228Sluigi	struct netmap_kring *kring;
948234228Sluigi
949257529Sluigi	NMA_LOCK(na->nm_mem);
950257529Sluigi
951259412Sluigi	for (kring = na->tx_rings; kring != na->rx_rings; kring++) { /* Transmit rings */
952259412Sluigi		ndesc = kring->nkr_num_slots;
953234228Sluigi		len = sizeof(struct netmap_ring) +
954234228Sluigi			  ndesc * sizeof(struct netmap_slot);
955257529Sluigi		ring = netmap_ring_malloc(na->nm_mem, len);
956234228Sluigi		if (ring == NULL) {
957259412Sluigi			D("Cannot allocate tx_ring");
958234228Sluigi			goto cleanup;
959234228Sluigi		}
960234228Sluigi		ND("txring[%d] at %p ofs %d", i, ring);
961234228Sluigi		kring->ring = ring;
962259412Sluigi		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
963260368Sluigi		*(int64_t *)(uintptr_t)&ring->buf_ofs =
964257529Sluigi		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
965257529Sluigi			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
966257529Sluigi			netmap_ring_offset(na->nm_mem, ring);
967234228Sluigi
968260368Sluigi		/* copy values from kring */
969260368Sluigi		ring->head = kring->rhead;
970260368Sluigi		ring->cur = kring->rcur;
971260368Sluigi		ring->tail = kring->rtail;
972257529Sluigi		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
973257529Sluigi			NETMAP_BDG_BUF_SIZE(na->nm_mem);
974259412Sluigi		ND("initializing slots for txring");
975259412Sluigi		if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
976259412Sluigi			D("Cannot allocate buffers for tx_ring");
977241719Sluigi			goto cleanup;
978241719Sluigi		}
979234228Sluigi	}
980234228Sluigi
981259412Sluigi	for ( ; kring != na->tailroom; kring++) { /* Receive rings */
982259412Sluigi		ndesc = kring->nkr_num_slots;
983234228Sluigi		len = sizeof(struct netmap_ring) +
984234228Sluigi			  ndesc * sizeof(struct netmap_slot);
985257529Sluigi		ring = netmap_ring_malloc(na->nm_mem, len);
986234228Sluigi		if (ring == NULL) {
987259412Sluigi			D("Cannot allocate rx_ring");
988234228Sluigi			goto cleanup;
989234228Sluigi		}
990259412Sluigi		ND("rxring at %p ofs %d", ring);
991234228Sluigi
992234228Sluigi		kring->ring = ring;
993259412Sluigi		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
994260368Sluigi		*(int64_t *)(uintptr_t)&ring->buf_ofs =
995257529Sluigi		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
996257529Sluigi		        na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
997257529Sluigi			netmap_ring_offset(na->nm_mem, ring);
998234228Sluigi
999260368Sluigi		/* copy values from kring */
1000260368Sluigi		ring->head = kring->rhead;
1001260368Sluigi		ring->cur = kring->rcur;
1002260368Sluigi		ring->tail = kring->rtail;
1003257529Sluigi		*(int *)(uintptr_t)&ring->nr_buf_size =
1004257529Sluigi			NETMAP_BDG_BUF_SIZE(na->nm_mem);
1005234228Sluigi		ND("initializing slots for rxring[%d]", i);
1006259412Sluigi		if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1007259412Sluigi			D("Cannot allocate buffers for rx_ring");
1008241719Sluigi			goto cleanup;
1009241719Sluigi		}
1010234228Sluigi	}
1011259412Sluigi
1012259412Sluigi	NMA_UNLOCK(na->nm_mem);
1013259412Sluigi
1014259412Sluigi	return 0;
1015259412Sluigi
1016259412Sluigicleanup:
1017259412Sluigi	netmap_free_rings(na);
1018259412Sluigi
1019259412Sluigi	NMA_UNLOCK(na->nm_mem);
1020259412Sluigi
1021259412Sluigi	return ENOMEM;
1022259412Sluigi}
1023259412Sluigi
/* Release all buffers, then all rings, of an adapter (tx, rx and the
 * host rings).  Takes the allocator lock internally. */
void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	u_int i, lim;
	struct netmap_kring *kring;
	struct netmap_ring *ring;

	NMA_LOCK(na->nm_mem);

	/* the kring array runs tx_rings..tailroom, covering every ring */
	for (kring = na->tx_rings; kring != na->tailroom; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		lim = kring->nkr_num_slots;
		/* return each slot's buffer to the buffer pool */
		for (i = 0; i < lim; i++)
			netmap_free_buf(na->nm_mem, ring->slot[i].buf_idx);
	}
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}
1046259412Sluigi
1047259412Sluigi
1048259412Sluigi/* call with NMA_LOCK held */
1049259412Sluigi/*
1050259412Sluigi * Allocate the per-fd structure netmap_if.
1051259412Sluigi *
1052259412Sluigi * We assume that the configuration stored in na
1053259412Sluigi * (number of tx/rx rings and descs) does not change while
1054259412Sluigi * the interface is in netmap mode.
1055259412Sluigi */
1056259412Sluigistruct netmap_if *
1057259412Sluiginetmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
1058259412Sluigi{
1059259412Sluigi	struct netmap_if *nifp;
1060259412Sluigi	ssize_t base; /* handy for relative offsets between rings and nifp */
1061259412Sluigi	u_int i, len, ntx, nrx;
1062259412Sluigi
1063234228Sluigi	/*
1064259412Sluigi	 * verify whether virtual port need the stack ring
1065259412Sluigi	 */
1066259412Sluigi	ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
1067259412Sluigi	nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
1068259412Sluigi	/*
1069259412Sluigi	 * the descriptor is followed inline by an array of offsets
1070259412Sluigi	 * to the tx and rx rings in the shared memory region.
1071259412Sluigi	 * For virtual rx rings we also allocate an array of
1072259412Sluigi	 * pointers to assign to nkr_leases.
1073259412Sluigi	 */
1074259412Sluigi
1075259412Sluigi	NMA_LOCK(na->nm_mem);
1076259412Sluigi
1077259412Sluigi	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
1078259412Sluigi	nifp = netmap_if_malloc(na->nm_mem, len);
1079259412Sluigi	if (nifp == NULL) {
1080259412Sluigi		NMA_UNLOCK(na->nm_mem);
1081259412Sluigi		return NULL;
1082259412Sluigi	}
1083259412Sluigi
1084259412Sluigi	/* initialize base fields -- override const */
1085259412Sluigi	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
1086259412Sluigi	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
1087259412Sluigi	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);
1088259412Sluigi
1089259412Sluigi	/*
1090234228Sluigi	 * fill the slots for the rx and tx rings. They contain the offset
1091234228Sluigi	 * between the ring and nifp, so the information is usable in
1092234228Sluigi	 * userspace to reach the ring from the nifp.
1093234228Sluigi	 */
1094257529Sluigi	base = netmap_if_offset(na->nm_mem, nifp);
1095234228Sluigi	for (i = 0; i < ntx; i++) {
1096234228Sluigi		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
1097257529Sluigi			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
1098234228Sluigi	}
1099234228Sluigi	for (i = 0; i < nrx; i++) {
1100234228Sluigi		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
1101257529Sluigi			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
1102234228Sluigi	}
1103257529Sluigi
1104257529Sluigi	NMA_UNLOCK(na->nm_mem);
1105257529Sluigi
1106234228Sluigi	return (nifp);
1107234228Sluigi}
1108234228Sluigi
/* Return a netmap_if descriptor to its pool.  NULL is a no-op. */
void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);

	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}
1121257529Sluigi
/* Drop a reference on the global allocator.  Unlike the private
 * variant, pool memory is NOT released when the count reaches zero.
 */
static void
netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);

	nmd->refcount--;
	if (netmap_verbose)
		D("refcount = %d", nmd->refcount);

	NMA_UNLOCK(nmd);
}
1133257529Sluigi
1134259412Sluigiint
1135259412Sluiginetmap_mem_finalize(struct netmap_mem_d *nmd)
1136257529Sluigi{
1137257529Sluigi	return nmd->finalize(nmd);
1138257529Sluigi}
1139257529Sluigi
1140259412Sluigivoid
1141259412Sluiginetmap_mem_deref(struct netmap_mem_d *nmd)
1142257529Sluigi{
1143257529Sluigi	return nmd->deref(nmd);
1144257529Sluigi}
1145