netmap_mem2.c revision 261909
1234228Sluigi/*
2260368Sluigi * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
3234228Sluigi *
4234228Sluigi * Redistribution and use in source and binary forms, with or without
5234228Sluigi * modification, are permitted provided that the following conditions
6234228Sluigi * are met:
7234228Sluigi *   1. Redistributions of source code must retain the above copyright
8234228Sluigi *      notice, this list of conditions and the following disclaimer.
9234228Sluigi *   2. Redistributions in binary form must reproduce the above copyright
10234228Sluigi *      notice, this list of conditions and the following disclaimer in the
11259412Sluigi *      documentation and/or other materials provided with the distribution.
12234228Sluigi *
13234228Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14234228Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15234228Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16234228Sluigi * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17234228Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18234228Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19234228Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20234228Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21234228Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22234228Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23234228Sluigi * SUCH DAMAGE.
24234228Sluigi */
25234228Sluigi
26257529Sluigi#ifdef linux
27257529Sluigi#include "bsd_glue.h"
28257529Sluigi#endif /* linux */
29234228Sluigi
30257529Sluigi#ifdef __APPLE__
31257529Sluigi#include "osx_glue.h"
32257529Sluigi#endif /* __APPLE__ */
33234228Sluigi
34257529Sluigi#ifdef __FreeBSD__
35257529Sluigi#include <sys/cdefs.h> /* prerequisite */
36257529Sluigi__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap_mem2.c 261909 2014-02-15 04:53:04Z luigi $");
37234228Sluigi
38257529Sluigi#include <sys/types.h>
39257529Sluigi#include <sys/malloc.h>
40257529Sluigi#include <sys/proc.h>
41257529Sluigi#include <vm/vm.h>	/* vtophys */
42257529Sluigi#include <vm/pmap.h>	/* vtophys */
43257529Sluigi#include <sys/socket.h> /* sockaddrs */
44257529Sluigi#include <sys/selinfo.h>
45257529Sluigi#include <sys/sysctl.h>
46257529Sluigi#include <net/if.h>
47257529Sluigi#include <net/if_var.h>
48257529Sluigi#include <net/vnet.h>
49257529Sluigi#include <machine/bus.h>	/* bus_dmamap_* */
50257529Sluigi
51257529Sluigi#endif /* __FreeBSD__ */
52257529Sluigi
53257529Sluigi#include <net/netmap.h>
54257529Sluigi#include <dev/netmap/netmap_kern.h>
55257529Sluigi#include "netmap_mem2.h"
56257529Sluigi
#ifdef linux
/*
 * Allocator lock. On Linux we use a semaphore, which may sleep;
 * on FreeBSD a default (sleepable-context) mutex.
 */
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */
68234228Sluigi
69241719Sluigi
/*
 * Requested size/number for each object pool of the global allocator.
 * Tunable via the dev.netmap sysctls declared below; actual values are
 * clamped to the [objminsize, objmaxsize] / [nummin, nummax] ranges in
 * nm_mem at configuration time.
 */
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};
84241719Sluigi
/*
 * Default (minimum) pool parameters used when building a private
 * allocator for a VALE/private port. Tunable via the priv_* sysctls.
 */
struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 1,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		/* NOTE(review): 4098, not 4096 — presumably 4096 usable
		 * buffers plus the 2 reserved indices (0 and 1); confirm. */
		.num  = 4098,
	},
};
99241719Sluigi
100261909Sluigi
/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will have each its own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,

	.nm_id = 1,	/* id 0 is reserved as an error value */

	/* self-linked: nm_mem is the anchor of the circular list of
	 * allocators used by nm_mem_assign_id()/nm_mem_release_id() */
	.prev = &nm_mem,
	.next = &nm_mem,
};
142241719Sluigi
143257529Sluigi
/* last allocator inserted; used as the scan hint by nm_mem_assign_id() */
struct netmap_mem_d *netmap_last_mem_d = &nm_mem;

/* XXX logically belongs to nm_mem */
struct lut_entry *netmap_buffer_lut;	/* exported */
148234228Sluigi
/* blueprint for the private memory allocators; copied to build each
 * private allocator, with "%s" in the pool names replaced by the
 * port name */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name 	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};
183257529Sluigi
/* memory allocator related sysctls */

#define STRINGIFY(x) #x


/*
 * For each pool, expose under dev.netmap:
 *   <name>_size / <name>_num           requested values (RW, global pools)
 *   <name>_curr_size / <name>_curr_num current values (RD)
 *   priv_<name>_size / priv_<name>_num defaults for private allocators (RW)
 */
#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
209241719Sluigi
/*
 * Assign a fresh allocator id to nmd and link it into the circular
 * list of allocators (anchored at nm_mem).  The list is kept in id
 * order, so a full scan starting at the last inserted allocator finds
 * the first free id; id 0 is reserved as an error value.
 * Returns 0 on success, ENOMEM if all ids are taken.
 */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	NMA_LOCK(&nm_mem);

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			/* found a hole: insert nmd just before 'scan' */
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	NMA_UNLOCK(&nm_mem);
	return error;
}
240261909Sluigi
241261909Sluigistatic void
242261909Sluiginm_mem_release_id(struct netmap_mem_d *nmd)
243261909Sluigi{
244261909Sluigi	NMA_LOCK(&nm_mem);
245261909Sluigi
246261909Sluigi	nmd->prev->next = nmd->next;
247261909Sluigi	nmd->next->prev = nmd->prev;
248261909Sluigi
249261909Sluigi	if (netmap_last_mem_d == nmd)
250261909Sluigi		netmap_last_mem_d = nmd->prev;
251261909Sluigi
252261909Sluigi	nmd->prev = nmd->next = NULL;
253261909Sluigi
254261909Sluigi	NMA_UNLOCK(&nm_mem);
255261909Sluigi}
256261909Sluigi
257261909Sluigi
/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 * Pools are laid out back to back (IF, RING, BUF), so we walk them
 * subtracting each pool's total size until the offset falls inside one.
 * Returns 0 on an out-of-range offset.
 */
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;	/* keep the original for the error msg */
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	/* NB: offset is rebased to the current pool in the loop clause */
	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}
293234228Sluigi
294257529Sluigiint
295261909Sluiginetmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
296261909Sluigi	nm_memid_t *id)
297257529Sluigi{
298257529Sluigi	int error = 0;
299257529Sluigi	NMA_LOCK(nmd);
300257529Sluigi	error = nmd->config(nmd);
301257529Sluigi	if (error)
302257529Sluigi		goto out;
303257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
304257529Sluigi		*size = nmd->nm_totalsize;
305257529Sluigi	} else {
306257529Sluigi		int i;
307257529Sluigi		*size = 0;
308257529Sluigi		for (i = 0; i < NETMAP_POOLS_NR; i++) {
309257529Sluigi			struct netmap_obj_pool *p = nmd->pools + i;
310257529Sluigi			*size += (p->_numclusters * p->_clustsize);
311257529Sluigi		}
312257529Sluigi	}
313257529Sluigi	*memflags = nmd->flags;
314261909Sluigi	*id = nmd->nm_id;
315257529Sluigiout:
316257529Sluigi	NMA_UNLOCK(nmd);
317257529Sluigi	return error;
318257529Sluigi}
319257529Sluigi
320234228Sluigi/*
321234228Sluigi * we store objects by kernel address, need to find the offset
322234228Sluigi * within the pool to export the value to userspace.
323234228Sluigi * Algorithm: scan until we find the cluster, then add the
324234228Sluigi * actual offset in the cluster
325234228Sluigi */
326234242Sluigistatic ssize_t
327234228Sluiginetmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
328234228Sluigi{
329257529Sluigi	int i, k = p->_clustentries, n = p->objtotal;
330234228Sluigi	ssize_t ofs = 0;
331234228Sluigi
332234228Sluigi	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
333234228Sluigi		const char *base = p->lut[i].vaddr;
334234228Sluigi		ssize_t relofs = (const char *) vaddr - base;
335234228Sluigi
336249504Sluigi		if (relofs < 0 || relofs >= p->_clustsize)
337234228Sluigi			continue;
338234228Sluigi
339234228Sluigi		ofs = ofs + relofs;
340234228Sluigi		ND("%s: return offset %d (cluster %d) for pointer %p",
341234228Sluigi		    p->name, ofs, i, vaddr);
342234228Sluigi		return ofs;
343234228Sluigi	}
344234228Sluigi	D("address %p is not contained inside any cluster (%s)",
345234228Sluigi	    vaddr, p->name);
346234228Sluigi	return 0; /* An error occurred */
347234228Sluigi}
348234228Sluigi
/* Helper functions which convert virtual addresses to offsets.
 * Offsets are relative to the start of the whole shared region;
 * pools are laid out in order IF, RING, BUF, so each macro adds
 * the total size of the preceding pools. */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
    ((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +		\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
361234228Sluigi
362234228Sluigi
363257529Sluigissize_t
364257529Sluiginetmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
365257529Sluigi{
366257529Sluigi	ssize_t v;
367257529Sluigi	NMA_LOCK(nmd);
368257529Sluigi	v = netmap_if_offset(nmd, addr);
369257529Sluigi	NMA_UNLOCK(nmd);
370257529Sluigi	return v;
371257529Sluigi}
372257529Sluigi
/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 *
 * Allocate one object of at most p->_objsize bytes from pool p by
 * scanning the free bitmap (a set bit means free).  *start, if not
 * NULL, is the bitmap word to start from and is updated on return;
 * *index, if not NULL, receives the object index.
 * Returns the object's kernel virtual address, or NULL on failure.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->free, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot: find the lowest set bit in cur */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);

	if (start)
		*start = i;	/* return the scan hint to the caller */
	return vaddr;
}
421234228Sluigi
422234228Sluigi
423234228Sluigi/*
424261909Sluigi * free by index, not by address.
425261909Sluigi * XXX should we also cleanup the content ?
426234228Sluigi */
427261909Sluigistatic int
428234228Sluiginetmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
429234228Sluigi{
430261909Sluigi	uint32_t *ptr, mask;
431261909Sluigi
432234228Sluigi	if (j >= p->objtotal) {
433234228Sluigi		D("invalid index %u, max %u", j, p->objtotal);
434261909Sluigi		return 1;
435234228Sluigi	}
436261909Sluigi	ptr = &p->bitmap[j / 32];
437261909Sluigi	mask = (1 << (j % 32));
438261909Sluigi	if (*ptr & mask) {
439261909Sluigi		D("ouch, double free on buffer %d", j);
440261909Sluigi		return 1;
441261909Sluigi	} else {
442261909Sluigi		*ptr |= mask;
443261909Sluigi		p->objfree++;
444261909Sluigi		return 0;
445261909Sluigi	}
446234228Sluigi}
447234228Sluigi
/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 * Scans the clusters to find the one containing vaddr, derives the
 * object index, and frees by index.
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	/* j tracks the index of the first object of cluster i */
	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* Given address, is out of the scope of the current cluster.*/
		/* (vaddr < base keeps relofs non-negative for the unsigned
		 * comparison against _clustsize) */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}
473234228Sluigi
/* convenience wrappers over the generic object allocator, one pair
 * per pool; buffer allocation takes a scan-position hint and returns
 * the buffer index through _index */
#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)


#if 0 // XXX unused
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif
487234228Sluigi
/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 * The list is threaded through the buffers themselves: the first
 * 4 bytes of each buffer hold the index of the next one; *head
 * receives the index of the first buffer (0 == empty list).
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		/* netmap_buf_malloc() writes the new index into *head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			D("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		RD(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}
517261909Sluigi
/*
 * Free a linked list of extra buffers built by netmap_extra_alloc():
 * follow the next-index stored in the first word of each buffer,
 * clearing the link and returning each buffer to the pool.
 * Indices below 2 terminate the walk (0/1 are never allocated).
 */
static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	D("freeing the extra list");
	for (i = 0; head >=2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;	/* advance before the buffer is reused */
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		D("breaking with head %d", head);
	D("freed %d buffers", i);
}
539261909Sluigi
540261909Sluigi
541241719Sluigi/* Return nonzero on error */
542241719Sluigistatic int
543259412Sluiginetmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
544234228Sluigi{
545257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
546257529Sluigi	u_int i = 0;	/* slot counter */
547241719Sluigi	uint32_t pos = 0;	/* slot in p->bitmap */
548241719Sluigi	uint32_t index = 0;	/* buffer index */
549234228Sluigi
550234228Sluigi	for (i = 0; i < n; i++) {
551257529Sluigi		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
552234228Sluigi		if (vaddr == NULL) {
553259412Sluigi			D("no more buffers after %d of %d", i, n);
554234228Sluigi			goto cleanup;
555234228Sluigi		}
556241719Sluigi		slot[i].buf_idx = index;
557234228Sluigi		slot[i].len = p->_objsize;
558259412Sluigi		slot[i].flags = 0;
559234228Sluigi	}
560234228Sluigi
561241719Sluigi	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
562241719Sluigi	return (0);
563234228Sluigi
564234228Sluigicleanup:
565241643Semaste	while (i > 0) {
566241643Semaste		i--;
567241719Sluigi		netmap_obj_free(p, slot[i].buf_idx);
568234228Sluigi	}
569241719Sluigi	bzero(slot, n * sizeof(slot[0]));
570241719Sluigi	return (ENOMEM);
571234228Sluigi}
572234228Sluigi
573261909Sluigistatic void
574261909Sluiginetmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
575261909Sluigi{
576261909Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
577261909Sluigi	u_int i;
578234228Sluigi
579261909Sluigi	for (i = 0; i < n; i++) {
580261909Sluigi		slot[i].buf_idx = index;
581261909Sluigi		slot[i].len = p->_objsize;
582261909Sluigi		slot[i].flags = 0;
583261909Sluigi	}
584261909Sluigi}
585261909Sluigi
586261909Sluigi
587234228Sluigistatic void
588259412Sluiginetmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
589234228Sluigi{
590257529Sluigi	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
591241719Sluigi
592234228Sluigi	if (i < 2 || i >= p->objtotal) {
593234228Sluigi		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
594234228Sluigi		return;
595234228Sluigi	}
596241719Sluigi	netmap_obj_free(p, i);
597234228Sluigi}
598234228Sluigi
599261909Sluigi
600234228Sluigistatic void
601261909Sluiginetmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
602261909Sluigi{
603261909Sluigi	u_int i;
604261909Sluigi
605261909Sluigi	for (i = 0; i < n; i++) {
606261909Sluigi		if (slot[i].buf_idx > 2)
607261909Sluigi			netmap_free_buf(nmd, slot[i].buf_idx);
608261909Sluigi	}
609261909Sluigi}
610261909Sluigi
/*
 * Release all memory owned by pool p (bitmap, clusters, lookup table)
 * and reset its current-state counters. The configured parameters
 * (_objtotal, _clustsize, ...) are preserved so the pool can be
 * finalized again later.
 */
static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		/* one contigmalloc'ed cluster per _clustentries objects;
		 * only the first lut entry of each cluster is freed */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}
641234228Sluigi
642234228Sluigi/*
643241719Sluigi * Free all resources related to an allocator.
644241719Sluigi */
645241719Sluigistatic void
646241719Sluiginetmap_destroy_obj_allocator(struct netmap_obj_pool *p)
647241719Sluigi{
648241719Sluigi	if (p == NULL)
649241719Sluigi		return;
650241719Sluigi	netmap_reset_obj_allocator(p);
651241719Sluigi}
652241719Sluigi
/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per entry */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/* reject requests outside the pool's configured ranges */
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		/* keep the candidate that wastes the least to the
		 * next page boundary */
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i =  (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}
743241719Sluigi
744241719Sluigi
745241719Sluigi/* call with NMA_LOCK held */
746241719Sluigistatic int
747241719Sluiginetmap_finalize_obj_allocator(struct netmap_obj_pool *p)
748241719Sluigi{
749257529Sluigi	int i; /* must be signed */
750257529Sluigi	size_t n;
751241719Sluigi
752257529Sluigi	/* optimistically assume we have enough memory */
753257529Sluigi	p->numclusters = p->_numclusters;
754257529Sluigi	p->objtotal = p->_objtotal;
755257529Sluigi
756241719Sluigi	n = sizeof(struct lut_entry) * p->objtotal;
757241719Sluigi#ifdef linux
758241719Sluigi	p->lut = vmalloc(n);
759241719Sluigi#else
760241750Semaste	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
761241719Sluigi#endif
762234228Sluigi	if (p->lut == NULL) {
763257529Sluigi		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
764234228Sluigi		goto clean;
765234228Sluigi	}
766234228Sluigi
767234228Sluigi	/* Allocate the bitmap */
768234228Sluigi	n = (p->objtotal + 31) / 32;
769241750Semaste	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
770234228Sluigi	if (p->bitmap == NULL) {
771257529Sluigi		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
772241719Sluigi		    p->name);
773234228Sluigi		goto clean;
774234228Sluigi	}
775241719Sluigi	p->bitmap_slots = n;
776234228Sluigi
777234228Sluigi	/*
778234228Sluigi	 * Allocate clusters, init pointers and bitmap
779234228Sluigi	 */
780257529Sluigi
781257529Sluigi	n = p->_clustsize;
782257529Sluigi	for (i = 0; i < (int)p->objtotal;) {
783257529Sluigi		int lim = i + p->_clustentries;
784234228Sluigi		char *clust;
785234228Sluigi
786257529Sluigi		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
787257529Sluigi		    (size_t)0, -1UL, PAGE_SIZE, 0);
788234228Sluigi		if (clust == NULL) {
789234228Sluigi			/*
790234228Sluigi			 * If we get here, there is a severe memory shortage,
791234228Sluigi			 * so halve the allocated memory to reclaim some.
792234228Sluigi			 */
793234228Sluigi			D("Unable to create cluster at %d for '%s' allocator",
794241719Sluigi			    i, p->name);
795257529Sluigi			if (i < 2) /* nothing to halve */
796257529Sluigi				goto out;
797234228Sluigi			lim = i / 2;
798241719Sluigi			for (i--; i >= lim; i--) {
799234228Sluigi				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
800257529Sluigi				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
801234228Sluigi					contigfree(p->lut[i].vaddr,
802257529Sluigi						n, M_NETMAP);
803234228Sluigi			}
804257529Sluigi		out:
805234228Sluigi			p->objtotal = i;
806257529Sluigi			/* we may have stopped in the middle of a cluster */
807257529Sluigi			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
808234228Sluigi			break;
809234228Sluigi		}
810241719Sluigi		for (; i < lim; i++, clust += p->_objsize) {
811234228Sluigi			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
812234228Sluigi			p->lut[i].vaddr = clust;
813234228Sluigi			p->lut[i].paddr = vtophys(clust);
814234228Sluigi		}
815234228Sluigi	}
816257529Sluigi	p->objfree = p->objtotal;
817257529Sluigi	p->memtotal = p->numclusters * p->_clustsize;
818257529Sluigi	if (p->objfree == 0)
819257529Sluigi		goto clean;
820245835Sluigi	if (netmap_verbose)
821245835Sluigi		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
822257529Sluigi		    p->numclusters, p->_clustsize >> 10,
823257529Sluigi		    p->memtotal >> 10, p->name);
824234228Sluigi
825241719Sluigi	return 0;
826234228Sluigi
827234228Sluigiclean:
828241719Sluigi	netmap_reset_obj_allocator(p);
829241719Sluigi	return ENOMEM;
830234228Sluigi}
831234228Sluigi
832241719Sluigi/* call with lock held */
833234228Sluigistatic int
834257529Sluiginetmap_memory_config_changed(struct netmap_mem_d *nmd)
835234228Sluigi{
836241719Sluigi	int i;
837234228Sluigi
838241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
839257529Sluigi		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
840257529Sluigi		    nmd->pools[i].r_objtotal != netmap_params[i].num)
841241719Sluigi		    return 1;
842241719Sluigi	}
843241719Sluigi	return 0;
844241719Sluigi}
845234228Sluigi
846257529Sluigistatic void
847257529Sluiginetmap_mem_reset_all(struct netmap_mem_d *nmd)
848257529Sluigi{
849257529Sluigi	int i;
850261909Sluigi
851261909Sluigi	if (netmap_verbose)
852261909Sluigi		D("resetting %p", nmd);
853257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
854257529Sluigi		netmap_reset_obj_allocator(&nmd->pools[i]);
855257529Sluigi	}
856257529Sluigi	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
857257529Sluigi}
858234228Sluigi
859257529Sluigistatic int
860257529Sluiginetmap_mem_finalize_all(struct netmap_mem_d *nmd)
861257529Sluigi{
862257529Sluigi	int i;
863257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED)
864257529Sluigi		return 0;
865257529Sluigi	nmd->lasterr = 0;
866257529Sluigi	nmd->nm_totalsize = 0;
867257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
868257529Sluigi		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
869257529Sluigi		if (nmd->lasterr)
870257529Sluigi			goto error;
871257529Sluigi		nmd->nm_totalsize += nmd->pools[i].memtotal;
872257529Sluigi	}
873257529Sluigi	/* buffers 0 and 1 are reserved */
874257529Sluigi	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
875257529Sluigi	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
876257529Sluigi	nmd->flags |= NETMAP_MEM_FINALIZED;
877257529Sluigi
878261909Sluigi	if (netmap_verbose)
879261909Sluigi		D("interfaces %d KB, rings %d KB, buffers %d MB",
880261909Sluigi		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
881261909Sluigi		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
882261909Sluigi		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
883257529Sluigi
884261909Sluigi	if (netmap_verbose)
885261909Sluigi		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
886257529Sluigi
887257529Sluigi
888257529Sluigi	return 0;
889257529Sluigierror:
890257529Sluigi	netmap_mem_reset_all(nmd);
891257529Sluigi	return nmd->lasterr;
892257529Sluigi}
893257529Sluigi
894257529Sluigi
895257529Sluigi
896257529Sluigivoid
897257529Sluiginetmap_mem_private_delete(struct netmap_mem_d *nmd)
898257529Sluigi{
899257529Sluigi	if (nmd == NULL)
900257529Sluigi		return;
901261909Sluigi	if (netmap_verbose)
902261909Sluigi		D("deleting %p", nmd);
903257529Sluigi	if (nmd->refcount > 0)
904257529Sluigi		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
905261909Sluigi	nm_mem_release_id(nmd);
906261909Sluigi	if (netmap_verbose)
907261909Sluigi		D("done deleting %p", nmd);
908257529Sluigi	NMA_LOCK_DESTROY(nmd);
909257529Sluigi	free(nmd, M_DEVBUF);
910257529Sluigi}
911257529Sluigi
/*
 * Configuration hook for private allocators.  A private allocator is
 * fully configured when it is created and never reconfigured, so this
 * is intentionally a no-op that always succeeds.
 */
static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	return 0;
}
920257529Sluigi
921257529Sluigistatic int
922257529Sluiginetmap_mem_private_finalize(struct netmap_mem_d *nmd)
923257529Sluigi{
924257529Sluigi	int err;
925257529Sluigi	NMA_LOCK(nmd);
926257529Sluigi	nmd->refcount++;
927257529Sluigi	err = netmap_mem_finalize_all(nmd);
928257529Sluigi	NMA_UNLOCK(nmd);
929257529Sluigi	return err;
930257529Sluigi
931257529Sluigi}
932257529Sluigi
933259412Sluigistatic void
934259412Sluiginetmap_mem_private_deref(struct netmap_mem_d *nmd)
935257529Sluigi{
936257529Sluigi	NMA_LOCK(nmd);
937257529Sluigi	if (--nmd->refcount <= 0)
938257529Sluigi		netmap_mem_reset_all(nmd);
939257529Sluigi	NMA_UNLOCK(nmd);
940257529Sluigi}
941257529Sluigi
942261909Sluigi
943261909Sluigi/*
944261909Sluigi * allocator for private memory
945261909Sluigi */
946257529Sluigistruct netmap_mem_d *
947261909Sluiginetmap_mem_private_new(const char *name, u_int txr, u_int txd,
948261909Sluigi	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
949257529Sluigi{
950257529Sluigi	struct netmap_mem_d *d = NULL;
951257529Sluigi	struct netmap_obj_params p[NETMAP_POOLS_NR];
952261909Sluigi	int i, err;
953261909Sluigi	u_int v, maxd;
954257529Sluigi
955257529Sluigi	d = malloc(sizeof(struct netmap_mem_d),
956257529Sluigi			M_DEVBUF, M_NOWAIT | M_ZERO);
957261909Sluigi	if (d == NULL) {
958261909Sluigi		err = ENOMEM;
959261909Sluigi		goto error;
960261909Sluigi	}
961257529Sluigi
962257529Sluigi	*d = nm_blueprint;
963257529Sluigi
964261909Sluigi	err = nm_mem_assign_id(d);
965261909Sluigi	if (err)
966261909Sluigi		goto error;
967261909Sluigi
968261909Sluigi	/* account for the fake host rings */
969257529Sluigi	txr++;
970257529Sluigi	rxr++;
971261909Sluigi
972261909Sluigi	/* copy the min values */
973261909Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
974261909Sluigi		p[i] = netmap_min_priv_params[i];
975261909Sluigi	}
976261909Sluigi
977261909Sluigi	/* possibly increase them to fit user request */
978261909Sluigi	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
979261909Sluigi	if (p[NETMAP_IF_POOL].size < v)
980261909Sluigi		p[NETMAP_IF_POOL].size = v;
981261909Sluigi	v = 2 + 4 * npipes;
982261909Sluigi	if (p[NETMAP_IF_POOL].num < v)
983261909Sluigi		p[NETMAP_IF_POOL].num = v;
984257529Sluigi	maxd = (txd > rxd) ? txd : rxd;
985261909Sluigi	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
986261909Sluigi	if (p[NETMAP_RING_POOL].size < v)
987261909Sluigi		p[NETMAP_RING_POOL].size = v;
988261909Sluigi	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
989261909Sluigi         * and two rx rings (again, 1 normal and 1 fake host)
990261909Sluigi         */
991261909Sluigi	v = txr + rxr + 8 * npipes;
992261909Sluigi	if (p[NETMAP_RING_POOL].num < v)
993261909Sluigi		p[NETMAP_RING_POOL].num = v;
994261909Sluigi	/* for each pipe we only need the buffers for the 4 "real" rings.
995261909Sluigi         * On the other end, the pipe ring dimension may be different from
996261909Sluigi         * the parent port ring dimension. As a compromise, we allocate twice the
997261909Sluigi         * space actually needed if the pipe rings were the same size as the parent rings
998261909Sluigi         */
999261909Sluigi	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1000261909Sluigi		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1001261909Sluigi	if (p[NETMAP_BUF_POOL].num < v)
1002261909Sluigi		p[NETMAP_BUF_POOL].num = v;
1003257529Sluigi
1004261909Sluigi	if (netmap_verbose)
1005261909Sluigi		D("req if %d*%d ring %d*%d buf %d*%d",
1006257529Sluigi			p[NETMAP_IF_POOL].num,
1007257529Sluigi			p[NETMAP_IF_POOL].size,
1008257529Sluigi			p[NETMAP_RING_POOL].num,
1009257529Sluigi			p[NETMAP_RING_POOL].size,
1010257529Sluigi			p[NETMAP_BUF_POOL].num,
1011257529Sluigi			p[NETMAP_BUF_POOL].size);
1012257529Sluigi
1013257529Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1014257529Sluigi		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1015257529Sluigi				nm_blueprint.pools[i].name,
1016257529Sluigi				name);
1017261909Sluigi		err = netmap_config_obj_allocator(&d->pools[i],
1018261909Sluigi				p[i].num, p[i].size);
1019261909Sluigi		if (err)
1020257529Sluigi			goto error;
1021257529Sluigi	}
1022257529Sluigi
1023257529Sluigi	d->flags &= ~NETMAP_MEM_FINALIZED;
1024257529Sluigi
1025257529Sluigi	NMA_LOCK_INIT(d);
1026257529Sluigi
1027257529Sluigi	return d;
1028257529Sluigierror:
1029257529Sluigi	netmap_mem_private_delete(d);
1030261909Sluigi	if (perr)
1031261909Sluigi		*perr = err;
1032257529Sluigi	return NULL;
1033257529Sluigi}
1034257529Sluigi
1035257529Sluigi
1036241719Sluigi/* call with lock held */
1037241719Sluigistatic int
1038257529Sluiginetmap_mem_global_config(struct netmap_mem_d *nmd)
1039241719Sluigi{
1040241719Sluigi	int i;
1041234228Sluigi
1042257529Sluigi	if (nmd->refcount)
1043257529Sluigi		/* already in use, we cannot change the configuration */
1044241719Sluigi		goto out;
1045234228Sluigi
1046257529Sluigi	if (!netmap_memory_config_changed(nmd))
1047257529Sluigi		goto out;
1048257529Sluigi
1049241719Sluigi	D("reconfiguring");
1050241719Sluigi
1051257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1052241719Sluigi		/* reset previous allocation */
1053241719Sluigi		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1054257529Sluigi			netmap_reset_obj_allocator(&nmd->pools[i]);
1055250184Sluigi		}
1056257529Sluigi		nmd->flags &= ~NETMAP_MEM_FINALIZED;
1057259412Sluigi	}
1058241719Sluigi
1059241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1060257529Sluigi		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1061241719Sluigi				netmap_params[i].num, netmap_params[i].size);
1062257529Sluigi		if (nmd->lasterr)
1063241719Sluigi			goto out;
1064241719Sluigi	}
1065241719Sluigi
1066241719Sluigiout:
1067241719Sluigi
1068257529Sluigi	return nmd->lasterr;
1069241719Sluigi}
1070241719Sluigi
1071241719Sluigistatic int
1072257529Sluiginetmap_mem_global_finalize(struct netmap_mem_d *nmd)
1073241719Sluigi{
1074257529Sluigi	int err;
1075241719Sluigi
1076257529Sluigi	NMA_LOCK(nmd);
1077241719Sluigi
1078257529Sluigi
1079241719Sluigi	/* update configuration if changed */
1080257529Sluigi	if (netmap_mem_global_config(nmd))
1081241719Sluigi		goto out;
1082241719Sluigi
1083257529Sluigi	nmd->refcount++;
1084257529Sluigi
1085257529Sluigi	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1086241719Sluigi		/* may happen if config is not changed */
1087241719Sluigi		ND("nothing to do");
1088241719Sluigi		goto out;
1089241719Sluigi	}
1090241719Sluigi
1091257529Sluigi	if (netmap_mem_finalize_all(nmd))
1092257529Sluigi		goto out;
1093241719Sluigi
1094241719Sluigi	/* backward compatibility */
1095257529Sluigi	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
1096257529Sluigi	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;
1097241719Sluigi
1098257529Sluigi	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
1099257529Sluigi	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;
1100241719Sluigi
1101257529Sluigi	nmd->lasterr = 0;
1102241719Sluigi
1103241719Sluigiout:
1104257529Sluigi	if (nmd->lasterr)
1105257529Sluigi		nmd->refcount--;
1106257529Sluigi	err = nmd->lasterr;
1107241719Sluigi
1108257529Sluigi	NMA_UNLOCK(nmd);
1109241719Sluigi
1110257529Sluigi	return err;
1111241719Sluigi
1112234228Sluigi}
1113234228Sluigi
1114257529Sluigiint
1115257529Sluiginetmap_mem_init(void)
1116241719Sluigi{
1117257529Sluigi	NMA_LOCK_INIT(&nm_mem);
1118241719Sluigi	return (0);
1119241719Sluigi}
1120234228Sluigi
1121257529Sluigivoid
1122257529Sluiginetmap_mem_fini(void)
1123234228Sluigi{
1124241719Sluigi	int i;
1125241719Sluigi
1126241719Sluigi	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1127241719Sluigi	    netmap_destroy_obj_allocator(&nm_mem.pools[i]);
1128241719Sluigi	}
1129257529Sluigi	NMA_LOCK_DESTROY(&nm_mem);
1130234228Sluigi}
1131234228Sluigi
1132241719Sluigistatic void
1133241719Sluiginetmap_free_rings(struct netmap_adapter *na)
1134241719Sluigi{
1135261909Sluigi	struct netmap_kring *kring;
1136261909Sluigi	struct netmap_ring *ring;
1137245835Sluigi	if (!na->tx_rings)
1138245835Sluigi		return;
1139261909Sluigi	for (kring = na->tx_rings; kring != na->rx_rings; kring++) {
1140261909Sluigi		ring = kring->ring;
1141261909Sluigi		if (ring == NULL)
1142261909Sluigi			continue;
1143261909Sluigi		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1144261909Sluigi		netmap_ring_free(na->nm_mem, ring);
1145261909Sluigi		kring->ring = NULL;
1146241719Sluigi	}
1147261909Sluigi	for (/* cont'd from above */; kring != na->tailroom; kring++) {
1148261909Sluigi		ring = kring->ring;
1149261909Sluigi		if (ring == NULL)
1150261909Sluigi			continue;
1151261909Sluigi		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1152261909Sluigi		netmap_ring_free(na->nm_mem, ring);
1153261909Sluigi		kring->ring = NULL;
1154241719Sluigi	}
1155241719Sluigi}
1156234228Sluigi
/* call with NMA_LOCK held
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create(): tx krings first, then rx krings,
 * then tailroom, all in one contiguous array.
 *
 * Returns 0 on success, ENOMEM on failure (after freeing whatever
 * was created so far).
 */
int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	struct netmap_ring *ring;
	u_int len, ndesc;
	struct netmap_kring *kring;
	u_int i;

	NMA_LOCK(na->nm_mem);

        /* transmit rings */
	for (i =0, kring = na->tx_rings; kring != na->rx_rings; kring++, i++) {
		if (kring->ring) {
			ND("%s %ld already created", kring->name, kring - na->tx_rings);
			continue; /* already created by somebody else */
		}
		ndesc = kring->nkr_num_slots;
		/* the ring header is followed inline by the slot array */
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring");
			goto cleanup;
		}
		ND("txring at %p", ring);
		kring->ring = ring;
		/* the casts below write through const-qualified fields of
		 * the shared ring, which userspace must not modify */
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		/* buf_ofs: distance from this ring to the start of the
		 * buffer pool (IF and RING pools precede it) */
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		/* NOTE(review): here nr_buf_size is written through a
		 * uint16_t cast, but the rx loop below uses (int *) for the
		 * same field -- one of the two widths is wrong; verify
		 * against the declaration in netmap.h */
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		ND("initializing slots for txring");
		if (i != na->num_tx_rings || (na->na_flags & NAF_HOST_RINGS)) {
			/* this is a real ring */
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				D("Cannot allocate buffers for tx_ring");
				goto cleanup;
			}
		} else {
			/* this is a fake tx ring, set all indices to 0 */
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
		}
	}

	/* receive rings */
	for ( i = 0 /* kring cont'd from above */ ; kring != na->tailroom; kring++, i++) {
		if (kring->ring) {
			ND("%s %ld already created", kring->name, kring - na->rx_rings);
			continue; /* already created by somebody else */
		}
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring");
			goto cleanup;
		}
		ND("rxring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
		        na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		/* NOTE(review): (int *) here vs (uint16_t *) in the tx loop
		 * above -- see the note there */
		*(int *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		ND("initializing slots for rxring %p", ring);
		if (i != na->num_rx_rings || (na->na_flags & NAF_HOST_RINGS)) {
			/* this is a real ring */
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				D("Cannot allocate buffers for rx_ring");
				goto cleanup;
			}
		} else {
			/* this is a fake rx ring, set all indices to 1 */
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 1);
		}
	}

	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	/* release everything allocated so far */
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}
1271259412Sluigi
1272259412Sluigivoid
1273259412Sluiginetmap_mem_rings_delete(struct netmap_adapter *na)
1274259412Sluigi{
1275259412Sluigi	/* last instance, release bufs and rings */
1276259412Sluigi	NMA_LOCK(na->nm_mem);
1277259412Sluigi
1278259412Sluigi	netmap_free_rings(na);
1279259412Sluigi
1280259412Sluigi	NMA_UNLOCK(na->nm_mem);
1281259412Sluigi}
1282259412Sluigi
1283259412Sluigi
/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 *
 * Returns the new netmap_if, or NULL if the IF pool is exhausted.
 */
struct netmap_if *
netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ntx, nrx;

	/* account for the (eventually fake) host rings */
	ntx = na->num_tx_rings + 1;
	nrx = na->num_rx_rings + 1;
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	/* NOTE(review): strncpy does not NUL-terminate when
	 * strlen(ifname) >= IFNAMSIZ -- presumably callers guarantee a
	 * shorter name; confirm, or readers of ni_name must bound reads */
	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	}
	/* rx offsets follow the ntx tx offsets in the same array */
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
}
1340234228Sluigi
1341257529Sluigivoid
1342257529Sluiginetmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
1343257529Sluigi{
1344257529Sluigi	if (nifp == NULL)
1345257529Sluigi		/* nothing to do */
1346257529Sluigi		return;
1347257529Sluigi	NMA_LOCK(na->nm_mem);
1348261909Sluigi	if (nifp->ni_bufs_head)
1349261909Sluigi		netmap_extra_free(na, nifp->ni_bufs_head);
1350257529Sluigi	netmap_if_free(na->nm_mem, nifp);
1351257529Sluigi
1352257529Sluigi	NMA_UNLOCK(na->nm_mem);
1353257529Sluigi}
1354257529Sluigi
1355234228Sluigistatic void
1356257529Sluiginetmap_mem_global_deref(struct netmap_mem_d *nmd)
1357234228Sluigi{
1358257529Sluigi	NMA_LOCK(nmd);
1359257529Sluigi
1360257529Sluigi	nmd->refcount--;
1361245835Sluigi	if (netmap_verbose)
1362257529Sluigi		D("refcount = %d", nmd->refcount);
1363257529Sluigi
1364257529Sluigi	NMA_UNLOCK(nmd);
1365234228Sluigi}
1366257529Sluigi
1367259412Sluigiint
1368259412Sluiginetmap_mem_finalize(struct netmap_mem_d *nmd)
1369257529Sluigi{
1370257529Sluigi	return nmd->finalize(nmd);
1371257529Sluigi}
1372257529Sluigi
1373259412Sluigivoid
1374259412Sluiginetmap_mem_deref(struct netmap_mem_d *nmd)
1375257529Sluigi{
1376257529Sluigi	return nmd->deref(nmd);
1377257529Sluigi}
1378