netmap_mem2.c revision 257529
/*
 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap_mem2.c 257529 2013-11-01 21:21:14Z luigi $");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include "netmap_mem2.h"

#ifdef linux
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */


struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};


/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,
};


// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */

/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 10,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};
/* memory allocator related sysctls */

#define STRINGIFY(x) #x


#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
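
/*
 * The macros above expose each pool's parameters under the dev.netmap
 * sysctl tree.  A minimal usage sketch from a FreeBSD shell (the value
 * below is illustrative only):
 *
 *	sysctl dev.netmap.buf_size		# requested buffer size
 *	sysctl dev.netmap.buf_num=163840	# request more buffers
 *	sysctl dev.netmap.buf_curr_num		# number actually in use
 *
 * The *_size and *_num knobs are read-write requests; the *_curr_*
 * values are read-only and reflect the live configuration.  New
 * requests only take effect the next time the allocator is
 * reconfigured, i.e. when it is finalized with no active users.
 */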

/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now look up the cluster's address
		pa = p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* we only get here in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}

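/*
 * Worked example for the translation above (illustrative numbers,
 * assuming the default 2048-byte buffers): an offset that falls past
 * the IF and RING pools has their memtotal values subtracted by the
 * loop, so a rebased offset of 10240 inside the buffer pool resolves
 * to
 *
 *	pa = lut[10240 / 2048].paddr + 10240 % 2048
 *	   = lut[5].paddr + 0
 *
 * i.e. the physical address recorded for the sixth buffer.
 */
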
int
netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags)
{
	int error = 0;
	NMA_LOCK(nmd);
	error = nmd->config(nmd);
	if (error)
		goto out;
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		*size = nmd->nm_totalsize;
	} else {
		int i;
		*size = 0;
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p = nmd->pools + i;
			*size += (p->_numclusters * p->_clustsize);
		}
	}
	*memflags = nmd->flags;
out:
	NMA_UNLOCK(nmd);
	return error;
}

/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset within the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))

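/*
 * The three pools are mapped back to back into the region exported to
 * userspace, which is why each helper above adds the sizes of the
 * preceding pools.  Sketch of the layout (sizes are illustrative):
 *
 *	offset 0
 *	+---------------+------------------+------------------+
 *	|    IF pool    |    RING pool     |     BUF pool     |
 *	|  (netmap_if)  |  (netmap_ring)   |    (buffers)     |
 *	+---------------+------------------+------------------+
 *	                ^ IF memtotal      ^ + RING memtotal
 */
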

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	ssize_t v;
	NMA_LOCK(nmd);
	v = netmap_if_offset(nmd, addr);
	NMA_UNLOCK(nmd);
	return v;
}

296 * report the index, and use start position as a hint,
297 * otherwise buffer allocation becomes terribly expensive.
298 */
299static void *
300netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
301{
302	uint32_t i = 0;			/* index in the bitmap */
303	uint32_t mask, j;		/* slot counter */
304	void *vaddr = NULL;
305
306	if (len > p->_objsize) {
307		D("%s request size %d too large", p->name, len);
308		// XXX cannot reduce the size
309		return NULL;
310	}
311
312	if (p->objfree == 0) {
313		D("%s allocator: run out of memory", p->name);
314		return NULL;
315	}
316	if (start)
317		i = *start;
318
319	/* termination is guaranteed by p->free, but better check bounds on i */
320	while (vaddr == NULL && i < p->bitmap_slots)  {
321		uint32_t cur = p->bitmap[i];
322		if (cur == 0) { /* bitmask is fully used */
323			i++;
324			continue;
325		}
326		/* locate a slot */
327		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
328			;
329
330		p->bitmap[i] &= ~mask; /* mark object as in use */
331		p->objfree--;
332
333		vaddr = p->lut[i * 32 + j].vaddr;
334		if (index)
335			*index = i * 32 + j;
336	}
337	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr);
338
339	if (start)
340		*start = i;
341	return vaddr;
342}
343
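/*
 * Example of the bitmap bookkeeping above (illustrative): each word of
 * p->bitmap covers 32 objects, with a set bit meaning "free".  If
 * bitmap[3] == 0x00000005, objects 96 and 98 are free; the inner loop
 * stops at j = 0 (mask = 1), the bit is cleared, and the function
 * returns lut[3 * 32 + 0].vaddr with *index = 96.  Passing the
 * returned *start back on the next call avoids rescanning words that
 * are already known to be full.
 */
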

/*
 * free by index, not by address. This is slow, but is only used
 * for a small number of objects (rings, nifp)
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_if *nifp,
                struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		// slot[i].flags = NS_BUF_CHANGED;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	(void)nifp;
	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters whose size is a multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}

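/*
 * Worked example of the sizing above (assuming PAGE_SIZE == 4096):
 * for objsize = 1536 the loop finds an exact fit at i = 8, since
 * 8 * 1536 == 12288 == 3 * PAGE_SIZE, so clustentries = 8 and
 * clustsize = 12288 with no wasted space.  For objsize = 2048 the
 * exact fit is found already at i = 2 (one page per cluster).
 */
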

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;
	D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved: clear bits 0 and 1 in the bitmap */
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}


void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}

static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_private_finalize(struct netmap_mem_d *nmd)
{
	int err;
	NMA_LOCK(nmd);
	nmd->refcount++;
	err = netmap_mem_finalize_all(nmd);
	NMA_UNLOCK(nmd);
	return err;
}

static void
netmap_mem_private_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);
	if (--nmd->refcount <= 0)
		netmap_mem_reset_all(nmd);
	NMA_UNLOCK(nmd);
}

struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int maxd;

	d = malloc(sizeof(struct netmap_mem_d),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL)
		return NULL;

	*d = nm_blueprint;

	/* XXX the rest of the code assumes the stack rings are always present */
	txr++;
	rxr++;
	p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) +
		sizeof(ssize_t) * (txr + rxr);
	p[NETMAP_IF_POOL].num = 2;
	maxd = (txd > rxd) ? txd : rxd;
	p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) +
		sizeof(struct netmap_slot) * maxd;
	p[NETMAP_RING_POOL].num = txr + rxr;
	p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */
	p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2);

	D("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				name);
		if (netmap_config_obj_allocator(&d->pools[i],
				p[i].num, p[i].size))
			goto error;
	}

	d->flags &= ~NETMAP_MEM_FINALIZED;

	NMA_LOCK_INIT(d);

	return d;
error:
	netmap_mem_private_delete(d);
	return NULL;
}

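/*
 * Sizing example for a private allocator (illustrative numbers): a
 * request for txr = rxr = 1 and txd = rxd = 1024 becomes 2 TX and
 * 2 RX rings once the stack rings are added, so the BUF pool asks
 * for 2 * (1024 + 2) + 2 * (1024 + 2) = 4104 buffers of 2048 bytes,
 * the RING pool for 4 rings sized for 1024 slots each, and the IF
 * pool for 2 netmap_if objects with 4 ring offsets apiece.
 */
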


/* call with lock held */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->refcount)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	D("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}

static int
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
	int err;

	NMA_LOCK(nmd);

	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))
		goto out;

	nmd->refcount++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	/* backward compatibility */
	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->refcount--;
	err = nmd->lasterr;

	NMA_UNLOCK(nmd);

	return err;
}

int
netmap_mem_init(void)
{
	NMA_LOCK_INIT(&nm_mem);
	return (0);
}

void
netmap_mem_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY(&nm_mem);
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	u_int i;
	if (!na->tx_rings)
		return;
	for (i = 0; i < na->num_tx_rings + 1; i++) {
		if (na->tx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->tx_rings[i].ring);
			na->tx_rings[i].ring = NULL;
		}
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		if (na->rx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->rx_rings[i].ring);
			na->rx_rings[i].ring = NULL;
		}
	}
	free(na->tx_rings, M_DEVBUF);
	na->tx_rings = na->rx_rings = NULL;
}


/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 * If this is the first instance, also allocate the krings, rings etc.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
extern int nma_is_vp(struct netmap_adapter *na);
struct netmap_if *
netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc, ntx, nrx;
	struct netmap_kring *kring;
	uint32_t *tx_leases = NULL, *rx_leases = NULL;

	/*
	 * verify whether virtual ports need the stack ring
	 */
	ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 * For virtual rx rings we also allocate an array of
	 * pointers to assign to nkr_leases.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);

	if (na->refcount) { /* already setup, we are done */
		goto final;
	}

	len = (ntx + nrx) * sizeof(struct netmap_kring);
	/*
	 * Leases are attached to TX rings on NIC/host ports,
	 * and to RX rings on VALE ports.
	 */
	if (nma_is_vp(na)) {
		len += sizeof(uint32_t) * na->num_rx_desc * na->num_rx_rings;
	} else {
		len += sizeof(uint32_t) * na->num_tx_desc * ntx;
	}

	na->tx_rings = malloc((size_t)len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (na->tx_rings == NULL) {
		D("Cannot allocate krings for %s", ifname);
		goto cleanup;
	}
	na->rx_rings = na->tx_rings + ntx;

	if (nma_is_vp(na)) {
		rx_leases = (uint32_t *)(na->rx_rings + nrx);
	} else {
		tx_leases = (uint32_t *)(na->rx_rings + nrx);
	}

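	/*
	 * Layout of the block just allocated (a sketch; the leases end
	 * up on TX krings for NIC/host ports, on RX krings for VALE
	 * ports):
	 *
	 *	tx_rings[0..ntx-1] | rx_rings[0..nrx-1] | leases[...]
	 *
	 * The lease array is carved up below, ndesc entries per kring.
	 */
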
	/*
	 * First instance, allocate netmap rings and buffers for this card
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		if (tx_leases) {
			kring->nkr_leases = tx_leases;
			tx_leases += ndesc;
		}
		*(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for txring[%d]", i);
		if (netmap_new_bufs(na->nm_mem, nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);

		kring->na = na;
		kring->ring = ring;
		if (rx_leases && i < na->num_rx_rings) {
			kring->nkr_leases = rx_leases;
			rx_leases += ndesc;
		}
		*(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
		        na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for rxring[%d]", i);
		if (netmap_new_bufs(na->nm_mem, nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif
final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
cleanup:
	netmap_free_rings(na);
	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);

	return NULL;
}

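/*
 * The ring_ofs values stored above are what lets userspace reach a
 * ring from the netmap_if pointer alone.  A minimal usage sketch
 * (illustrative, along the lines of the accessor macros in
 * net/netmap_user.h):
 *
 *	struct netmap_ring *r = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);	// i-th TX ring
 *
 * RX rings follow the TX rings, at indexes ntx .. ntx + nrx - 1.
 */
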
void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);

	if (na->refcount <= 0) {
		/* last instance, release bufs and rings */
		u_int i, j, lim;
		struct netmap_ring *ring;

		for (i = 0; i < na->num_tx_rings + 1; i++) {
			ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(na->nm_mem, nifp, ring->slot[j].buf_idx);
		}
		for (i = 0; i < na->num_rx_rings + 1; i++) {
			ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(na->nm_mem, nifp, ring->slot[j].buf_idx);
		}
		netmap_free_rings(na);
	}
	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}

static void
netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);

	nmd->refcount--;
	if (netmap_verbose)
		D("refcount = %d", nmd->refcount);

	NMA_UNLOCK(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd)
{
	return nmd->finalize(nmd);
}

void
netmap_mem_deref(struct netmap_mem_d *nmd)
{
	nmd->deref(nmd);
}