/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 241719 2012-10-19 04:13:12Z luigi $
 * $Id: netmap_mem2.c 11881 2012-10-18 23:24:15Z luigi $
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be a multiple of a page size as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all three.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export the absolute
 * minimum and maximum values that cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size too, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518 bytes, or more for vlans,
 *	jumbo frames etc.), plus be nicely aligned, plus some NICs
 *	restrict the size to multiples of 1K or so. Default to 2K.
 */
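
/*
 * Example (userspace view, a minimal sketch): since the three regions
 * are exported as a single contiguous mmap()able block, applications
 * reach any object with plain offset arithmetic. Assuming "fd" is an
 * open /dev/netmap descriptor and "nmr" has been filled by the
 * NIOCGINFO/NIOCREGIF ioctls:
 *
 *	char *mem = mmap(0, nmr.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp =
 *	    (struct netmap_if *)(mem + nmr.nr_offset);
 *
 * Every offset handed out by this allocator is relative to "mem".
 */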

#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM      20000   /* 40MB */
#endif

#ifdef linux
#define NMA_LOCK_T		struct semaphore
#define NMA_LOCK_INIT()		sema_init(&nm_mem.nm_mtx, 1)
#define NMA_LOCK_DESTROY()
#define NMA_LOCK()		down(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		up(&nm_mem.nm_mtx)
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#define NMA_LOCK_INIT()		mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY()	mtx_destroy(&nm_mem.nm_mtx)
#define NMA_LOCK()		mtx_lock(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		mtx_unlock(&nm_mem.nm_mtx)
#endif /* linux */

enum {
	NETMAP_IF_POOL   = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;
};


struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};
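
/*
 * Note: these defaults match the sizing discussion above; with 4 KB
 * pages, 9*PAGE_SIZE is the 36 KB default ring size, and a 2048-byte
 * buffer holds a full 1518-byte frame with room to spare.
 */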

struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects. */
	u_int objfree;		/* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
};


struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;  /* protect the allocator */
	u_int nm_totalsize; /* shorthand */

	int finalized;		/* !=0 iff preallocation done */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
};


static struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name 	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name 	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
};

struct lut_entry *netmap_buffer_lut;	/* exported */

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	/* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_size", &netmap_params[id].size); */ \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	/* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_num", &netmap_params[id].num); */ \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
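
/*
 * For example, DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf) above expands to
 * four sysctls under the dev.netmap tree:
 *
 *	dev.netmap.buf_size		(RW) requested buffer size
 *	dev.netmap.buf_curr_size	(RD) size currently in use
 *	dev.netmap.buf_num		(RW) requested number of buffers
 *	dev.netmap.buf_curr_num		(RD) number currently allocated
 */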

/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across different
 * clusters and smaller memory allocators. Consequently, first of all we
 * need to find which allocator owns the provided offset, then we need to
 * find the physical address associated with the target page (this is done
 * using the look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	int i;
	vm_offset_t o = offset;
	struct netmap_obj_pool *p = nm_mem.pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
		if (offset >= p[i]._memtotal)
			continue;
		// XXX now scan the clusters
		return p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL]._memtotal,
		p[NETMAP_IF_POOL]._memtotal
			+ p[NETMAP_RING_POOL]._memtotal,
		p[NETMAP_IF_POOL]._memtotal
			+ p[NETMAP_RING_POOL]._memtotal
			+ p[NETMAP_BUF_POOL]._memtotal);
	return 0;	// XXX bad address
}

/*
 * We store objects by kernel address; find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		/* >= : an address one-past the cluster belongs to the next one */
		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(v)					\
    (nm_mem.pools[NETMAP_IF_POOL]._memtotal + 			\
	netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(v)					\
    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +			\
	nm_mem.pools[NETMAP_RING_POOL]._memtotal +		\
	netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))


/*
 * Report the index, and use the start position as a hint;
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* this bitmap word is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* Given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(_pos, _index)			\
	netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
    (netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_if *nifp,
                struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
	int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(&pos, &index);
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		slot[i].flags = NS_BUF_CHANGED;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d)", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters, multiples of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */
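
/*
 * For instance, userspace locates buffer i with linear arithmetic in
 * the style of the NETMAP_BUF() macro of <net/netmap_user.h>:
 *
 *	char *buf = (char *)ring + ring->buf_ofs + i * ring->nr_buf_size;
 *
 * so a gap at the end of a cluster would break the index-to-address
 * mapping for all buffers in the following clusters.
 */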

/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		goto error;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		goto error;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		goto error;
	}
	/*
	 * Compute the number of objects per cluster using a brute-force
	 * approach: given a max cluster size, we try to fill it with
	 * objects, keeping track of the wasted space to the next page
	 * boundary.
	 */
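	/*
	 * Worked example, assuming 4 KB pages: for objsize = 2048 the
	 * loop below finds an exact fit at i = 2 (2*2048 % 4096 == 0),
	 * so a cluster is a single page holding two objects; for
	 * objsize = 1024 the exact fit is i = 4. If nothing divides
	 * evenly below MAX_CLUSTSIZE, we keep the i that fills the last
	 * page the most, i.e. wastes the least when clustsize is
	 * rounded up to a page boundary.
	 */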
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i =  (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_memtotal = p->_numclusters * p->_clustsize;
	p->_objsize = objsize;

	return 0;

error:
	p->_objsize = objsize;
	p->objtotal = objtotal;

	return EINVAL;
}


/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i, n;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_WAITOK | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + p->clustentries;
		char *clust;

		clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 * XXX check boundaries
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % p->clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / p->clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
		    nm_mem.pools[i].objtotal != netmap_params[i].num)
		    return 1;
	}
	return 0;
}


/* call with lock held */
static int
netmap_memory_config(void)
{
	int i;

	if (!netmap_memory_config_changed())
		goto out;

	D("reconfiguring");

	if (nm_mem.finalized) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nm_mem.pools[i]);
		}
		nm_mem.finalized = 0;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nm_mem.lasterr)
			goto out;
	}

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);

out:
	return nm_mem.lasterr;
}

/* call with lock held */
static int
netmap_memory_finalize(void)
{
	int i;
	u_int totalsize = 0;

	nm_mem.refcount++;
	if (nm_mem.refcount > 1) {
		D("busy (refcount %d)", nm_mem.refcount);
		goto out;
	}

	/* update configuration if changed */
	if (netmap_memory_config())
		goto out;

	if (nm_mem.finalized) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
		if (nm_mem.lasterr)
			goto cleanup;
		totalsize += nm_mem.pools[i]._memtotal;
	}
	nm_mem.nm_totalsize = totalsize;

	/* backward compatibility */
	netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nm_mem.finalized = 1;
	nm_mem.lasterr = 0;

	/* make sysctl values match actual values in the pools */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_params[i].size = nm_mem.pools[i]._objsize;
		netmap_params[i].num  = nm_mem.pools[i].objtotal;
	}

out:
	if (nm_mem.lasterr)
		nm_mem.refcount--;

	return nm_mem.lasterr;

cleanup:
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nm_mem.pools[i]);
	}
	nm_mem.refcount--;

	return nm_mem.lasterr;
}

static int
netmap_memory_init(void)
{
	NMA_LOCK_INIT();
	return (0);
}

static void
netmap_memory_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY();
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;

	for (i = 0; i < na->num_tx_rings + 1; i++) {
		netmap_ring_free(na->tx_rings[i].ring);
		na->tx_rings[i].ring = NULL;
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		netmap_ring_free(na->rx_rings[i].ring);
		na->rx_rings[i].ring = NULL;
	}
}


/* call with NMA_LOCK held */
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	struct netmap_kring *kring;

	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already set up, we are done */
		goto final;
	}

	/*
	 * First instance: allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
			nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
		        nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif
final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	netmap_free_rings(na);
	netmap_if_free(nifp);
	(na->refcount)--;
	return NULL;
}
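
/*
 * Userspace turns the ring_ofs[] entries filled above back into
 * pointers, e.g. through the NETMAP_TXRING()/NETMAP_RXRING() macros
 * of <net/netmap_user.h>, which boil down to:
 *
 *	struct netmap_ring *txr = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);
 *	struct netmap_ring *rxr = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i + nifp->ni_tx_rings + 1]);
 *
 * mirroring the nifp-relative offsets computed above.
 */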

/* call with NMA_LOCK held */
static void
netmap_memory_deref(void)
{
	nm_mem.refcount--;
	D("refcount = %d", nm_mem.refcount);
}
952