netmap_mem2.c revision 241643
/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 241643 2012-10-17 18:21:14Z emaste $
 * $Id: netmap_mem2.c 11445 2012-07-30 10:49:07Z luigi $
 *
 * New memory allocator for netmap
 */

/*
 * The new version allocates three regions:
 *	nm_if_pool      for the struct netmap_if
 *	nm_ring_pool    for the struct netmap_ring
 *	nm_buf_pool     for the packet buffers.
 *
 * All regions need to be page-sized as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * Allocator for a pool of memory objects of the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * To be polite with the cache, objects are aligned to
 * the cache line, or 64 bytes. Sizes are rounded to multiples of 64.
 * For each object we have
 * one entry in the bitmap to signal its state. Allocation scans
 * the bitmap, but since this is done only on attach, we are not
 * too worried about performance.
 */
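/*
 * A worked example of the exported layout (sizes are made up for
 * illustration, the real ones are computed at init time): if
 * nm_if_pool spans 0x80000 bytes and nm_ring_pool 0x200000 bytes,
 * then the userspace offset of the k-th packet buffer is
 *
 *	0x80000 + 0x200000 + k * NETMAP_BUF_SIZE
 *
 * i.e. the pool base offsets simply stack up in the mmap()ed block.
 */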

/*
 *	MEMORY SIZES:
 *
 * (all the parameters below will become tunables)
 *
 * struct netmap_if is variable size but small.
 * Assuming each NIC has 8+2 rings (4+1 tx, 4+1 rx), the netmap_if
 * uses 120 bytes on a 64-bit machine.
 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
 * cards with 48 ring pairs.
 * The total number of 'struct netmap_if' could be slightly larger
 * than the total number of rings on all interfaces on the system.
 */
#define NETMAP_IF_MAX_SIZE      1024
#define NETMAP_IF_MAX_NUM       512
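
/*
 * Sanity check on the sizing above (a sketch, following the layout
 * used later in netmap_if_new()): the descriptor is followed inline
 * by one ssize_t ring offset per ring, so a NIC with 4+1 tx and
 * 4+1 rx rings needs
 *
 *	len = sizeof(struct netmap_if) + (5 + 5) * sizeof(ssize_t);
 *
 * i.e. the fixed part plus 80 bytes of offsets. Even 48+1 ring pairs
 * use only 98 * 8 = 784 bytes of offsets, comfortably below
 * NETMAP_IF_MAX_SIZE.
 */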

/*
 * netmap rings are up to 2..4k descriptors, 8 bytes each,
 * plus some glue at the beginning (32 bytes).
 * We set the default ring size to 9 pages (36K) and enable
 * a few hundred of them.
 */
#define NETMAP_RING_MAX_SIZE    (9*PAGE_SIZE)
#define NETMAP_RING_MAX_NUM     200	/* approx 8MB */
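
/*
 * Checking the 9-page figure above (a sketch, assuming 4k-byte pages
 * and the 8-byte slots mentioned above): a 4096-slot ring needs
 *
 *	sizeof(struct netmap_ring) + 4096 * sizeof(struct netmap_slot)
 *
 * which is 32 + 32768 bytes, i.e. just over 8 pages, hence the
 * 9-page (36K) maximum.
 */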

/*
 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
 * 2k or slightly less, aligned to 64 bytes.
 * A large 10G interface can have 2k*18 = 36k buffers per interface,
 * or about 72MB of memory. Up to us to use more.
 */
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM      100000  /* 200MB */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM      20000   /* 40MB */
#endif


struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;         /* actual total number of objects. */
	u_int objfree;          /* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
};

struct netmap_mem_d {
	NM_LOCK_T nm_mtx; /* protect the allocator ? */
	u_int nm_totalsize; /* shorthand */

	/* pointers to the three allocators */
	struct netmap_obj_pool *nm_if_pool;
	struct netmap_obj_pool *nm_ring_pool;
	struct netmap_obj_pool *nm_buf_pool;
};

struct lut_entry *netmap_buffer_lut;	/* exported */
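
/*
 * Consumers resolve a slot's buffer index through the table above.
 * A minimal sketch (example_nmb is a hypothetical helper, not part
 * of this file; the real kernel code uses a similar NMB() macro):
 *
 *	static inline void *
 *	example_nmb(struct netmap_slot *slot)
 *	{
 *		return netmap_buffer_lut[slot->buf_idx].vaddr;
 *	}
 */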


/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across different
 * clusters and smaller memory allocators. Consequently, first of all we
 * need to find which allocator owns the provided offset, then we need to
 * find the physical address associated with the target page (this is done
 * using the look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	const struct netmap_obj_pool *p[] = {
		nm_mem->nm_if_pool,
		nm_mem->nm_ring_pool,
		nm_mem->nm_buf_pool };
	int i;
	vm_offset_t o = offset;

	for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
		if (offset >= p[i]->_memtotal)
			continue;
		// XXX now scan the clusters
		return p[i]->lut[offset / p[i]->_objsize].paddr +
			offset % p[i]->_objsize;
	}
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
		p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
	return 0;	// XXX bad address
}
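
/*
 * Worked example for the lookup above (illustrative sizes, not the
 * real ones): with _memtotal 0x80000 for the if pool and an offset
 * of 0x81000, the first iteration fails the bounds check and
 * rebases the offset to 0x1000 within the ring pool; the physical
 * address is then the paddr of ring object 0x1000 / _objsize plus
 * the remainder within that object.
 */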

/*
 * We store objects by kernel address; this finds the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(nm_mem->nm_if_pool, (v))

#define netmap_ring_offset(v)					\
    (nm_mem->nm_if_pool->_memtotal +				\
	netmap_obj_offset(nm_mem->nm_ring_pool, (v)))

#define netmap_buf_offset(v)					\
    (nm_mem->nm_if_pool->_memtotal +				\
	nm_mem->nm_ring_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_buf_pool, (v)))


static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);
		return NULL;
	}

	/* termination is guaranteed by p->objfree */
	while (vaddr == NULL) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	return vaddr;
}
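
/*
 * Example of the bitmap walk above (made-up state): if bitmap[0] is 0
 * and bitmap[1] is 0x8, the scan skips word 0, finds the first set
 * bit in word 1 at position j = 3, and returns the object at index
 * 1 * 32 + 3 = 35 in the lookup table.
 */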


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is outside the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(nm_mem->nm_if_pool, len)
#define netmap_if_free(v)	netmap_obj_free_va(nm_mem->nm_if_pool, (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(nm_mem->nm_ring_pool, len)
#define netmap_buf_malloc()			\
	netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
    (netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)


static void
netmap_new_bufs(struct netmap_if *nifp,
                struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	uint32_t i = 0;	/* slot counter */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc();
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}

		slot[i].buf_idx = netmap_buf_index(vaddr);
		KASSERT(slot[i].buf_idx != 0,
		    ("Assigning buf_idx=0 to just created slot"));
		slot[i].len = p->_objsize;
		slot[i].flags = NS_BUF_CHANGED; // XXX GAETANO hack
	}

	ND("allocated %d buffers, %d available", n, p->objfree);
	return;

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
	}
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d)", i, p->objtotal);
		return;
	}
	netmap_obj_free(nm_mem->nm_buf_pool, i);
}


/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	bzero(p, sizeof(*p));
	free(p, M_NETMAP);
}

/*
 * We receive a request for objtotal objects of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters whose size is a multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */
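/*
 * A worked example of the rounding (assuming 4k pages): a request
 * for 600 objects of 2048 bytes packs 2 objects per 4096-byte
 * cluster, so n = ceil(600 / 2) = 300 clusters and objtotal stays
 * 600; a request for 100 objects of 1536 bytes instead packs 8
 * objects per 3-page cluster (8 * 1536 = 12288, no waste), giving
 * n = ceil(100 / 8) = 13 clusters and objtotal' = 104 objects.
 */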
static struct netmap_obj_pool *
netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
{
	struct netmap_obj_pool *p;
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return NULL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
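	/*
	 * Example (assuming 4k pages): for objsize 2048, i = 2 gives
	 * used = 4096 and delta = 0, an exact fit, so clustentries
	 * becomes 2 and the loop stops; for objsize 1536 the loop runs
	 * to i = 8 (8 * 1536 = 12288, exactly 3 pages) before finding
	 * an exact fit.
	 */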
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);

	p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (p == NULL) {
		D("Unable to create '%s' allocator", name);
		return NULL;
	}
	/*
	 * Allocate and initialize the lookup table.
	 *
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	strncpy(p->name, name, sizeof(p->name));
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_objsize = objsize;
	p->_memtotal = p->_numclusters * p->_clustsize;

	p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
	    M_NETMAP, M_WAITOK | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s' allocator", name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + clustentries;
		char *clust;

		clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, name);
			lim = i / 2;
			for (; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, name);

	return p;

clean:
	netmap_destroy_obj_allocator(p);
	return NULL;
}

static int
netmap_memory_init(void)
{
	struct netmap_obj_pool *p;

	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
			      M_WAITOK | M_ZERO);
	if (nm_mem == NULL)
		goto clean;

	p = netmap_new_obj_allocator("netmap_if",
	    NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_if_pool = p;

	p = netmap_new_obj_allocator("netmap_ring",
	    NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_ring_pool = p;

	p = netmap_new_obj_allocator("netmap_buf",
	    NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
	if (p == NULL)
		goto clean;
	netmap_total_buffers = p->objtotal;
	netmap_buffer_lut = p->lut;
	nm_mem->nm_buf_pool = p;
	netmap_buffer_base = p->lut[0].vaddr;

	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
		 MTX_DEF);
	nm_mem->nm_totalsize =
	    nm_mem->nm_if_pool->_memtotal +
	    nm_mem->nm_ring_pool->_memtotal +
	    nm_mem->nm_buf_pool->_memtotal;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem->nm_if_pool->_memtotal >> 10,
	    nm_mem->nm_ring_pool->_memtotal >> 10,
	    nm_mem->nm_buf_pool->_memtotal >> 20);
	return 0;

clean:
	if (nm_mem) {
		netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
		netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
		free(nm_mem, M_NETMAP);
	}
	return ENOMEM;
}


static void
netmap_memory_fini(void)
{
	if (!nm_mem)
		return;
	netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
	mtx_destroy(&nm_mem->nm_mtx);
	free(nm_mem, M_NETMAP);
}


static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	struct netmap_kring *kring;

	NMA_LOCK();
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		NMA_UNLOCK();
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already setup, we are done */
		NMA_UNLOCK();
		goto final;
	}

	/*
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p ofs %d", i, ring,
		    (int)netmap_ring_offset(ring));
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
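		/*
		 * E.g. with ndesc = 512 the ring starts with avail = 511:
		 * userspace may fill at most 511 slots, so cur can never
		 * wrap all the way around and equal nr_hwcur while
		 * transmissions are still pending.
		 */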
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p ofs %d", i, ring,
		    (int)netmap_ring_offset(ring));

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
		        nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}
	NMA_UNLOCK();
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif
final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	// XXX missing
	NMA_UNLOCK();
	return NULL;
}
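
/*
 * Userspace reaches a ring from the nifp through the offsets stored
 * above; a sketch of the consumer side (this mirrors what the
 * NETMAP_TXRING() macro in the userspace headers does):
 *
 *	struct netmap_ring *ring = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);
 */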

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;
	for (i = 0; i < na->num_tx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->tx_rings[i].ring);
	for (i = 0; i < na->num_rx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->rx_rings[i].ring);
}
723