netmap_vale.c revision 341477
/*
 * Copyright (C) 2013-2016 Universita` di Pisa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: stable/11/sys/dev/netmap/netmap_vale.c 341477 2018-12-04 17:40:56Z vmaffione $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct, UID, GID */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <sys/smp.h>


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined(_WIN32)
#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/netmap/netmap_bdg.h>

#ifdef WITH_VALE

/*
 * system parameters (most of them in netmap_kern.h)
 * NM_BDG_NAME	prefix for switch port names, default "vale"
 * NM_BDG_MAXPORTS	number of ports
 * NM_BRIDGES	max number of switches in the system.
 *	XXX should become a sysctl or tunable
 *
 * Switch ports are named valeX:Y where X is the switch name and Y
 * is the port. If Y matches a physical interface name, the port is
 * connected to a physical device.
 *
 * Unlike physical interfaces, switch ports use their own memory region
 * for rings and buffers.
 * The virtual interfaces use per-queue lock instead of core lock.
 * In the tx loop, we aggregate traffic in batches to make all operations
 * faster. The batch size is bridge_batch.
 */
#define NM_BDG_MAXRINGS		16	/* XXX unclear how many. */
#define NM_BDG_MAXSLOTS		4096	/* XXX same as above */
#define NM_BRIDGE_RINGSIZE	1024	/* in the device */
#define NM_BDG_BATCH		1024	/* entries in the forwarding buffer */
/* actual size of the tables */
#define NM_BDG_BATCH_MAX	(NM_BDG_BATCH + NETMAP_MAX_FRAGS)
/* NM_FT_NULL terminates a list of slots in the ft */
#define NM_FT_NULL		NM_BDG_BATCH_MAX


/*
 * bridge_batch is set via sysctl to the max batch size to be
 * used in the bridge. The actual value may be larger as the
 * last packet in the block may overflow the size.
 */
static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */
SYSBEGIN(vars_vale);
SYSCTL_DECL(_dev_netmap);
SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0,
		"Max batch size to be used in the bridge");
SYSEND;

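/*
 * Example: the knob above appears as dev.netmap.bridge_batch, so the
 * batch size can be tuned at runtime with e.g.
 *	sysctl dev.netmap.bridge_batch=512
 */
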
static int netmap_vp_create(struct nmreq_header *hdr, struct ifnet *,
		struct netmap_mem_d *nmd, struct netmap_vp_adapter **);
static int netmap_vp_bdg_attach(const char *, struct netmap_adapter *,
		struct nm_bridge *);
static int netmap_vale_bwrap_attach(const char *, struct netmap_adapter *);

/*
 * For each output interface, nm_bdg_q is used to construct a list.
 * bq_len is the number of output buffers (we can have coalescing
 * during the copy).
 */
struct nm_bdg_q {
	uint16_t bq_head;
	uint16_t bq_tail;
	uint32_t bq_len;	/* number of buffers */
};

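/*
 * Example: if slots 3, 7 and 9 of the forwarding table all go to the
 * same destination queue, then bq_head = 3, bq_tail = 9,
 * ft[3].ft_next = 7, ft[7].ft_next = 9 and ft[9].ft_next = NM_FT_NULL
 * (see the append logic in nm_bdg_flush() below).
 */
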
/* Holds the default callbacks */
struct netmap_bdg_ops vale_bdg_ops = {
	.lookup = netmap_bdg_learning,
	.config = NULL,
	.dtor = NULL,
	.vp_create = netmap_vp_create,
	.bwrap_attach = netmap_vale_bwrap_attach,
	.name = NM_BDG_NAME,
};

/*
 * this is a slightly optimized copy routine which rounds
 * to a multiple of 64 bytes and is often faster than dealing
 * with other odd sizes. We assume there is enough room
 * in the source and destination buffers.
 *
 * XXX only for multiples of 64 bytes, non overlapped.
 */
static inline void
pkt_copy(void *_src, void *_dst, int l)
{
	uint64_t *src = _src;
	uint64_t *dst = _dst;
	if (unlikely(l >= 1024)) {
		memcpy(dst, src, l);
		return;
	}
	for (; likely(l > 0); l -= 64) {
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
	}
}

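/*
 * Usage sketch: callers are expected to round the length up to the next
 * multiple of 64 and to make sure both buffers are large enough, e.g.
 *
 *	copy_len = (len + 63) & ~63;
 *	pkt_copy(src, dst, (int)copy_len);
 *
 * (this is what nm_bdg_flush() does before copying each packet).
 */
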

/*
 * Free the forwarding tables for rings attached to switch ports.
 */
static void
nm_free_bdgfwd(struct netmap_adapter *na)
{
	int nrings, i;
	struct netmap_kring **kring;

	NMG_LOCK_ASSERT();
	nrings = na->num_tx_rings;
	kring = na->tx_rings;
	for (i = 0; i < nrings; i++) {
		if (kring[i]->nkr_ft) {
			nm_os_free(kring[i]->nkr_ft);
			kring[i]->nkr_ft = NULL; /* protect from freeing twice */
		}
	}
}


/*
 * Allocate the forwarding tables for the rings attached to the bridge ports.
 */
static int
nm_alloc_bdgfwd(struct netmap_adapter *na)
{
	int nrings, l, i, num_dstq;
	struct netmap_kring **kring;

	NMG_LOCK_ASSERT();
	/* all port:rings + broadcast */
	num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
	l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;
	l += sizeof(struct nm_bdg_q) * num_dstq;
	l += sizeof(uint16_t) * NM_BDG_BATCH_MAX;

	nrings = netmap_real_rings(na, NR_TX);
	kring = na->tx_rings;
	for (i = 0; i < nrings; i++) {
		struct nm_bdg_fwd *ft;
		struct nm_bdg_q *dstq;
		int j;

		ft = nm_os_malloc(l);
		if (!ft) {
			nm_free_bdgfwd(na);
			return ENOMEM;
		}
		dstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);
		for (j = 0; j < num_dstq; j++) {
			dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;
			dstq[j].bq_len = 0;
		}
		kring[i]->nkr_ft = ft;
	}
	return 0;
}

/* Allows external modules to create bridges in exclusive mode,
 * returns an authentication token that the external module will need
 * to provide during nm_bdg_ctl_{attach, detach}(), netmap_bdg_regops(),
 * and nm_bdg_update_private_data() operations.
 * Successfully executed if ret != NULL and *return_status == 0.
 */
void *
netmap_vale_create(const char *bdg_name, int *return_status)
{
	struct nm_bridge *b = NULL;
	void *ret = NULL;

	NMG_LOCK();
	b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
	if (b) {
		*return_status = EEXIST;
		goto unlock_bdg_create;
	}

	b = nm_find_bridge(bdg_name, 1 /* create */, &vale_bdg_ops);
	if (!b) {
		*return_status = ENOMEM;
		goto unlock_bdg_create;
	}

	b->bdg_flags |= NM_BDG_ACTIVE | NM_BDG_EXCLUSIVE;
	ret = nm_bdg_get_auth_token(b);
	*return_status = 0;

unlock_bdg_create:
	NMG_UNLOCK();
	return ret;
}

/* Allows external modules to destroy a bridge created through
 * netmap_vale_create(); the bridge must be empty.
 */
int
netmap_vale_destroy(const char *bdg_name, void *auth_token)
{
	struct nm_bridge *b = NULL;
	int ret = 0;

	NMG_LOCK();
	b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
	if (!b) {
		ret = ENXIO;
		goto unlock_bdg_free;
	}

	if (!nm_bdg_valid_auth_token(b, auth_token)) {
		ret = EACCES;
		goto unlock_bdg_free;
	}
	if (!(b->bdg_flags & NM_BDG_EXCLUSIVE)) {
		ret = EINVAL;
		goto unlock_bdg_free;
	}

	b->bdg_flags &= ~(NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE);
	ret = netmap_bdg_free(b);
	if (ret) {
		b->bdg_flags |= NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE;
	}

unlock_bdg_free:
	NMG_UNLOCK();
	return ret;
}

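#if 0	/* usage sketch for an external module; the bridge name and the
	 * local variables are made up, the API is the one above */
	int status;
	void *token;

	token = netmap_vale_create("valeX:", &status);
	if (token == NULL)
		return status;	/* EEXIST, ENOMEM, ... */
	/* ... attach/detach ports, passing 'token' as auth_token ... */
	status = netmap_vale_destroy("valeX:", token); /* must be empty */
#endif
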


/* nm_dtor callback for ephemeral VALE ports */
static void
netmap_vp_dtor(struct netmap_adapter *na)
{
	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
	struct nm_bridge *b = vpna->na_bdg;

	ND("%s has %d references", na->name, na->na_refcount);

	if (b) {
		netmap_bdg_detach_common(b, vpna->bdg_port, -1);
	}

	if (na->ifp != NULL && !nm_iszombie(na)) {
		NM_DETACH_NA(na->ifp);
		if (vpna->autodelete) {
			ND("releasing %s", na->ifp->if_xname);
			NMG_UNLOCK();
			nm_os_vi_detach(na->ifp);
			NMG_LOCK();
		}
	}
}


/* Called by external kernel modules (e.g., Openvswitch)
 * to modify the private data previously given to regops().
 * 'name' may be just the bridge's name (including ':' if it
 * is not just NM_BDG_NAME).
 * Called without NMG_LOCK.
 */
int
nm_bdg_update_private_data(const char *name, bdg_update_private_data_fn_t callback,
	void *callback_data, void *auth_token)
{
	void *private_data = NULL;
	struct nm_bridge *b;
	int error = 0;

	NMG_LOCK();
	b = nm_find_bridge(name, 0 /* don't create */, NULL);
	if (!b) {
		error = EINVAL;
		goto unlock_update_priv;
	}
	if (!nm_bdg_valid_auth_token(b, auth_token)) {
		error = EACCES;
		goto unlock_update_priv;
	}
	BDG_WLOCK(b);
	private_data = callback(b->private_data, callback_data, &error);
	b->private_data = private_data;
	BDG_WUNLOCK(b);

unlock_update_priv:
	NMG_UNLOCK();
	return error;
}

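#if 0	/* sketch of a callback matching bdg_update_private_data_fn_t,
	 * inferred from the call site above: it runs under BDG_WLOCK(b)
	 * and its return value becomes the new b->private_data
	 * (all names here are made up) */
static void *
my_update_cb(void *bdg_private, void *callback_data, int *error)
{
	*error = 0;
	return callback_data;	/* install the new private data */
}
	/* ... nm_bdg_update_private_data("valeX:", my_update_cb,
	 *         new_data, token); */
#endif
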

/* nm_krings_create callback for VALE ports.
 * Calls the standard netmap_krings_create, then adds leases on rx
 * rings and bdgfwd on tx rings.
 */
static int
netmap_vp_krings_create(struct netmap_adapter *na)
{
	u_int tailroom;
	int error, i;
	uint32_t *leases;
	u_int nrx = netmap_real_rings(na, NR_RX);

	/*
	 * Leases are attached to RX rings on vale ports
	 */
	tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;

	error = netmap_krings_create(na, tailroom);
	if (error)
		return error;

	leases = na->tailroom;

	for (i = 0; i < nrx; i++) { /* Receive rings */
		na->rx_rings[i]->nkr_leases = leases;
		leases += na->num_rx_desc;
	}

	error = nm_alloc_bdgfwd(na);
	if (error) {
		netmap_krings_delete(na);
		return error;
	}

	return 0;
}


/* nm_krings_delete callback for VALE ports. */
static void
netmap_vp_krings_delete(struct netmap_adapter *na)
{
	nm_free_bdgfwd(na);
	netmap_krings_delete(na);
}


static int
nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n,
	struct netmap_vp_adapter *na, u_int ring_nr);


/*
 * main dispatch routine for the bridge.
 * Grab packets from a kring, move them into the ft structure
 * associated with the tx (input) port. Max one instance per port,
 * filtered on input (ioctl, poll or XXX).
 * Returns the next position in the ring.
 */
static int
nm_bdg_preflush(struct netmap_kring *kring, u_int end)
{
	struct netmap_vp_adapter *na =
		(struct netmap_vp_adapter*)kring->na;
	struct netmap_ring *ring = kring->ring;
	struct nm_bdg_fwd *ft;
	u_int ring_nr = kring->ring_id;
	u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
	u_int ft_i = 0;	/* start from 0 */
	u_int frags = 1; /* how many frags ? */
	struct nm_bridge *b = na->na_bdg;

	/* To protect against modifications to the bridge we acquire a
	 * shared lock, waiting if we can sleep (if the source port is
	 * attached to a user process) or with a trylock otherwise (NICs).
	 */
	ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
	if (na->up.na_flags & NAF_BDG_MAYSLEEP)
		BDG_RLOCK(b);
	else if (!BDG_RTRYLOCK(b))
		return j;
	ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
	ft = kring->nkr_ft;

	for (; likely(j != end); j = nm_next(j, lim)) {
		struct netmap_slot *slot = &ring->slot[j];
		char *buf;

		ft[ft_i].ft_len = slot->len;
		ft[ft_i].ft_flags = slot->flags;
		ft[ft_i].ft_offset = 0;

		ND("flags is 0x%x", slot->flags);
		/* we do not use the buf changed flag, but we still need to reset it */
		slot->flags &= ~NS_BUF_CHANGED;

		/* this slot goes into a list so initialize the link field */
		ft[ft_i].ft_next = NM_FT_NULL;
		buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
			(void *)(uintptr_t)slot->ptr : NMB(&na->up, slot);
		if (unlikely(buf == NULL)) {
			RD(5, "NULL %s buffer pointer from %s slot %d len %d",
				(slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT",
				kring->name, j, ft[ft_i].ft_len);
			buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
			ft[ft_i].ft_len = 0;
			ft[ft_i].ft_flags = 0;
		}
		__builtin_prefetch(buf);
		++ft_i;
		if (slot->flags & NS_MOREFRAG) {
			frags++;
			continue;
		}
		if (unlikely(netmap_verbose && frags > 1))
			RD(5, "%d frags at %d", frags, ft_i - frags);
		ft[ft_i - frags].ft_frags = frags;
		frags = 1;
		if (unlikely((int)ft_i >= bridge_batch))
			ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
	}
	if (frags > 1) {
		/* Here ft_i > 0, ft[ft_i-1].ft_flags has NS_MOREFRAG, and we
		 * have to fix frags count. */
		frags--;
		ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;
		ft[ft_i - frags].ft_frags = frags;
		D("Truncate incomplete fragment at %d (%d frags)", ft_i, frags);
	}
	if (ft_i)
		ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
	BDG_RUNLOCK(b);
	return j;
}


/* ----- FreeBSD if_bridge hash function ------- */

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 *
 * http://www.burtleburtle.net/bob/hash/spooky.html
 */
#define mix(a, b, c)                                                    \
do {                                                                    \
	a -= b; a -= c; a ^= (c >> 13);                                 \
	b -= c; b -= a; b ^= (a << 8);                                  \
	c -= a; c -= b; c ^= (b >> 13);                                 \
	a -= b; a -= c; a ^= (c >> 12);                                 \
	b -= c; b -= a; b ^= (a << 16);                                 \
	c -= a; c -= b; c ^= (b >> 5);                                  \
	a -= b; a -= c; a ^= (c >> 3);                                  \
	b -= c; b -= a; b ^= (a << 10);                                 \
	c -= a; c -= b; c ^= (b >> 15);                                 \
} while (/*CONSTCOND*/0)


static __inline uint32_t
nm_bridge_rthash(const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; /* hash key */

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);
#define BRIDGE_RTHASH_MASK	(NM_BDG_HASH-1)
	return (c & BRIDGE_RTHASH_MASK);
}

#undef mix


/*
 * Lookup function for a learning bridge.
 * Updates the hash table with the source address,
 * and then returns the destination port index and the
 * ring in *dst_ring (at the moment we always use ring 0).
 */
uint32_t
netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
		struct netmap_vp_adapter *na, void *private_data)
{
	uint8_t *buf = ((uint8_t *)ft->ft_buf) + ft->ft_offset;
	u_int buf_len = ft->ft_len - ft->ft_offset;
	struct nm_hash_ent *ht = private_data;
	uint32_t sh, dh;
	u_int dst, mysrc = na->bdg_port;
	uint64_t smac, dmac;
	uint8_t indbuf[12];

	if (buf_len < 14) {
		return NM_BDG_NOPORT;
	}

	if (ft->ft_flags & NS_INDIRECT) {
		if (copyin(buf, indbuf, sizeof(indbuf))) {
			return NM_BDG_NOPORT;
		}
		buf = indbuf;
	}

	dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
	smac = le64toh(*(uint64_t *)(buf + 4));
	smac >>= 16;

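	/*
	 * Note on the extraction above: the destination MAC occupies
	 * buf[0..5] and the source MAC buf[6..11]; after le64toh(),
	 * masking with 0xffffffffffff keeps bytes 0-5, while the load
	 * at buf+4 shifted right by 16 keeps bytes 6-11.
	 */
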
	/*
	 * The hash is somewhat expensive, there might be some
	 * worthwhile optimizations here.
	 */
	if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
		uint8_t *s = buf+6;
		sh = nm_bridge_rthash(s); /* hash of source */
		/* update source port forwarding entry */
		na->last_smac = ht[sh].mac = smac;	/* XXX expire ? */
		ht[sh].ports = mysrc;
		if (netmap_verbose)
		    D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
			s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
	}
	dst = NM_BDG_BROADCAST;
	if ((buf[0] & 1) == 0) { /* unicast */
		dh = nm_bridge_rthash(buf); /* hash of dst */
		if (ht[dh].mac == dmac) {	/* found dst */
			dst = ht[dh].ports;
		}
	}
	return dst;
}


/*
 * Available space in the ring. Only used in VALE code
 * and only with is_rx = 1
 */
static inline uint32_t
nm_kr_space(struct netmap_kring *k, int is_rx)
{
	int space;

	if (is_rx) {
		int busy = k->nkr_hwlease - k->nr_hwcur;
		if (busy < 0)
			busy += k->nkr_num_slots;
		space = k->nkr_num_slots - 1 - busy;
	} else {
		/* XXX never used in this branch */
		space = k->nr_hwtail - k->nkr_hwlease;
		if (space < 0)
			space += k->nkr_num_slots;
	}
#if 0
	// sanity check
	if (k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_hwtail >= k->nkr_num_slots ||
		busy < 0 ||
		busy >= k->nkr_num_slots) {
		D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d",
			k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
#endif
	return space;
}

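/*
 * Example: with nkr_num_slots = 1024, nr_hwcur = 100 and
 * nkr_hwlease = 110, busy = 10 and the rx space is 1013 slots;
 * if the lease has wrapped (nkr_hwlease = 90), busy = -10 + 1024
 * = 1014 and only 9 slots are available.
 */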


/* make a lease on the kring for N positions. return the
 * lease index
 * XXX only used in VALE code and with is_rx = 1
 */
static inline uint32_t
nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
{
	uint32_t lim = k->nkr_num_slots - 1;
	uint32_t lease_idx = k->nkr_lease_idx;

	k->nkr_leases[lease_idx] = NR_NOSLOT;
	k->nkr_lease_idx = nm_next(lease_idx, lim);

	if (n > nm_kr_space(k, is_rx)) {
		D("invalid request for %d slots", n);
		panic("x");
	}
	/* XXX verify that there are n slots */
	k->nkr_hwlease += n;
	if (k->nkr_hwlease > lim)
		k->nkr_hwlease -= lim + 1;

	if (k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_hwtail >= k->nkr_num_slots ||
		k->nkr_lease_idx >= k->nkr_num_slots) {
		D("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d",
			k->na->name,
			k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
	return lease_idx;
}

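#if 0	/* sketch of the lease protocol as used by nm_bdg_flush() below;
	 * 'j_final' is a made-up name for the writer's final position */
	mtx_lock(&kring->q_lock);
	j = kring->nkr_hwlease;			/* first slot we own */
	howmany = nm_kr_space(kring, 1);	/* slots we may claim */
	lease_idx = nm_kr_lease(kring, howmany, 1);
	mtx_unlock(&kring->q_lock);
	/* ... fill the claimed slots without holding the lock ... */
	mtx_lock(&kring->q_lock);
	kring->nkr_leases[lease_idx] = j_final;	/* report completion */
	/* the oldest writer advances nr_hwtail across completed leases */
	mtx_unlock(&kring->q_lock);
#endif
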
/*
 * This flush routine supports only unicast and broadcast but a large
 * number of ports, and lets us replace the learn and dispatch functions.
 */
int
nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
		u_int ring_nr)
{
	struct nm_bdg_q *dst_ents, *brddst;
	uint16_t num_dsts = 0, *dsts;
	struct nm_bridge *b = na->na_bdg;
	u_int i, me = na->bdg_port;

	/*
	 * The work area (pointed by ft) is followed by an array of
	 * pointers to queues, dst_ents; there are NM_BDG_MAXRINGS
	 * queues per port plus one for the broadcast traffic.
	 * Then we have an array of destination indexes.
	 */
	dst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);
	dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);

	/* first pass: find a destination for each packet in the batch */
	for (i = 0; likely(i < n); i += ft[i].ft_frags) {
		uint8_t dst_ring = ring_nr; /* default, same ring as origin */
		uint16_t dst_port, d_i;
		struct nm_bdg_q *d;
		struct nm_bdg_fwd *start_ft = NULL;

		ND("slot %d frags %d", i, ft[i].ft_frags);

		if (na->up.virt_hdr_len < ft[i].ft_len) {
			ft[i].ft_offset = na->up.virt_hdr_len;
			start_ft = &ft[i];
		} else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) {
			ft[i].ft_offset = ft[i].ft_len;
			start_ft = &ft[i+1];
		} else {
			/* Drop the packet if the virtio-net header is neither
			 * contained in the first fragment nor at the very
			 * beginning of the second.
			 */
			continue;
		}
		dst_port = b->bdg_ops->lookup(start_ft, &dst_ring, na, b->private_data);
		if (netmap_verbose > 255)
			RD(5, "slot %d port %d -> %d", i, me, dst_port);
		if (dst_port >= NM_BDG_NOPORT)
			continue; /* this packet is to be dropped */
		else if (dst_port == NM_BDG_BROADCAST)
			dst_ring = 0; /* broadcasts always go to ring 0 */
		else if (unlikely(dst_port == me ||
		    !b->bdg_ports[dst_port]))
			continue;

		/* get a position in the scratch pad */
		d_i = dst_port * NM_BDG_MAXRINGS + dst_ring;
		d = dst_ents + d_i;

		/* append the first fragment to the list */
		if (d->bq_head == NM_FT_NULL) { /* new destination */
			d->bq_head = d->bq_tail = i;
			/* remember this position to be scanned later */
			if (dst_port != NM_BDG_BROADCAST)
				dsts[num_dsts++] = d_i;
		} else {
			ft[d->bq_tail].ft_next = i;
			d->bq_tail = i;
		}
		d->bq_len += ft[i].ft_frags;
	}

	/*
	 * Broadcast traffic goes to ring 0 on all destinations.
	 * So we need to add these rings to the list of ports to scan.
	 * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is
	 * expensive. We should keep a compact list of active destinations
	 * so we could shorten this loop.
	 */
	brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;
	if (brddst->bq_head != NM_FT_NULL) {
		u_int j;
		for (j = 0; likely(j < b->bdg_active_ports); j++) {
			uint16_t d_i;
			i = b->bdg_port_index[j];
			if (unlikely(i == me))
				continue;
			d_i = i * NM_BDG_MAXRINGS;
			if (dst_ents[d_i].bq_head == NM_FT_NULL)
				dsts[num_dsts++] = d_i;
		}
	}

	ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
	/* second pass: scan destinations */
	for (i = 0; i < num_dsts; i++) {
		struct netmap_vp_adapter *dst_na;
		struct netmap_kring *kring;
		struct netmap_ring *ring;
		u_int dst_nr, lim, j, d_i, next, brd_next;
		u_int needed, howmany;
		int retry = netmap_txsync_retry;
		struct nm_bdg_q *d;
		uint32_t my_start = 0, lease_idx = 0;
		int nrings;
		int virt_hdr_mismatch = 0;

		d_i = dsts[i];
		ND("second pass %d port %d", i, d_i);
		d = dst_ents + d_i;
		// XXX fix the division
		dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
		/* protect from the lookup function returning an inactive
		 * destination port
		 */
		if (unlikely(dst_na == NULL))
			goto cleanup;
		if (dst_na->up.na_flags & NAF_SW_ONLY)
			goto cleanup;
		/*
		 * The interface may be in !netmap mode in two cases:
		 * - when na is attached but not activated yet;
		 * - when na is being deactivated but is still attached.
		 */
		if (unlikely(!nm_netmap_on(&dst_na->up))) {
			ND("not in netmap mode!");
			goto cleanup;
		}

		/* there is at least one either unicast or broadcast packet */
		brd_next = brddst->bq_head;
		next = d->bq_head;
		/* we need to reserve this many slots. If fewer are
		 * available, some packets will be dropped.
		 * Packets may have multiple fragments, so there is a chance
		 * that we may not use all of the slots we have claimed,
		 * so we will need to handle the leftover ones when we
		 * regain the lock.
		 */
		needed = d->bq_len + brddst->bq_len;

		if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
			if (netmap_verbose) {
				RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
						dst_na->up.virt_hdr_len);
			}
			/* There is a virtio-net header/offloadings mismatch between
			 * source and destination. The slower mismatch datapath will
			 * be used to cope with all the mismatches.
			 */
			virt_hdr_mismatch = 1;
			if (dst_na->mfs < na->mfs) {
				/* We may need to do segmentation offloadings, and so
				 * we may need a number of destination slots greater
				 * than the number of input slots ('needed').
				 * We look for the smallest integer 'x' which satisfies:
				 *	needed * na->mfs + x * H <= x * dst_na->mfs
				 * where 'H' is the length of the longest header that may
				 * be replicated in the segmentation process (e.g. for
				 * TCPv4 we must account for ethernet header, IP header
				 * and TCPv4 header).
				 */
				KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0"));
				needed = (needed * na->mfs) /
						(dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
				ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
			}
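			/* Worked example (sketch): assuming the worst-case
			 * replicated header is H = 114 bytes (Ethernet +
			 * IPv6 + TCP), an input with na->mfs = 65536 and
			 * dst_na->mfs = 1500 needs
			 *	x = 65536 / (1500 - 114) + 1 = 48
			 * destination slots per input slot.
			 */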
		}

		ND(5, "pass 2 dst %d is %x", i, d_i);
		dst_nr = d_i & (NM_BDG_MAXRINGS-1);
		nrings = dst_na->up.num_rx_rings;
		if (dst_nr >= nrings)
			dst_nr = dst_nr % nrings;
		kring = dst_na->up.rx_rings[dst_nr];
		ring = kring->ring;
		/* the destination ring may not have been opened for RX */
		if (unlikely(ring == NULL || kring->nr_mode != NKR_NETMAP_ON))
			goto cleanup;
		lim = kring->nkr_num_slots - 1;

retry:

		if (dst_na->retry && retry) {
			/* try to get some free slot from the previous run */
			kring->nm_notify(kring, 0);
			/* actually useful only for bwraps, since there
			 * the notify will trigger a txsync on the hwna. VALE ports
			 * have dst_na->retry == 0
			 */
		}
		/* reserve the buffers in the queue and an entry
		 * to report completion, and drop lock.
		 * XXX this might become a helper function.
		 */
		mtx_lock(&kring->q_lock);
		if (kring->nkr_stopped) {
			mtx_unlock(&kring->q_lock);
			goto cleanup;
		}
		my_start = j = kring->nkr_hwlease;
		howmany = nm_kr_space(kring, 1);
		if (needed < howmany)
			howmany = needed;
		lease_idx = nm_kr_lease(kring, howmany, 1);
		mtx_unlock(&kring->q_lock);

		/* only retry if we need more than available slots */
		if (retry && needed <= howmany)
			retry = 0;

		/* copy to the destination queue */
		while (howmany > 0) {
			struct netmap_slot *slot;
			struct nm_bdg_fwd *ft_p, *ft_end;
			u_int cnt;

			/* find the queue from which we pick the next packet.
			 * NM_FT_NULL is always higher than valid indexes
			 * so we never dereference it if the other list
			 * has packets (and if both are empty we never
			 * get here).
			 */
			if (next < brd_next) {
				ft_p = ft + next;
				next = ft_p->ft_next;
			} else { /* insert broadcast */
				ft_p = ft + brd_next;
				brd_next = ft_p->ft_next;
			}
			cnt = ft_p->ft_frags; // cnt > 0
			if (unlikely(cnt > howmany))
			    break; /* no more space */
			if (netmap_verbose && cnt > 1)
				RD(5, "rx %d frags to %d", cnt, j);
			ft_end = ft_p + cnt;
			if (unlikely(virt_hdr_mismatch)) {
				bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
			} else {
				howmany -= cnt;
				do {
					char *dst, *src = ft_p->ft_buf;
					size_t copy_len = ft_p->ft_len, dst_len = copy_len;

					slot = &ring->slot[j];
					dst = NMB(&dst_na->up, slot);

					ND("send [%d] %d(%d) bytes at %s:%d",
							i, (int)copy_len, (int)dst_len,
							kring->name, j);
					/* round to a multiple of 64 */
					copy_len = (copy_len + 63) & ~63;

					if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||
						     copy_len > NETMAP_BUF_SIZE(&na->up))) {
						RD(5, "invalid len %d, down to 64", (int)copy_len);
						copy_len = dst_len = 64; // XXX
					}
					if (ft_p->ft_flags & NS_INDIRECT) {
						if (copyin(src, dst, copy_len)) {
							// invalid user pointer, pretend len is 0
							dst_len = 0;
						}
					} else {
						//memcpy(dst, src, copy_len);
						pkt_copy(src, dst, (int)copy_len);
					}
					slot->len = dst_len;
					slot->flags = (cnt << 8) | NS_MOREFRAG;
					j = nm_next(j, lim);
					needed--;
					ft_p++;
				} while (ft_p != ft_end);
				slot->flags = (cnt << 8); /* clear flag on last entry */
			}
			/* are we done ? */
			if (next == NM_FT_NULL && brd_next == NM_FT_NULL)
				break;
		}
		{
		    /* current position */
		    uint32_t *p = kring->nkr_leases; /* shorthand */
		    uint32_t update_pos;
		    int still_locked = 1;

		    mtx_lock(&kring->q_lock);
		    if (unlikely(howmany > 0)) {
			/* not used all bufs. If I am the last one
			 * I can recover the slots, otherwise I must
			 * fill them with 0 to mark empty packets.
			 */
			ND("leftover %d bufs", howmany);
			if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
			    /* yes I am the last one */
			    ND("roll back nkr_hwlease to %d", j);
			    kring->nkr_hwlease = j;
			} else {
			    while (howmany-- > 0) {
				ring->slot[j].len = 0;
				ring->slot[j].flags = 0;
				j = nm_next(j, lim);
			    }
			}
		    }
		    p[lease_idx] = j; /* report I am done */

		    update_pos = kring->nr_hwtail;

		    if (my_start == update_pos) {
			/* all slots before my_start have been reported,
			 * so scan subsequent leases to see if other ranges
			 * have been completed, and do a selwakeup or txsync.
			 */
			while (lease_idx != kring->nkr_lease_idx &&
				p[lease_idx] != NR_NOSLOT) {
			    j = p[lease_idx];
			    p[lease_idx] = NR_NOSLOT;
			    lease_idx = nm_next(lease_idx, lim);
			}
			/* j is the new 'write' position. j != my_start
			 * means there are new buffers to report
			 */
			if (likely(j != my_start)) {
				kring->nr_hwtail = j;
				still_locked = 0;
				mtx_unlock(&kring->q_lock);
				kring->nm_notify(kring, 0);
				/* this is netmap_notify for VALE ports and
				 * netmap_bwrap_notify for bwrap. The latter will
				 * trigger a txsync on the underlying hwna
				 */
				if (dst_na->retry && retry--) {
					/* XXX this is going to call nm_notify again.
					 * Only useful for bwrap in virtual machines
					 */
					goto retry;
				}
			}
		    }
		    if (still_locked)
			mtx_unlock(&kring->q_lock);
		}
cleanup:
		d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */
		d->bq_len = 0;
	}
	brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */
	brddst->bq_len = 0;
	return 0;
}

/* nm_txsync callback for VALE ports */
static int
netmap_vp_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_vp_adapter *na =
		(struct netmap_vp_adapter *)kring->na;
	u_int done;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	if (bridge_batch <= 0) { /* testing only */
		done = head; // used all
		goto done;
	}
	if (!na->na_bdg) {
		done = head;
		goto done;
	}
	if (bridge_batch > NM_BDG_BATCH)
		bridge_batch = NM_BDG_BATCH;

	done = nm_bdg_preflush(kring, head);
done:
	if (done != head)
		D("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail);
	/*
	 * packets between 'done' and 'cur' are left unsent.
	 */
	kring->nr_hwcur = done;
	kring->nr_hwtail = nm_prev(done, lim);
	if (netmap_verbose)
		D("%s ring %d flags %d", na->up.name, kring->ring_id, flags);
	return 0;
}

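/*
 * Note: the tx ring of a VALE port is self-draining: packets are
 * consumed synchronously by nm_bdg_preflush(), so hwtail is kept one
 * slot behind hwcur and the ring always appears fully available to
 * the sender.
 */
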

/* create a netmap_vp_adapter that describes a VALE port.
 * Only persistent VALE ports have a non-null ifp.
 */
static int
netmap_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
		struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	struct netmap_vp_adapter *vpna;
	struct netmap_adapter *na;
	int error = 0;
	u_int npipes = 0;
	u_int extrabufs = 0;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	vpna = nm_os_malloc(sizeof(*vpna));
	if (vpna == NULL)
		return ENOMEM;

	na = &vpna->up;

	na->ifp = ifp;
	strncpy(na->name, hdr->nr_name, sizeof(na->name));

	/* bound checking */
	na->num_tx_rings = req->nr_tx_rings;
	nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
	req->nr_tx_rings = na->num_tx_rings; /* write back */
	na->num_rx_rings = req->nr_rx_rings;
	nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
	req->nr_rx_rings = na->num_rx_rings; /* write back */
	nm_bound_var(&req->nr_tx_slots, NM_BRIDGE_RINGSIZE,
			1, NM_BDG_MAXSLOTS, NULL);
	na->num_tx_desc = req->nr_tx_slots;
	nm_bound_var(&req->nr_rx_slots, NM_BRIDGE_RINGSIZE,
			1, NM_BDG_MAXSLOTS, NULL);
	/* validate number of pipes. We want at least 1,
	 * but probably can do with some more.
	 * So let's use 2 as default (when 0 is supplied)
	 */
	nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);
	/* validate extra bufs */
	nm_bound_var(&extrabufs, 0, 0,
			128*NM_BDG_MAXSLOTS, NULL);
	req->nr_extra_bufs = extrabufs; /* write back */
	na->num_rx_desc = req->nr_rx_slots;
	/* Set the mfs to a default value, as it is needed on the VALE
	 * mismatch datapath. XXX We should set it according to the MTU
	 * known to the kernel. */
	vpna->mfs = NM_BDG_MFS_DEFAULT;
	vpna->last_smac = ~0llu;
	/*if (vpna->mfs > netmap_buf_size)  TODO netmap_buf_size is zero??
		vpna->mfs = netmap_buf_size; */
	if (netmap_verbose)
		D("max frame size %u", vpna->mfs);

	na->na_flags |= NAF_BDG_MAYSLEEP;
	/* persistent VALE ports look like hw devices
	 * with a native netmap adapter
	 */
	if (ifp)
		na->na_flags |= NAF_NATIVE;
	na->nm_txsync = netmap_vp_txsync;
	na->nm_rxsync = netmap_vp_rxsync;
	na->nm_register = netmap_vp_reg;
	na->nm_krings_create = netmap_vp_krings_create;
	na->nm_krings_delete = netmap_vp_krings_delete;
	na->nm_dtor = netmap_vp_dtor;
	ND("nr_mem_id %d", req->nr_mem_id);
	na->nm_mem = nmd ?
		netmap_mem_get(nmd):
		netmap_mem_private_new(
			na->num_tx_rings, na->num_tx_desc,
			na->num_rx_rings, na->num_rx_desc,
			req->nr_extra_bufs, npipes, &error);
	if (na->nm_mem == NULL)
		goto err;
	na->nm_bdg_attach = netmap_vp_bdg_attach;
	/* other nmd fields are set in the common routine */
	error = netmap_attach_common(na);
	if (error)
		goto err;
	*ret = vpna;
	return 0;

err:
	if (na->nm_mem != NULL)
		netmap_mem_put(na->nm_mem);
	nm_os_free(vpna);
	return error;
}

/* nm_bdg_attach callback for VALE ports
 * The na_vp port is this same netmap_adapter. There is no host port.
 */
static int
netmap_vp_bdg_attach(const char *name, struct netmap_adapter *na,
		struct nm_bridge *b)
{
	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;

	if (b->bdg_ops != &vale_bdg_ops) {
		return NM_NEED_BWRAP;
	}
	if (vpna->na_bdg) {
		return NM_NEED_BWRAP;
	}
	na->na_vp = vpna;
	strncpy(na->name, name, sizeof(na->name));
	na->na_hostvp = NULL;
	return 0;
}

static int
netmap_vale_bwrap_krings_create(struct netmap_adapter *na)
{
	int error;

	/* impersonate a netmap_vp_adapter */
	error = netmap_vp_krings_create(na);
	if (error)
		return error;
	error = netmap_bwrap_krings_create_common(na);
	if (error) {
		netmap_vp_krings_delete(na);
	}
	return error;
}

static void
netmap_vale_bwrap_krings_delete(struct netmap_adapter *na)
{
	netmap_bwrap_krings_delete_common(na);
	netmap_vp_krings_delete(na);
}

static int
netmap_vale_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
{
	struct netmap_bwrap_adapter *bna;
	struct netmap_adapter *na = NULL;
	struct netmap_adapter *hostna = NULL;
	int error;

	bna = nm_os_malloc(sizeof(*bna));
	if (bna == NULL) {
		return ENOMEM;
	}
	na = &bna->up.up;
	strncpy(na->name, nr_name, sizeof(na->name));
	na->nm_register = netmap_bwrap_reg;
	na->nm_txsync = netmap_vp_txsync;
	// na->nm_rxsync = netmap_bwrap_rxsync;
	na->nm_krings_create = netmap_vale_bwrap_krings_create;
	na->nm_krings_delete = netmap_vale_bwrap_krings_delete;
	na->nm_notify = netmap_bwrap_notify;
	bna->up.retry = 1; /* XXX maybe this should depend on the hwna */
	/* Set the mfs, needed on the VALE mismatch datapath. */
	bna->up.mfs = NM_BDG_MFS_DEFAULT;

	if (hwna->na_flags & NAF_HOST_RINGS) {
		hostna = &bna->host.up;
		hostna->nm_notify = netmap_bwrap_notify;
		bna->host.mfs = NM_BDG_MFS_DEFAULT;
	}

	error = netmap_bwrap_attach_common(na, hwna);
	if (error) {
		nm_os_free(bna);
	}
	return error;
}

int
netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create)
{
	return netmap_get_bdg_na(hdr, na, nmd, create, &vale_bdg_ops);
}


/* creates a persistent VALE port */
int
nm_vi_create(struct nmreq_header *hdr)
{
	struct nmreq_vale_newif *req =
		(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
	int error = 0;
	/* Build a nmreq_register out of the nmreq_vale_newif,
	 * so that we can call netmap_get_bdg_na(). */
	struct nmreq_register regreq;
	bzero(&regreq, sizeof(regreq));
	regreq.nr_tx_slots = req->nr_tx_slots;
	regreq.nr_rx_slots = req->nr_rx_slots;
	regreq.nr_tx_rings = req->nr_tx_rings;
	regreq.nr_rx_rings = req->nr_rx_rings;
	regreq.nr_mem_id = req->nr_mem_id;
	hdr->nr_reqtype = NETMAP_REQ_REGISTER;
	hdr->nr_body = (uintptr_t)&regreq;
	error = netmap_vi_create(hdr, 0 /* no autodelete */);
	hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
	hdr->nr_body = (uintptr_t)req;
	/* Write back to the original struct. */
	req->nr_tx_slots = regreq.nr_tx_slots;
	req->nr_rx_slots = regreq.nr_rx_slots;
	req->nr_tx_rings = regreq.nr_tx_rings;
	req->nr_rx_rings = regreq.nr_rx_rings;
	req->nr_mem_id = regreq.nr_mem_id;
	return error;
}

/* remove a persistent VALE port from the system */
int
nm_vi_destroy(const char *name)
{
	struct ifnet *ifp;
	struct netmap_vp_adapter *vpna;
	int error;

	ifp = ifunit_ref(name);
	if (!ifp)
		return ENXIO;
	NMG_LOCK();
	/* make sure this is actually a VALE port */
	if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {
		error = EINVAL;
		goto err;
	}

	vpna = (struct netmap_vp_adapter *)NA(ifp);

	/* we can only destroy ports that were created via NETMAP_BDG_NEWIF */
	if (vpna->autodelete) {
		error = EINVAL;
		goto err;
	}

	/* also make sure that nobody is using the interface */
	if (NETMAP_OWNED_BY_ANY(&vpna->up) ||
	    vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) {
		error = EBUSY;
		goto err;
	}

	NMG_UNLOCK();

	D("destroying a persistent vale interface %s", ifp->if_xname);
	/* Linux requires that all references be released
	 * before unregistering.
	 */
	netmap_detach(ifp);
	if_rele(ifp);
	nm_os_vi_detach(ifp);
	return 0;

err:
	NMG_UNLOCK();
	if_rele(ifp);
	return error;
}

static int
nm_update_info(struct nmreq_register *req, struct netmap_adapter *na)
{
	req->nr_rx_rings = na->num_rx_rings;
	req->nr_tx_rings = na->num_tx_rings;
	req->nr_rx_slots = na->num_rx_desc;
	req->nr_tx_slots = na->num_tx_desc;
	return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL,
					&req->nr_mem_id);
}


/*
 * Create a virtual interface registered to the system.
 * The interface will be attached to a bridge later.
 */
int
netmap_vi_create(struct nmreq_header *hdr, int autodelete)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	struct ifnet *ifp;
	struct netmap_vp_adapter *vpna;
	struct netmap_mem_d *nmd = NULL;
	int error;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	/* don't include VALE prefix */
	if (!strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME)))
		return EINVAL;
	if (strlen(hdr->nr_name) >= IFNAMSIZ) {
		return EINVAL;
	}
	ifp = ifunit_ref(hdr->nr_name);
	if (ifp) { /* already exists, cannot create a new one */
		error = EEXIST;
		NMG_LOCK();
		if (NM_NA_VALID(ifp)) {
			int update_err = nm_update_info(req, NA(ifp));
			if (update_err)
				error = update_err;
		}
		NMG_UNLOCK();
		if_rele(ifp);
		return error;
	}
	error = nm_os_vi_persist(hdr->nr_name, &ifp);
	if (error)
		return error;

	NMG_LOCK();
	if (req->nr_mem_id) {
		nmd = netmap_mem_find(req->nr_mem_id);
		if (nmd == NULL) {
			error = EINVAL;
			goto err_1;
		}
	}
	/* netmap_vp_create creates a struct netmap_vp_adapter */
	error = netmap_vp_create(hdr, ifp, nmd, &vpna);
	if (error) {
		D("error %d", error);
		goto err_1;
	}
	/* persist-specific routines */
	vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl;
	if (!autodelete) {
		netmap_adapter_get(&vpna->up);
	} else {
		vpna->autodelete = 1;
	}
	NM_ATTACH_NA(ifp, &vpna->up);
	/* return the updated info */
	error = nm_update_info(req, &vpna->up);
	if (error) {
		goto err_2;
	}
	ND("returning nr_mem_id %d", req->nr_mem_id);
	if (nmd)
		netmap_mem_put(nmd);
	NMG_UNLOCK();
	ND("created %s", ifp->if_xname);
	return 0;

err_2:
	netmap_detach(ifp);
err_1:
	if (nmd)
		netmap_mem_put(nmd);
	NMG_UNLOCK();
	nm_os_vi_detach(ifp);

	return error;
}

#endif /* WITH_VALE */