1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2018 Universita` di Pisa
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 *   1. Redistributions of source code must retain the above copyright
12 *      notice, this list of conditions and the following disclaimer.
13 *   2. Redistributions in binary form must reproduce the above copyright
14 *      notice, this list of conditions and the following disclaimer in the
15 *      documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32#include <sys/types.h>
33#include <sys/stat.h>
34#include <sys/ioctl.h>
35#include <sys/mman.h>
36#include <fcntl.h>
37#include <inttypes.h>
38#include <stdlib.h>
39#include <stdio.h>
40#include <stdarg.h>
41#include <string.h>
42#include <unistd.h>
43#include <errno.h>
44#include <net/netmap_user.h>
45#define LIBNETMAP_NOTHREADSAFE
46#include "libnetmap.h"
47
/*
 * Undo actions accumulated while building a port are recorded in a LIFO
 * list of these descriptors, rooted at nmport_d->clist.
 */
struct nmport_cleanup_d {
	struct nmport_cleanup_d *next;
	/* callback that undoes the corresponding setup step */
	void (*cleanup)(struct nmport_cleanup_d *, struct nmport_d *);
};
52
53static void
54nmport_push_cleanup(struct nmport_d *d, struct nmport_cleanup_d *c)
55{
56	c->next = d->clist;
57	d->clist = c;
58}
59
60static void
61nmport_pop_cleanup(struct nmport_d *d)
62{
63	struct nmport_cleanup_d *top;
64
65	top = d->clist;
66	d->clist = d->clist->next;
67	(*top->cleanup)(top, d);
68	nmctx_free(d->ctx, top);
69}
70
71void nmport_do_cleanup(struct nmport_d *d)
72{
73	while (d->clist != NULL) {
74		nmport_pop_cleanup(d);
75	}
76}
77
78static struct nmport_d *
79nmport_new_with_ctx(struct nmctx *ctx)
80{
81	struct nmport_d *d;
82
83	/* allocate a descriptor */
84	d = nmctx_malloc(ctx, sizeof(*d));
85	if (d == NULL) {
86		nmctx_ferror(ctx, "cannot allocate nmport descriptor");
87		goto out;
88	}
89	memset(d, 0, sizeof(*d));
90
91	nmreq_header_init(&d->hdr, NETMAP_REQ_REGISTER, &d->reg);
92
93	d->ctx = ctx;
94	d->fd = -1;
95
96out:
97	return d;
98}
99
/* Allocate a new port descriptor using the default (global) context. */
struct nmport_d *
nmport_new(void)
{
	return nmport_new_with_ctx(nmctx_get());
}
106
107
108void
109nmport_delete(struct nmport_d *d)
110{
111	nmctx_free(d->ctx, d);
112}
113
114void
115nmport_extmem_cleanup(struct nmport_cleanup_d *c, struct nmport_d *d)
116{
117	(void)c;
118
119	if (d->extmem == NULL)
120		return;
121
122	nmreq_remove_option(&d->hdr, &d->extmem->nro_opt);
123	nmctx_free(d->ctx, d->extmem);
124	d->extmem = NULL;
125}
126
127
/*
 * Ask that the port use the caller-provided memory region
 * [base, base + size) instead of kernel-allocated netmap memory, by
 * attaching a NETMAP_REQ_OPT_EXTMEM option to the register request.
 * Must be called before nmport_register(); fails with EINVAL if the
 * port is already registered or an extmem region is already set.
 * A cleanup action is pushed so that nmport_undo_parse() removes the
 * option again.  Returns 0 on success, -1 with errno set on failure.
 */
int
nmport_extmem(struct nmport_d *d, void *base, size_t size)
{
	struct nmctx *ctx = d->ctx;
	struct nmport_cleanup_d *clnup = NULL;

	if (d->register_done) {
		nmctx_ferror(ctx, "%s: cannot set extmem of an already registered port", d->hdr.nr_name);
		errno = EINVAL;
		return -1;
	}

	if (d->extmem != NULL) {
		nmctx_ferror(ctx, "%s: extmem already in use", d->hdr.nr_name);
		errno = EINVAL;
		return -1;
	}

	/* allocate the cleanup descriptor first, so that a failure of the
	 * second allocation leaves nothing to undo */
	clnup = (struct nmport_cleanup_d *)nmctx_malloc(ctx, sizeof(*clnup));
	if (clnup == NULL) {
		nmctx_ferror(ctx, "failed to allocate cleanup descriptor");
		errno = ENOMEM;
		return -1;
	}

	d->extmem = nmctx_malloc(ctx, sizeof(*d->extmem));
	if (d->extmem == NULL) {
		nmctx_ferror(ctx, "%s: cannot allocate extmem option", d->hdr.nr_name);
		nmctx_free(ctx, clnup);
		errno = ENOMEM;
		return -1;
	}
	memset(d->extmem, 0, sizeof(*d->extmem));
	d->extmem->nro_usrptr = (uintptr_t)base;
	d->extmem->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	d->extmem->nro_info.nr_memsize = size;
	nmreq_push_option(&d->hdr, &d->extmem->nro_opt);

	/* register the undo action (removes and frees the option) */
	clnup->cleanup = nmport_extmem_cleanup;
	nmport_push_cleanup(d, clnup);

	return 0;
}
171
/* cleanup descriptor for extmem regions obtained by mmap()ing a file */
struct nmport_extmem_from_file_cleanup_d {
	struct nmport_cleanup_d up;	/* base cleanup descriptor */
	void *p;			/* mmap()ed address */
	size_t size;			/* size of the mapping */
};
177
178void nmport_extmem_from_file_cleanup(struct nmport_cleanup_d *c,
179		struct nmport_d *d)
180{
181	struct nmport_extmem_from_file_cleanup_d *cc =
182		(struct nmport_extmem_from_file_cleanup_d *)c;
183
184	munmap(cc->p, cc->size);
185}
186
187int
188nmport_extmem_from_file(struct nmport_d *d, const char *fname)
189{
190	struct nmctx *ctx = d->ctx;
191	int fd = -1;
192	off_t mapsize;
193	void *p;
194	struct nmport_extmem_from_file_cleanup_d *clnup = NULL;
195
196	clnup = nmctx_malloc(ctx, sizeof(*clnup));
197	if (clnup == NULL) {
198		nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
199		errno = ENOMEM;
200		goto fail;
201	}
202
203	fd = open(fname, O_RDWR);
204	if (fd < 0) {
205		nmctx_ferror(ctx, "cannot open '%s': %s", fname, strerror(errno));
206		goto fail;
207	}
208	mapsize = lseek(fd, 0, SEEK_END);
209	if (mapsize < 0) {
210		nmctx_ferror(ctx, "failed to obtain filesize of '%s': %s", fname, strerror(errno));
211		goto fail;
212	}
213	p = mmap(0, mapsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
214	if (p == MAP_FAILED) {
215		nmctx_ferror(ctx, "cannot mmap '%s': %s", fname, strerror(errno));
216		goto fail;
217	}
218	close(fd);
219
220	clnup->p = p;
221	clnup->size = mapsize;
222	clnup->up.cleanup = nmport_extmem_from_file_cleanup;
223	nmport_push_cleanup(d, &clnup->up);
224
225	if (nmport_extmem(d, p, mapsize) < 0)
226		goto fail;
227
228	return 0;
229
230fail:
231	if (fd >= 0)
232		close(fd);
233	if (clnup != NULL) {
234		if (clnup->p != MAP_FAILED)
235			nmport_pop_cleanup(d);
236		else
237			nmctx_free(ctx, clnup);
238	}
239	return -1;
240}
241
242struct nmreq_pools_info*
243nmport_extmem_getinfo(struct nmport_d *d)
244{
245	if (d->extmem == NULL)
246		return NULL;
247	return &d->extmem->nro_info;
248}
249
/* head of the list of options */
static struct nmreq_opt_parser *nmport_opt_parsers;

/* name of the parse callback for option 'o' */
#define NPOPT_PARSER(o)		nmport_opt_##o##_parser
/* name of the parser descriptor for option 'o' */
#define NPOPT_DESC(o)		nmport_opt_##o##_desc
/* number of keys registered so far for option 'o' */
#define NPOPT_NRKEYS(o)		(NPOPT_DESC(o).nr_keys)
/* declare option 'o' with flags 'f': forward-declare its parse
 * callback, define its parser descriptor, and define a constructor
 * that links the descriptor into nmport_opt_parsers at load time
 */
#define NPOPT_DECL(o, f)						\
static int NPOPT_PARSER(o)(struct nmreq_parse_ctx *);			\
static struct nmreq_opt_parser NPOPT_DESC(o) = {			\
	.prefix = #o,							\
	.parse = NPOPT_PARSER(o),					\
	.flags = (f),							\
	.default_key = -1,						\
	.nr_keys = 0,							\
	.next = NULL,							\
};									\
static void __attribute__((constructor))				\
nmport_opt_##o##_ctor(void)						\
{									\
	NPOPT_DESC(o).next = nmport_opt_parsers;			\
	nmport_opt_parsers = &NPOPT_DESC(o);				\
}
/* compile-time description of an option key, registered into its
 * option parser at load time by nmport_opt_key_ctor()
 */
struct nmport_key_desc {
	struct nmreq_opt_parser *option;	/* owning option parser */
	const char *key;			/* key name */
	unsigned int flags;			/* NMREQ_OPTK_* flags */
	int id;					/* assigned by the ctor */
};
/*
 * Constructor-time registration of an option key: assign it the next
 * free id within its option, copy name/flags into the option's key
 * table, and record it as the option's default key if the
 * NMREQ_OPTK_DEFAULT flag is set.
 * NOTE(review): assumes o->keys has room for one more entry — verify
 * the per-option NPKEY_DECL count against the array size in libnetmap.h.
 */
static void
nmport_opt_key_ctor(struct nmport_key_desc *k)
{
	struct nmreq_opt_parser *o = k->option;
	struct nmreq_opt_key *ok;

	k->id = o->nr_keys;
	ok = &o->keys[k->id];
	ok->key = k->key;
	ok->id = k->id;
	ok->flags = k->flags;
	o->nr_keys++;
	if (ok->flags & NMREQ_OPTK_DEFAULT)
		o->default_key = ok->id;
}
/* name of the descriptor for key 'k' of option 'o' */
#define NPKEY_DESC(o, k)	nmport_opt_##o##_key_##k##_desc
/* id assigned to key 'k' of option 'o' */
#define NPKEY_ID(o, k)		(NPKEY_DESC(o, k).id)
/* declare key 'k' of option 'o' with flags 'f': define the key
 * descriptor and a constructor that registers it at load time via
 * nmport_opt_key_ctor()
 */
#define NPKEY_DECL(o, k, f)						\
static struct nmport_key_desc NPKEY_DESC(o, k) = {			\
	.option = &NPOPT_DESC(o),					\
	.key = #k,							\
	.flags = (f),							\
	.id = -1,							\
};									\
static void __attribute__((constructor))				\
nmport_opt_##o##_key_##k##_ctor(void)					\
{									\
	nmport_opt_key_ctor(&NPKEY_DESC(o, k));				\
}
/* value given for key 'k' of option 'o' in parse context 'p' */
#define nmport_key(p, o, k)	((p)->keys[NPKEY_ID(o, k)])
/* value given for the default key of option 'o' in parse context 'p' */
#define nmport_defkey(p, o)	((p)->keys[NPOPT_DESC(o).default_key])
309
/*
 * The options recognized in port names:
 *   share:  use the memory region of another port (default key: port)
 *   extmem: use a mmap()ed file as external memory (default key: file,
 *           plus optional pool-parameter overrides)
 *   conf:   override ring and slot counts in the register request
 */
NPOPT_DECL(share, 0)
	NPKEY_DECL(share, port, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
NPOPT_DECL(extmem, 0)
	NPKEY_DECL(extmem, file, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
	NPKEY_DECL(extmem, if_num, 0)
	NPKEY_DECL(extmem, if_size, 0)
	NPKEY_DECL(extmem, ring_num, 0)
	NPKEY_DECL(extmem, ring_size, 0)
	NPKEY_DECL(extmem, buf_num, 0)
	NPKEY_DECL(extmem, buf_size, 0)
NPOPT_DECL(conf, 0)
	NPKEY_DECL(conf, rings, 0)
	NPKEY_DECL(conf, host_rings, 0)
	NPKEY_DECL(conf, slots, 0)
	NPKEY_DECL(conf, tx_rings, 0)
	NPKEY_DECL(conf, rx_rings, 0)
	NPKEY_DECL(conf, host_tx_rings, 0)
	NPKEY_DECL(conf, host_rx_rings, 0)
	NPKEY_DECL(conf, tx_slots, 0)
	NPKEY_DECL(conf, rx_slots, 0)
330
331
/*
 * Parser for the "share" option: adopt the memory region of the port
 * named by the (default) "port" key.  Fails with EINVAL if a different
 * mem_id was already configured for this port.
 */
static int
NPOPT_PARSER(share)(struct nmreq_parse_ctx *p)
{
	struct nmctx *ctx = p->ctx;
	struct nmport_d *d = p->token;
	int32_t mem_id;
	const char *v = nmport_defkey(p, share);

	/* resolve the named port to its memory-region id */
	mem_id = nmreq_get_mem_id(&v, ctx);
	if (mem_id < 0)
		return -1;
	if (d->reg.nr_mem_id && d->reg.nr_mem_id != mem_id) {
		nmctx_ferror(ctx, "cannot set mem_id to %"PRId32", already set to %"PRIu16"",
				mem_id, d->reg.nr_mem_id);
		errno = EINVAL;
		return -1;
	}
	d->reg.nr_mem_id = mem_id;
	return 0;
}
352
/*
 * Parser for the "extmem" option: mmap() the file named by the
 * (default) "file" key and use it as external netmap memory; the other
 * keys, if given, override the corresponding pool parameters.
 */
static int
NPOPT_PARSER(extmem)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;
	struct nmreq_pools_info *pi;
	int i;

	d = p->token;

	if (nmport_extmem_from_file(d, nmport_key(p, extmem, file)) < 0)
		return -1;

	/* d->extmem was set up by nmport_extmem_from_file() above */
	pi = &d->extmem->nro_info;

	for  (i = 0; i < NPOPT_NRKEYS(extmem); i++) {
		const char *k = p->keys[i];
		uint32_t v;

		if (k == NULL)	/* key not given by the user */
			continue;

		/* NOTE(review): atoi() silently maps non-numeric input
		 * to 0 — presumably the kernel rejects invalid pool
		 * parameters; verify */
		v = atoi(k);
		if (i == NPKEY_ID(extmem, if_num)) {
			pi->nr_if_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, if_size)) {
			pi->nr_if_pool_objsize = v;
		} else if (i == NPKEY_ID(extmem, ring_num)) {
			pi->nr_ring_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, ring_size)) {
			pi->nr_ring_pool_objsize = v;
		} else if (i == NPKEY_ID(extmem, buf_num)) {
			pi->nr_buf_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, buf_size)) {
			pi->nr_buf_pool_objsize = v;
		}
	}
	return 0;
}
391
/*
 * Parser for the "conf" option: override ring and slot counts in the
 * register request.  "rings", "host_rings" and "slots" set both the tx
 * and rx value; the direction-specific tx_*/rx_* keys are applied
 * afterwards and therefore win over the combined keys.
 * NOTE(review): values go through atoi(), so non-numeric input
 * silently becomes 0 — presumably validated later by the kernel.
 */
static int
NPOPT_PARSER(conf)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;

	d = p->token;

	/* combined keys first ... */
	if (nmport_key(p, conf, rings) != NULL) {
		uint16_t nr_rings = atoi(nmport_key(p, conf, rings));
		d->reg.nr_tx_rings = nr_rings;
		d->reg.nr_rx_rings = nr_rings;
	}
	if (nmport_key(p, conf, host_rings) != NULL) {
		uint16_t nr_rings = atoi(nmport_key(p, conf, host_rings));
		d->reg.nr_host_tx_rings = nr_rings;
		d->reg.nr_host_rx_rings = nr_rings;
	}
	if (nmport_key(p, conf, slots) != NULL) {
		uint32_t nr_slots = atoi(nmport_key(p, conf, slots));
		d->reg.nr_tx_slots = nr_slots;
		d->reg.nr_rx_slots = nr_slots;
	}
	/* ... then the direction-specific overrides */
	if (nmport_key(p, conf, tx_rings) != NULL) {
		d->reg.nr_tx_rings = atoi(nmport_key(p, conf, tx_rings));
	}
	if (nmport_key(p, conf, rx_rings) != NULL) {
		d->reg.nr_rx_rings = atoi(nmport_key(p, conf, rx_rings));
	}
	if (nmport_key(p, conf, host_tx_rings) != NULL) {
		d->reg.nr_host_tx_rings = atoi(nmport_key(p, conf, host_tx_rings));
	}
	if (nmport_key(p, conf, host_rx_rings) != NULL) {
		d->reg.nr_host_rx_rings = atoi(nmport_key(p, conf, host_rx_rings));
	}
	if (nmport_key(p, conf, tx_slots) != NULL) {
		d->reg.nr_tx_slots = atoi(nmport_key(p, conf, tx_slots));
	}
	if (nmport_key(p, conf, rx_slots) != NULL) {
		d->reg.nr_rx_slots = atoi(nmport_key(p, conf, rx_slots));
	}
	return 0;
}
434
435void
436nmport_disable_option(const char *opt)
437{
438	struct nmreq_opt_parser *p;
439
440	for (p = nmport_opt_parsers; p != NULL; p = p->next) {
441		if (!strcmp(p->prefix, opt)) {
442			p->flags |= NMREQ_OPTF_DISABLED;
443		}
444	}
445}
446
447int
448nmport_enable_option(const char *opt)
449{
450	struct nmreq_opt_parser *p;
451
452	for (p = nmport_opt_parsers; p != NULL; p = p->next) {
453		if (!strcmp(p->prefix, opt)) {
454			p->flags &= ~NMREQ_OPTF_DISABLED;
455			return 0;
456		}
457	}
458	errno = EOPNOTSUPP;
459	return -1;
460}
461
462
463int
464nmport_parse(struct nmport_d *d, const char *ifname)
465{
466	const char *scan = ifname;
467
468	if (nmreq_header_decode(&scan, &d->hdr, d->ctx) < 0) {
469		goto err;
470	}
471
472	/* parse the register request */
473	if (nmreq_register_decode(&scan, &d->reg, d->ctx) < 0) {
474		goto err;
475	}
476
477	/* parse the options, if any */
478	if (nmreq_options_decode(scan, nmport_opt_parsers, d, d->ctx) < 0) {
479		goto err;
480	}
481	return 0;
482
483err:
484	nmport_undo_parse(d);
485	return -1;
486}
487
488void
489nmport_undo_parse(struct nmport_d *d)
490{
491	nmport_do_cleanup(d);
492	memset(&d->reg, 0, sizeof(d->reg));
493	memset(&d->hdr, 0, sizeof(d->hdr));
494}
495
496struct nmport_d *
497nmport_prepare(const char *ifname)
498{
499	struct nmport_d *d;
500
501	/* allocate a descriptor */
502	d = nmport_new();
503	if (d == NULL)
504		goto err;
505
506	/* parse the header */
507	if (nmport_parse(d, ifname) < 0)
508		goto err;
509
510	return d;
511
512err:
513	nmport_undo_prepare(d);
514	return NULL;
515}
516
517void
518nmport_undo_prepare(struct nmport_d *d)
519{
520	if (d == NULL)
521		return;
522	nmport_undo_parse(d);
523	nmport_delete(d);
524}
525
/*
 * Open /dev/netmap and issue the register request prepared in
 * d->hdr/d->reg via the NIOCCTRL ioctl.  Fails with EINVAL if called
 * twice.  Returns 0 on success, -1 on failure (with errno set by the
 * failing syscall); on failure the fd is closed again.
 */
int
nmport_register(struct nmport_d *d)
{
	struct nmctx *ctx = d->ctx;

	if (d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "%s: already registered", d->hdr.nr_name);
		return -1;
	}

	d->fd = open("/dev/netmap", O_RDWR);
	if (d->fd < 0) {
		nmctx_ferror(ctx, "/dev/netmap: %s", strerror(errno));
		goto err;
	}

	if (ioctl(d->fd, NIOCCTRL, &d->hdr) < 0) {
		struct nmreq_option *o;
		int option_errors = 0;

		/* report every option that carries a per-option error
		 * status; fall back to the generic ioctl errno only if
		 * no option reported one */
		nmreq_foreach_option(&d->hdr, o) {
			if (o->nro_status) {
				nmctx_ferror(ctx, "%s: option %s: %s",
						d->hdr.nr_name,
						nmreq_option_name(o->nro_reqtype),
						strerror(o->nro_status));
				option_errors++;
			}

		}
		if (!option_errors)
			nmctx_ferror(ctx, "%s: %s", d->hdr.nr_name, strerror(errno));
		goto err;
	}

	d->register_done = 1;

	return 0;

err:
	nmport_undo_register(d);
	return -1;
}
570
571void
572nmport_undo_register(struct nmport_d *d)
573{
574	if (d->fd >= 0)
575		close(d->fd);
576	d->fd = -1;
577	d->register_done = 0;
578}
579
580/* lookup the mem_id in the mem-list: do a new mmap() if
581 * not found, reuse existing otherwise
582 */
/* lookup the mem_id in the mem-list: do a new mmap() if
 * not found, reuse existing otherwise
 */
int
nmport_mmap(struct nmport_d *d)
{
	struct nmctx *ctx = d->ctx;
	struct nmem_d *m = NULL;
	u_int num_tx, num_rx;
	int i;

	/* mapping twice is an error, as is mapping before registration */
	if (d->mmap_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "%s: already mapped", d->hdr.nr_name);
		return -1;
	}

	if (!d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "cannot map unregistered port");
		return -1;
	}

	/* the list of memory regions is shared by all the ports in the
	 * context: protect lookup and insertion with the context lock */
	nmctx_lock(ctx);

	for (m = ctx->mem_descs; m != NULL; m = m->next)
		if (m->mem_id == d->reg.nr_mem_id)
			break;

	if (m == NULL) {
		/* first user of this mem_id: create a new descriptor */
		m = nmctx_malloc(ctx, sizeof(*m));
		if (m == NULL) {
			nmctx_ferror(ctx, "cannot allocate memory descriptor");
			goto err;
		}
		memset(m, 0, sizeof(*m));
		if (d->extmem != NULL) {
			/* external memory: already mapped by the caller */
			m->mem = (void *)((uintptr_t)d->extmem->nro_usrptr);
			m->size = d->extmem->nro_info.nr_memsize;
			m->is_extmem = 1;
		} else {
			/* kernel-provided memory: map it from the fd */
			m->mem = mmap(NULL, d->reg.nr_memsize, PROT_READ|PROT_WRITE,
					MAP_SHARED, d->fd, 0);
			if (m->mem == MAP_FAILED) {
				nmctx_ferror(ctx, "mmap: %s", strerror(errno));
				goto err;
			}
			m->size = d->reg.nr_memsize;
		}
		m->mem_id = d->reg.nr_mem_id;
		/* insert at the head of the context's region list */
		m->next = ctx->mem_descs;
		if (ctx->mem_descs != NULL)
			ctx->mem_descs->prev = m;
		ctx->mem_descs = m;
	}
	m->refcount++;

	nmctx_unlock(ctx);

	d->mem = m;

	d->nifp = NETMAP_IF(m->mem, d->reg.nr_offset);

	/* locate the contiguous run of tx rings exported to this port:
	 * skip leading entries with a zero ring offset, then span the
	 * run of non-zero entries */
	num_tx = d->reg.nr_tx_rings + d->nifp->ni_host_tx_rings;
	for (i = 0; i < num_tx && !d->nifp->ring_ofs[i]; i++)
		;
	d->first_tx_ring = i;
	for ( ; i < num_tx && d->nifp->ring_ofs[i]; i++)
		;
	d->last_tx_ring = i - 1;

	/* same for the rx rings, whose offsets follow the tx ones */
	num_rx = d->reg.nr_rx_rings + d->nifp->ni_host_rx_rings;
	for (i = 0; i < num_rx && !d->nifp->ring_ofs[i + num_tx]; i++)
		;
	d->first_rx_ring = i;
	for ( ; i < num_rx && d->nifp->ring_ofs[i + num_tx]; i++)
		;
	d->last_rx_ring = i - 1;

	d->mmap_done = 1;

	return 0;

err:
	nmctx_unlock(ctx);
	nmport_undo_mmap(d);
	return -1;
}
668
/*
 * Undo nmport_mmap(): drop this port's reference on its shared memory
 * region, unmapping and freeing the region descriptor when the last
 * reference goes away, then reset all mapping-related port fields.
 * Safe to call on a port that was never mapped.
 */
void
nmport_undo_mmap(struct nmport_d *d)
{
	struct nmem_d *m;
	struct nmctx *ctx = d->ctx;

	m = d->mem;
	if (m == NULL)
		return;
	/* the region list and refcount are shared: take the ctx lock */
	nmctx_lock(ctx);
	m->refcount--;
	if (m->refcount <= 0) {
		/* extmem regions are owned (and unmapped) by the caller */
		if (!m->is_extmem && m->mem != MAP_FAILED)
			munmap(m->mem, m->size);
		/* extract from the list and free */
		if (m->next != NULL)
			m->next->prev = m->prev;
		if (m->prev != NULL)
			m->prev->next = m->next;
		else
			ctx->mem_descs = m->next;
		nmctx_free(ctx, m);
		d->mem = NULL;
	}
	nmctx_unlock(ctx);
	/* reset the port to the "registered but unmapped" state */
	d->mmap_done = 0;
	d->mem = NULL;
	d->nifp = NULL;
	d->first_tx_ring = 0;
	d->last_tx_ring = 0;
	d->first_rx_ring = 0;
	d->last_rx_ring = 0;
	d->cur_tx_ring = 0;
	d->cur_rx_ring = 0;
}
704
/* Register the prepared port with the kernel and map its memory.
 * Returns 0 on success; on failure both steps are undone and -1 is
 * returned.
 */
int
nmport_open_desc(struct nmport_d *d)
{
	if (nmport_register(d) >= 0 && nmport_mmap(d) >= 0)
		return 0;

	nmport_undo_open_desc(d);
	return -1;
}
719
/* Undo nmport_open_desc(): unmap first, then unregister (the reverse
 * of the order in which the steps were taken).
 */
void
nmport_undo_open_desc(struct nmport_d *d)
{
	nmport_undo_mmap(d);
	nmport_undo_register(d);
}
726
727
728struct nmport_d *
729nmport_open(const char *ifname)
730{
731	struct nmport_d *d;
732
733	/* prepare the descriptor */
734	d = nmport_prepare(ifname);
735	if (d == NULL)
736		goto err;
737
738	/* open netmap and register */
739	if (nmport_open_desc(d) < 0)
740		goto err;
741
742	return d;
743
744err:
745	nmport_close(d);
746	return NULL;
747}
748
749void
750nmport_close(struct nmport_d *d)
751{
752	if (d == NULL)
753		return;
754	nmport_undo_open_desc(d);
755	nmport_undo_prepare(d);
756}
757
/*
 * Create a new, unregistered and unmapped port descriptor carrying the
 * same parsed header and register request as 'd' (including the mem_id,
 * so the clone will share d's memory region once registered).  Options
 * are not cloned; a port using extmem can only be cloned after it has
 * been registered.  Returns NULL with errno set on failure.
 */
struct nmport_d *
nmport_clone(struct nmport_d *d)
{
	struct nmport_d *c;
	struct nmctx *ctx;

	ctx = d->ctx;

	if (d->extmem != NULL && !d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "cannot clone unregistered port that is using extmem");
		return NULL;
	}

	c = nmport_new_with_ctx(ctx);
	if (c == NULL)
		return NULL;
	/* copy the output of parse */
	c->hdr = d->hdr;
	/* redirect the pointer to the body */
	c->hdr.nr_body = (uintptr_t)&c->reg;
	/* options are not cloned */
	c->hdr.nr_options = 0;
	c->reg = d->reg; /* this also copies the mem_id */
	/* put the new port in an un-registered, unmapped state */
	c->fd = -1;
	c->nifp = NULL;
	c->register_done = 0;
	c->mem = NULL;
	c->extmem = NULL;
	c->mmap_done = 0;
	c->first_tx_ring = 0;
	c->last_tx_ring = 0;
	c->first_rx_ring = 0;
	c->last_rx_ring = 0;
	c->cur_tx_ring = 0;
	c->cur_rx_ring = 0;

	return c;
}
798
/*
 * Try to transmit a packet of 'size' bytes by copying it into free
 * slots of one of the port's tx rings, starting from cur_tx_ring and
 * cycling through all of them.  Packets larger than one buffer are
 * split across consecutive slots chained with NS_MOREFRAG.
 * Returns size on success, 0 if no ring has enough free slots.
 */
int
nmport_inject(struct nmport_d *d, const void *buf, size_t size)
{
	u_int c, n = d->last_tx_ring - d->first_tx_ring + 1,
		ri = d->cur_tx_ring;

	for (c = 0; c < n ; c++, ri++) {
		/* compute current ring to use */
		struct netmap_ring *ring;
		uint32_t i, j, idx;
		size_t rem;

		if (ri > d->last_tx_ring)
			ri = d->first_tx_ring;
		ring = NETMAP_TXRING(d->nifp, ri);
		rem = size;
		j = ring->cur;
		/* advance j past the slots needed for all but the last
		 * fragment, to check that the ring has enough room */
		while (rem > ring->nr_buf_size && j != ring->tail) {
			rem -= ring->nr_buf_size;
			j = nm_ring_next(ring, j);
		}
		if (j == ring->tail && rem > 0)	/* not enough room */
			continue;
		/* copy the full-sized fragments, chained via NS_MOREFRAG */
		i = ring->cur;
		while (i != j) {
			idx = ring->slot[i].buf_idx;
			ring->slot[i].len = ring->nr_buf_size;
			ring->slot[i].flags = NS_MOREFRAG;
			nm_pkt_copy(buf, NETMAP_BUF(ring, idx), ring->nr_buf_size);
			i = nm_ring_next(ring, i);
			buf = (char *)buf + ring->nr_buf_size;
		}
		/* last fragment: remaining bytes, end of the chain */
		idx = ring->slot[i].buf_idx;
		ring->slot[i].len = rem;
		ring->slot[i].flags = 0;
		nm_pkt_copy(buf, NETMAP_BUF(ring, idx), rem);
		/* hand the slots to the kernel and remember the ring */
		ring->head = ring->cur = nm_ring_next(ring, i);
		d->cur_tx_ring = ri;
		return size;
	}
	return 0; /* fail */
}
841