/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2018 Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/filio.h>	/* FIONBIO */
#include <sys/malloc.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/socket.h> /* sockaddrs */
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#elif defined(linux)
#include "bsd_glue.h"
#elif defined(__APPLE__)
#warning OSX support is only partial
#include "osx_glue.h"
#elif defined(_WIN32)
#include "win_glue.h"
#endif

/*
 * Common headers.
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_bdg.h>

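/*
 * Fill a struct nmreq_register (new API) from the fields of a legacy
 * struct nmreq: copy the memory and ring parameters, derive nr_mode
 * from the legacy ring-selection bits, and rewrite pipe requests into
 * the new name-suffix syntax. Returns 0 on success, or ENOBUFS if the
 * pipe suffix does not fit in hdr->nr_name.
 */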
static int
nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr,
				struct nmreq_register *req)
{
	req->nr_offset = nmr->nr_offset;
	req->nr_memsize = nmr->nr_memsize;
	req->nr_tx_slots = nmr->nr_tx_slots;
	req->nr_rx_slots = nmr->nr_rx_slots;
	req->nr_tx_rings = nmr->nr_tx_rings;
	req->nr_rx_rings = nmr->nr_rx_rings;
	req->nr_host_tx_rings = 0;
	req->nr_host_rx_rings = 0;
	req->nr_mem_id = nmr->nr_arg2;
	req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK;
	if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) {
		/* Convert the ring-selection bits of the older
		 * nmr->nr_ringid (original netmap control API) into
		 * a register mode for req->nr_mode. */
		u_int regmode = NR_REG_DEFAULT;
		if (nmr->nr_ringid & NETMAP_SW_RING) {
			regmode = NR_REG_SW;
		} else if (nmr->nr_ringid & NETMAP_HW_RING) {
			regmode = NR_REG_ONE_NIC;
		} else {
			regmode = NR_REG_ALL_NIC;
		}
		req->nr_mode = regmode;
	} else {
		req->nr_mode = nmr->nr_flags & NR_REG_MASK;
	}

	/* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. */
	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		char suffix[10];
		snprintf(suffix, sizeof(suffix), "%c%d",
			(req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'),
			req->nr_ringid);
		if (strlen(hdr->nr_name) + strlen(suffix)
					>= sizeof(hdr->nr_name)) {
			/* No space for the pipe suffix. */
			return ENOBUFS;
		}
		strlcat(hdr->nr_name, suffix, sizeof(hdr->nr_name));
		req->nr_mode = NR_REG_ALL_NIC;
		req->nr_ringid = 0;
	}
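	/*
	 * E.g. a legacy NR_REG_PIPE_MASTER request on a (hypothetical)
	 * port "foo" with pipe id 3 has just been rewritten above into
	 * an NR_REG_ALL_NIC request on the port named "foo{3".
	 */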
	req->nr_flags = nmr->nr_flags & (~NR_REG_MASK);
	if (nmr->nr_ringid & NETMAP_NO_TX_POLL) {
		req->nr_flags |= NR_NO_TX_POLL;
	}
	if (nmr->nr_ringid & NETMAP_DO_RX_POLL) {
		req->nr_flags |= NR_DO_RX_POLL;
	}
	/* nmr->nr_arg1 (nr_pipes) ignored */
	req->nr_extra_bufs = nmr->nr_arg3;

	return 0;
}

/* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs
 * (new API). The new header and body are dynamically allocated and
 * must both be freed by the caller. */
static struct nmreq_header *
nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
{
	struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr));

	if (hdr == NULL) {
		goto oom;
	}

	/* Sanitize nmr->nr_name by adding the string terminator. */
	if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) {
		nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';
	}

	/* First prepare the request header. */
	hdr->nr_version = NETMAP_API; /* new API */
	strlcpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name));
	hdr->nr_options = (uintptr_t)NULL;
	hdr->nr_body = (uintptr_t)NULL;

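	/*
	 * Allocate and fill the request body matching the legacy command.
	 * Both the header and the body are freed by the caller (see
	 * netmap_ioctl_legacy()) once the request has been served.
	 */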
	switch (ioctl_cmd) {
	case NIOCREGIF: {
		switch (nmr->nr_cmd) {
		case 0: {
			/* Regular NIOCREGIF operation. */
			struct nmreq_register *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
			if (nmreq_register_from_legacy(nmr, hdr, req)) {
				goto oom;
			}
			break;
		}
		case NETMAP_BDG_ATTACH: {
			struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH;
			if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) {
				goto oom;
			}
			/* Fix nr_mode, starting from nr_arg1. */
			if (nmr->nr_arg1 & NETMAP_BDG_HOST) {
				req->reg.nr_mode = NR_REG_NIC_SW;
			} else {
				req->reg.nr_mode = NR_REG_ALL_NIC;
			}
			break;
		}
		case NETMAP_BDG_DETACH: {
			/* Check for allocation failure, like the sibling cases. */
			struct nmreq_vale_detach *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH;
			break;
		}
		case NETMAP_BDG_VNET_HDR:
		case NETMAP_VNET_HDR_GET: {
			struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ?
				NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET;
			req->nr_hdr_len = nmr->nr_arg1;
			break;
		}
		case NETMAP_BDG_NEWIF: {
			struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
			req->nr_tx_slots = nmr->nr_tx_slots;
			req->nr_rx_slots = nmr->nr_rx_slots;
			req->nr_tx_rings = nmr->nr_tx_rings;
			req->nr_rx_rings = nmr->nr_rx_rings;
			req->nr_mem_id = nmr->nr_arg2;
			break;
		}
		case NETMAP_BDG_DELIF: {
			hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF;
			break;
		}
		case NETMAP_BDG_POLLING_ON:
		case NETMAP_BDG_POLLING_OFF: {
			struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ?
				NETMAP_REQ_VALE_POLLING_ENABLE :
				NETMAP_REQ_VALE_POLLING_DISABLE;
			switch (nmr->nr_flags & NR_REG_MASK) {
			default:
				req->nr_mode = 0; /* invalid */
				break;
			case NR_REG_ONE_NIC:
				req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU;
				break;
			case NR_REG_ALL_NIC:
				req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU;
				break;
			}
			req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK;
			req->nr_num_polling_cpus = nmr->nr_arg1;
			break;
		}
		case NETMAP_PT_HOST_CREATE:
		case NETMAP_PT_HOST_DELETE: {
			nm_prerr("Netmap passthrough not supported yet");
			/* Do not leak the header allocated above. */
			nm_os_free(hdr);
			return NULL;
		}
		}
		break;
	}
	case NIOCGINFO: {
		if (nmr->nr_cmd == NETMAP_BDG_LIST) {
			struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_LIST;
			req->nr_bridge_idx = nmr->nr_arg1;
			req->nr_port_idx = nmr->nr_arg2;
		} else {
			/* Regular NIOCGINFO. */
			struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
			req->nr_memsize = nmr->nr_memsize;
			req->nr_tx_slots = nmr->nr_tx_slots;
			req->nr_rx_slots = nmr->nr_rx_slots;
			req->nr_tx_rings = nmr->nr_tx_rings;
			req->nr_rx_rings = nmr->nr_rx_rings;
			req->nr_host_tx_rings = 0;
			req->nr_host_rx_rings = 0;
			req->nr_mem_id = nmr->nr_arg2;
		}
		break;
	}
	}

	return hdr;
oom:
	if (hdr) {
		if (hdr->nr_body) {
			nm_os_free((void *)(uintptr_t)hdr->nr_body);
		}
		nm_os_free(hdr);
	}
	nm_prerr("Failed to allocate memory for nmreq_xyz struct");

	return NULL;
}

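/*
 * A minimal sketch of the userspace side of this path, assuming a
 * hypothetical netmap port "foo" and an open /dev/netmap descriptor
 * 'fd': the application still issues the legacy ioctl, which the
 * kernel converts through nmreq_from_legacy() above.
 *
 *	struct nmreq nmr;
 *
 *	memset(&nmr, 0, sizeof(nmr));
 *	strlcpy(nmr.nr_name, "foo", sizeof(nmr.nr_name));
 *	nmr.nr_version = NETMAP_API;
 *	if (ioctl(fd, NIOCREGIF, &nmr) == 0)
 *		printf("%u tx rings\n", nmr.nr_tx_rings);
 */

/*
 * Write back to the legacy 'nmr' the register fields that the kernel
 * returns to userspace.
 */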
static void
nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr)
{
	nmr->nr_offset = req->nr_offset;
	nmr->nr_memsize = req->nr_memsize;
	nmr->nr_tx_slots = req->nr_tx_slots;
	nmr->nr_rx_slots = req->nr_rx_slots;
	nmr->nr_tx_rings = req->nr_tx_rings;
	nmr->nr_rx_rings = req->nr_rx_rings;
	nmr->nr_arg2 = req->nr_mem_id;
	nmr->nr_arg3 = req->nr_extra_bufs;
}

/* Convert a nmreq_xyz struct (new API) back to the legacy 'nmr' struct.
 * The nmreq_xyz struct is not freed here; the caller
 * (netmap_ioctl_legacy()) releases it together with its header. */
static int
nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
{
	int ret = 0;

	/* Only write back the fields that the user expects to be
	 * written back. */
	switch (hdr->nr_reqtype) {
	case NETMAP_REQ_REGISTER: {
		struct nmreq_register *req =
			(struct nmreq_register *)(uintptr_t)hdr->nr_body;
		nmreq_register_to_legacy(req, nmr);
		break;
	}
	case NETMAP_REQ_PORT_INFO_GET: {
		struct nmreq_port_info_get *req =
			(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
		nmr->nr_memsize = req->nr_memsize;
		nmr->nr_tx_slots = req->nr_tx_slots;
		nmr->nr_rx_slots = req->nr_rx_slots;
		nmr->nr_tx_rings = req->nr_tx_rings;
		nmr->nr_rx_rings = req->nr_rx_rings;
		nmr->nr_arg2 = req->nr_mem_id;
		break;
	}
	case NETMAP_REQ_VALE_ATTACH: {
		struct nmreq_vale_attach *req =
			(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
		nmreq_register_to_legacy(&req->reg, nmr);
		break;
	}
	case NETMAP_REQ_VALE_DETACH: {
		break;
	}
	case NETMAP_REQ_VALE_LIST: {
		struct nmreq_vale_list *req =
			(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
		strlcpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name));
		nmr->nr_arg1 = req->nr_bridge_idx;
		nmr->nr_arg2 = req->nr_port_idx;
		break;
	}
	case NETMAP_REQ_PORT_HDR_SET:
	case NETMAP_REQ_PORT_HDR_GET: {
		struct nmreq_port_hdr *req =
			(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
		nmr->nr_arg1 = req->nr_hdr_len;
		break;
	}
	case NETMAP_REQ_VALE_NEWIF: {
		struct nmreq_vale_newif *req =
			(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
		nmr->nr_tx_slots = req->nr_tx_slots;
		nmr->nr_rx_slots = req->nr_rx_slots;
		nmr->nr_tx_rings = req->nr_tx_rings;
		nmr->nr_rx_rings = req->nr_rx_rings;
		nmr->nr_arg2 = req->nr_mem_id;
		break;
	}
	case NETMAP_REQ_VALE_DELIF:
	case NETMAP_REQ_VALE_POLLING_ENABLE:
	case NETMAP_REQ_VALE_POLLING_DISABLE: {
		break;
	}
	}

	return ret;
}

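/*
 * Entry point for the legacy control ioctls. NIOCGINFO and NIOCREGIF
 * requests are converted to the new NIOCCTRL representation, dispatched
 * to netmap_ioctl(), and the results are copied back into the caller's
 * struct nmreq.
 */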
int
netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
			struct thread *td)
{
	int error = 0;

	switch (cmd) {
	case NIOCGINFO:
	case NIOCREGIF: {
		/* Request for the legacy control API. Convert it to a
		 * NIOCCTRL request. */
		struct nmreq *nmr = (struct nmreq *)data;
		struct nmreq_header *hdr;

		if (nmr->nr_version < 14) {
			nm_prerr("Minimum supported API is 14 (requested %u)",
			    nmr->nr_version);
			return EINVAL;
		}
		hdr = nmreq_from_legacy(nmr, cmd);
		if (hdr == NULL) { /* out of memory */
			return ENOMEM;
		}
		error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td,
					/*nr_body_is_user=*/0);
		if (error == 0) {
			nmreq_to_legacy(hdr, nmr);
		}
		if (hdr->nr_body) {
			nm_os_free((void *)(uintptr_t)hdr->nr_body);
		}
		nm_os_free(hdr);
		break;
	}
#ifdef WITH_VALE
	case NIOCCONFIG: {
		struct nm_ifreq *nr = (struct nm_ifreq *)data;
		error = netmap_bdg_config(nr);
		break;
	}
#endif
#ifdef __FreeBSD__
	case FIONBIO:
	case FIOASYNC:
		/* FIONBIO/FIOASYNC are no-ops. */
		break;

	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		/* Ignore these commands. */
		break;

	default:	/* allow device-specific ioctls */
	    {
		struct nmreq *nmr = (struct nmreq *)data;
		if_t ifp = ifunit_ref(nmr->nr_name);
		if (ifp == NULL) {
			error = ENXIO;
		} else {
			struct socket so;

			bzero(&so, sizeof(so));
			so.so_vnet = if_getvnet(ifp);
			/* Note: so.so_proto is not initialized here. */
			error = ifioctl(&so, cmd, data, td);
			if_rele(ifp);
		}
		break;
	    }

#else /* linux */
	default:
		error = EOPNOTSUPP;
#endif /* linux */
	}

	return error;
}