/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/filio.h>	/* FIONBIO */
#include <sys/malloc.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/socket.h> /* sockaddrs */
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#elif defined(linux)
#include "bsd_glue.h"
#elif defined(__APPLE__)
#warning OSX support is only partial
#include "osx_glue.h"
#elif defined (_WIN32)
#include "win_glue.h"
#endif

/*
 * common headers
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_bdg.h>

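/* Fill in a struct nmreq_register (new control API) from the fields of
 * a legacy struct nmreq. Returns 0 on success, or ENOBUFS if the pipe
 * suffix does not fit in hdr->nr_name. */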
static int
nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr,
				struct nmreq_register *req)
{
	req->nr_offset = nmr->nr_offset;
	req->nr_memsize = nmr->nr_memsize;
	req->nr_tx_slots = nmr->nr_tx_slots;
	req->nr_rx_slots = nmr->nr_rx_slots;
	req->nr_tx_rings = nmr->nr_tx_rings;
	req->nr_rx_rings = nmr->nr_rx_rings;
	req->nr_host_tx_rings = 0;
	req->nr_host_rx_rings = 0;
	req->nr_mem_id = nmr->nr_arg2;
	req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK;
	if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) {
		/* Convert the older nmr->nr_ringid (original
		 * netmap control API) to nmr->nr_flags. */
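		/* Legacy encoding (informative): the ring index lives in the
		 * NETMAP_RING_MASK bits of nr_ringid, NETMAP_SW_RING selects
		 * the host (software) rings, and NETMAP_HW_RING restricts
		 * the binding to a single hardware ring pair. */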
		u_int regmode = NR_REG_DEFAULT;
		if (nmr->nr_ringid & NETMAP_SW_RING) {
			regmode = NR_REG_SW;
		} else if (nmr->nr_ringid & NETMAP_HW_RING) {
			regmode = NR_REG_ONE_NIC;
		} else {
			regmode = NR_REG_ALL_NIC;
		}
		req->nr_mode = regmode;
	} else {
		req->nr_mode = nmr->nr_flags & NR_REG_MASK;
	}

	/* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. */
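	/* For example, nr_name "valeA:b" with mode NR_REG_PIPE_MASTER and
	 * ring id 3 is rewritten as "valeA:b{3", the pipe syntax that the
	 * new API understands. */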
	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		char suffix[10];
		snprintf(suffix, sizeof(suffix), "%c%d",
			(req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'),
			req->nr_ringid);
		if (strlen(hdr->nr_name) + strlen(suffix)
					>= sizeof(hdr->nr_name)) {
			/* No space for the pipe suffix. */
			return ENOBUFS;
		}
		strlcat(hdr->nr_name, suffix, sizeof(hdr->nr_name));
		req->nr_mode = NR_REG_ALL_NIC;
		req->nr_ringid = 0;
	}
	req->nr_flags = nmr->nr_flags & (~NR_REG_MASK);
	if (nmr->nr_ringid & NETMAP_NO_TX_POLL) {
		req->nr_flags |= NR_NO_TX_POLL;
	}
	if (nmr->nr_ringid & NETMAP_DO_RX_POLL) {
		req->nr_flags |= NR_DO_RX_POLL;
	}
	/* nmr->nr_arg1 (nr_pipes) ignored */
	req->nr_extra_bufs = nmr->nr_arg3;

	return 0;
}

/* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs
 * (new API). The new struct is dynamically allocated. */
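/* Mapping overview (informative): a plain NIOCREGIF becomes
 * NETMAP_REQ_REGISTER; the NETMAP_BDG_* subcommands become the
 * corresponding NETMAP_REQ_VALE_* or NETMAP_REQ_PORT_HDR_* requests;
 * NIOCGINFO becomes NETMAP_REQ_VALE_LIST or NETMAP_REQ_PORT_INFO_GET. */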
static struct nmreq_header *
nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
{
	struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr));

	if (hdr == NULL) {
		goto oom;
	}

	/* Sanitize nmr->nr_name by adding the string terminator. */
	if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) {
		nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';
	}

	/* First prepare the request header. */
	hdr->nr_version = NETMAP_API; /* new API */
	strlcpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name));
	hdr->nr_options = (uintptr_t)NULL;
	hdr->nr_body = (uintptr_t)NULL;

	switch (ioctl_cmd) {
	case NIOCREGIF: {
		switch (nmr->nr_cmd) {
		case 0: {
			/* Regular NIOCREGIF operation. */
			struct nmreq_register *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
			if (nmreq_register_from_legacy(nmr, hdr, req)) {
				goto oom;
			}
			break;
		}
		case NETMAP_BDG_ATTACH: {
			struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH;
			if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) {
				goto oom;
			}
			/* Fix nr_mode, starting from nr_arg1. */
			if (nmr->nr_arg1 & NETMAP_BDG_HOST) {
				req->reg.nr_mode = NR_REG_NIC_SW;
			} else {
				req->reg.nr_mode = NR_REG_ALL_NIC;
			}
			break;
		}
		case NETMAP_BDG_DETACH: {
			struct nmreq_vale_detach *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH;
			break;
		}
		case NETMAP_BDG_VNET_HDR:
		case NETMAP_VNET_HDR_GET: {
			struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ?
				NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET;
			req->nr_hdr_len = nmr->nr_arg1;
			break;
		}
		case NETMAP_BDG_NEWIF: {
			struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
			req->nr_tx_slots = nmr->nr_tx_slots;
			req->nr_rx_slots = nmr->nr_rx_slots;
			req->nr_tx_rings = nmr->nr_tx_rings;
			req->nr_rx_rings = nmr->nr_rx_rings;
			req->nr_mem_id = nmr->nr_arg2;
			break;
		}
		case NETMAP_BDG_DELIF: {
			hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF;
			break;
		}
		case NETMAP_BDG_POLLING_ON:
		case NETMAP_BDG_POLLING_OFF: {
			struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ?
				NETMAP_REQ_VALE_POLLING_ENABLE :
				NETMAP_REQ_VALE_POLLING_DISABLE;
			switch (nmr->nr_flags & NR_REG_MASK) {
			default:
				req->nr_mode = 0; /* invalid */
				break;
			case NR_REG_ONE_NIC:
				req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU;
				break;
			case NR_REG_ALL_NIC:
				req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU;
				break;
			}
			req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK;
			req->nr_num_polling_cpus = nmr->nr_arg1;
			break;
		}
		case NETMAP_PT_HOST_CREATE:
		case NETMAP_PT_HOST_DELETE: {
			nm_prerr("Netmap passthrough not supported yet");
			nm_os_free(hdr);
			return NULL;
		}
		}
		break;
	}
	case NIOCGINFO: {
		if (nmr->nr_cmd == NETMAP_BDG_LIST) {
			struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_VALE_LIST;
			req->nr_bridge_idx = nmr->nr_arg1;
			req->nr_port_idx = nmr->nr_arg2;
		} else {
			/* Regular NIOCGINFO. */
			struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req));
			if (!req) { goto oom; }
			hdr->nr_body = (uintptr_t)req;
			hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
			req->nr_memsize = nmr->nr_memsize;
			req->nr_tx_slots = nmr->nr_tx_slots;
			req->nr_rx_slots = nmr->nr_rx_slots;
			req->nr_tx_rings = nmr->nr_tx_rings;
			req->nr_rx_rings = nmr->nr_rx_rings;
			req->nr_host_tx_rings = 0;
			req->nr_host_rx_rings = 0;
			req->nr_mem_id = nmr->nr_arg2;
		}
		break;
	}
	}

	return hdr;
oom:
	if (hdr) {
		if (hdr->nr_body) {
			nm_os_free((void *)(uintptr_t)hdr->nr_body);
		}
		nm_os_free(hdr);
	}
	nm_prerr("Failed to allocate memory for nmreq_xyz struct");

	return NULL;
}

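/* Write back to the legacy 'nmr' struct the nmreq_register fields that
 * the original control API reported to userspace. */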
static void
nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr)
{
	nmr->nr_offset = req->nr_offset;
	nmr->nr_memsize = req->nr_memsize;
	nmr->nr_tx_slots = req->nr_tx_slots;
	nmr->nr_rx_slots = req->nr_rx_slots;
	nmr->nr_tx_rings = req->nr_tx_rings;
	nmr->nr_rx_rings = req->nr_rx_rings;
	nmr->nr_arg2 = req->nr_mem_id;
	nmr->nr_arg3 = req->nr_extra_bufs;
}

/* Convert a nmreq_xyz struct (new API) back into the legacy 'nmr'
 * struct. The nmreq_xyz struct itself is not freed here; the caller
 * (netmap_ioctl_legacy()) frees what nmreq_from_legacy() allocated. */
static int
nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
{
	int ret = 0;

	/* We only write back the fields that the user expects to be
	 * written back. */
	switch (hdr->nr_reqtype) {
	case NETMAP_REQ_REGISTER: {
		struct nmreq_register *req =
			(struct nmreq_register *)(uintptr_t)hdr->nr_body;
		nmreq_register_to_legacy(req, nmr);
		break;
	}
	case NETMAP_REQ_PORT_INFO_GET: {
		struct nmreq_port_info_get *req =
			(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
		nmr->nr_memsize = req->nr_memsize;
		nmr->nr_tx_slots = req->nr_tx_slots;
		nmr->nr_rx_slots = req->nr_rx_slots;
		nmr->nr_tx_rings = req->nr_tx_rings;
		nmr->nr_rx_rings = req->nr_rx_rings;
		nmr->nr_arg2 = req->nr_mem_id;
		break;
	}
	case NETMAP_REQ_VALE_ATTACH: {
		struct nmreq_vale_attach *req =
			(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
		nmreq_register_to_legacy(&req->reg, nmr);
		break;
	}
	case NETMAP_REQ_VALE_DETACH: {
		break;
	}
	case NETMAP_REQ_VALE_LIST: {
		struct nmreq_vale_list *req =
			(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
		strlcpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name));
		nmr->nr_arg1 = req->nr_bridge_idx;
		nmr->nr_arg2 = req->nr_port_idx;
		break;
	}
	case NETMAP_REQ_PORT_HDR_SET:
	case NETMAP_REQ_PORT_HDR_GET: {
		struct nmreq_port_hdr *req =
			(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
		nmr->nr_arg1 = req->nr_hdr_len;
		break;
	}
	case NETMAP_REQ_VALE_NEWIF: {
		struct nmreq_vale_newif *req =
			(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
		nmr->nr_tx_slots = req->nr_tx_slots;
		nmr->nr_rx_slots = req->nr_rx_slots;
		nmr->nr_tx_rings = req->nr_tx_rings;
		nmr->nr_rx_rings = req->nr_rx_rings;
		nmr->nr_arg2 = req->nr_mem_id;
		break;
	}
	case NETMAP_REQ_VALE_DELIF:
	case NETMAP_REQ_VALE_POLLING_ENABLE:
	case NETMAP_REQ_VALE_POLLING_DISABLE: {
		break;
	}
	}

	return ret;
}

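/*
 * Entry point for the legacy control ioctls. NIOCGINFO and NIOCREGIF
 * are converted to NIOCCTRL requests, handed to netmap_ioctl(), and
 * the results are copied back into the legacy struct nmreq; the
 * remaining commands are handled in place. Illustrative userspace
 * sketch (the interface name is just an example):
 *
 *	struct nmreq nmr;
 *
 *	memset(&nmr, 0, sizeof(nmr));
 *	nmr.nr_version = 14;	(any value >= 14 is accepted below)
 *	strlcpy(nmr.nr_name, "em0", sizeof(nmr.nr_name));
 *	ioctl(fd, NIOCREGIF, &nmr);	(becomes NETMAP_REQ_REGISTER)
 */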
int
netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
			struct thread *td)
{
	int error = 0;

	switch (cmd) {
	case NIOCGINFO:
	case NIOCREGIF: {
		/* Request for the legacy control API. Convert it to a
		 * NIOCCTRL request. */
		struct nmreq *nmr = (struct nmreq *)data;
		struct nmreq_header *hdr;

		if (nmr->nr_version < 14) {
			nm_prerr("Minimum supported API is 14 (requested %u)",
			    nmr->nr_version);
			return EINVAL;
		}
		hdr = nmreq_from_legacy(nmr, cmd);
		if (hdr == NULL) { /* out of memory */
			return ENOMEM;
		}
		error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td,
					/*nr_body_is_user=*/0);
		if (error == 0) {
			nmreq_to_legacy(hdr, nmr);
		}
		if (hdr->nr_body) {
			nm_os_free((void *)(uintptr_t)hdr->nr_body);
		}
		nm_os_free(hdr);
		break;
	}
#ifdef WITH_VALE
	case NIOCCONFIG: {
		struct nm_ifreq *nr = (struct nm_ifreq *)data;
		error = netmap_bdg_config(nr);
		break;
	}
#endif
#ifdef __FreeBSD__
	case FIONBIO:
	case FIOASYNC:
		/* FIONBIO/FIOASYNC are no-ops. */
		break;

	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		/* Ignore these commands. */
		break;

	default:	/* allow device-specific ioctls */
	    {
		struct nmreq *nmr = (struct nmreq *)data;
		struct ifnet *ifp = ifunit_ref(nmr->nr_name);
		if (ifp == NULL) {
			error = ENXIO;
		} else {
			struct socket so;

			bzero(&so, sizeof(so));
			so.so_vnet = ifp->if_vnet;
			/* so->so_proto not null. */
			error = ifioctl(&so, cmd, data, td);
			if_rele(ifp);
		}
		break;
	    }

#else /* linux */
	default:
		error = EOPNOTSUPP;
#endif /* linux */
	}

	return error;
}