1/*
2 * Copyright (C) 2018 Vincenzo Maffione
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *   1. Redistributions of source code must retain the above copyright
9 *      notice, this list of conditions and the following disclaimer.
10 *   2. Redistributions in binary form must reproduce the above copyright
11 *      notice, this list of conditions and the following disclaimer in the
12 *      documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26/* $FreeBSD: stable/11/sys/dev/netmap/netmap_legacy.c 364756 2020-08-25 11:12:30Z vmaffione $ */
27
28#if defined(__FreeBSD__)
29#include <sys/cdefs.h> /* prerequisite */
30#include <sys/types.h>
31#include <sys/param.h>	/* defines used in kernel.h */
32#include <sys/filio.h>	/* FIONBIO */
33#include <sys/malloc.h>
34#include <sys/socketvar.h>	/* struct socket */
35#include <sys/socket.h> /* sockaddrs */
36#include <sys/sysctl.h>
37#include <net/if.h>
38#include <net/if_var.h>
39#include <net/bpf.h>		/* BIOCIMMEDIATE */
40#include <machine/bus.h>	/* bus_dmamap_* */
41#include <sys/endian.h>
42#elif defined(linux)
43#include "bsd_glue.h"
44#elif defined(__APPLE__)
45#warning OSX support is only partial
46#include "osx_glue.h"
47#elif defined (_WIN32)
48#include "win_glue.h"
49#endif
50
51/*
52 * common headers
53 */
54#include <net/netmap.h>
55#include <dev/netmap/netmap_kern.h>
56#include <dev/netmap/netmap_bdg.h>
57
58static int
59nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr,
60				struct nmreq_register *req)
61{
62	req->nr_offset = nmr->nr_offset;
63	req->nr_memsize = nmr->nr_memsize;
64	req->nr_tx_slots = nmr->nr_tx_slots;
65	req->nr_rx_slots = nmr->nr_rx_slots;
66	req->nr_tx_rings = nmr->nr_tx_rings;
67	req->nr_rx_rings = nmr->nr_rx_rings;
68	req->nr_mem_id = nmr->nr_arg2;
69	req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK;
70	if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) {
71		/* Convert the older nmr->nr_ringid (original
72		 * netmap control API) to nmr->nr_flags. */
73		u_int regmode = NR_REG_DEFAULT;
74		if (nmr->nr_ringid & NETMAP_SW_RING) {
75			regmode = NR_REG_SW;
76		} else if (nmr->nr_ringid & NETMAP_HW_RING) {
77			regmode = NR_REG_ONE_NIC;
78		} else {
79			regmode = NR_REG_ALL_NIC;
80		}
81		req->nr_mode = regmode;
82	} else {
83		req->nr_mode = nmr->nr_flags & NR_REG_MASK;
84	}
85
86	/* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. */
87	if (req->nr_mode == NR_REG_PIPE_MASTER ||
88			req->nr_mode == NR_REG_PIPE_SLAVE) {
89		char suffix[10];
90		snprintf(suffix, sizeof(suffix), "%c%d",
91			(req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'),
92			req->nr_ringid);
93		if (strlen(hdr->nr_name) + strlen(suffix)
94					>= sizeof(hdr->nr_name)) {
95			/* No space for the pipe suffix. */
96			return ENOBUFS;
97		}
98		strncat(hdr->nr_name, suffix, strlen(suffix));
99		req->nr_mode = NR_REG_ALL_NIC;
100		req->nr_ringid = 0;
101	}
102	req->nr_flags = nmr->nr_flags & (~NR_REG_MASK);
103	if (nmr->nr_ringid & NETMAP_NO_TX_POLL) {
104		req->nr_flags |= NR_NO_TX_POLL;
105	}
106	if (nmr->nr_ringid & NETMAP_DO_RX_POLL) {
107		req->nr_flags |= NR_DO_RX_POLL;
108	}
109	/* nmr->nr_arg1 (nr_pipes) ignored */
110	req->nr_extra_bufs = nmr->nr_arg3;
111
112	return 0;
113}
114
115/* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs
116 * (new API). The new struct is dynamically allocated. */
117static struct nmreq_header *
118nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
119{
120	struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr));
121
122	if (hdr == NULL) {
123		goto oom;
124	}
125
126	/* Sanitize nmr->nr_name by adding the string terminator. */
127	if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) {
128		nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';
129	}
130
131	/* First prepare the request header. */
132	hdr->nr_version = NETMAP_API; /* new API */
133	strlcpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name));
134	hdr->nr_options = (uintptr_t)NULL;
135	hdr->nr_body = (uintptr_t)NULL;
136
137	switch (ioctl_cmd) {
138	case NIOCREGIF: {
139		switch (nmr->nr_cmd) {
140		case 0: {
141			/* Regular NIOCREGIF operation. */
142			struct nmreq_register *req = nm_os_malloc(sizeof(*req));
143			if (!req) { goto oom; }
144			hdr->nr_body = (uintptr_t)req;
145			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
146			if (nmreq_register_from_legacy(nmr, hdr, req)) {
147				goto oom;
148			}
149			break;
150		}
151		case NETMAP_BDG_ATTACH: {
152			struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req));
153			if (!req) { goto oom; }
154			hdr->nr_body = (uintptr_t)req;
155			hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH;
156			if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) {
157				goto oom;
158			}
159			/* Fix nr_mode, starting from nr_arg1. */
160			if (nmr->nr_arg1 & NETMAP_BDG_HOST) {
161				req->reg.nr_mode = NR_REG_NIC_SW;
162			} else {
163				req->reg.nr_mode = NR_REG_ALL_NIC;
164			}
165			break;
166		}
167		case NETMAP_BDG_DETACH: {
168			hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH;
169			hdr->nr_body = (uintptr_t)nm_os_malloc(sizeof(struct nmreq_vale_detach));
170			break;
171		}
172		case NETMAP_BDG_VNET_HDR:
173		case NETMAP_VNET_HDR_GET: {
174			struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req));
175			if (!req) { goto oom; }
176			hdr->nr_body = (uintptr_t)req;
177			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ?
178				NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET;
179			req->nr_hdr_len = nmr->nr_arg1;
180			break;
181		}
182		case NETMAP_BDG_NEWIF : {
183			struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req));
184			if (!req) { goto oom; }
185			hdr->nr_body = (uintptr_t)req;
186			hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
187			req->nr_tx_slots = nmr->nr_tx_slots;
188			req->nr_rx_slots = nmr->nr_rx_slots;
189			req->nr_tx_rings = nmr->nr_tx_rings;
190			req->nr_rx_rings = nmr->nr_rx_rings;
191			req->nr_mem_id = nmr->nr_arg2;
192			break;
193		}
194		case NETMAP_BDG_DELIF: {
195			hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF;
196			break;
197		}
198		case NETMAP_BDG_POLLING_ON:
199		case NETMAP_BDG_POLLING_OFF: {
200			struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req));
201			if (!req) { goto oom; }
202			hdr->nr_body = (uintptr_t)req;
203			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ?
204				NETMAP_REQ_VALE_POLLING_ENABLE :
205				NETMAP_REQ_VALE_POLLING_DISABLE;
206			switch (nmr->nr_flags & NR_REG_MASK) {
207			default:
208				req->nr_mode = 0; /* invalid */
209				break;
210			case NR_REG_ONE_NIC:
211				req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU;
212				break;
213			case NR_REG_ALL_NIC:
214				req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU;
215				break;
216			}
217			req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK;
218			req->nr_num_polling_cpus = nmr->nr_arg1;
219			break;
220		}
221		case NETMAP_PT_HOST_CREATE:
222		case NETMAP_PT_HOST_DELETE: {
223			nm_prerr("Netmap passthrough not supported yet");
224			return NULL;
225			break;
226		}
227		}
228		break;
229	}
230	case NIOCGINFO: {
231		if (nmr->nr_cmd == NETMAP_BDG_LIST) {
232			struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req));
233			if (!req) { goto oom; }
234			hdr->nr_body = (uintptr_t)req;
235			hdr->nr_reqtype = NETMAP_REQ_VALE_LIST;
236			req->nr_bridge_idx = nmr->nr_arg1;
237			req->nr_port_idx = nmr->nr_arg2;
238		} else {
239			/* Regular NIOCGINFO. */
240			struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req));
241			if (!req) { goto oom; }
242			hdr->nr_body = (uintptr_t)req;
243			hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
244			req->nr_memsize = nmr->nr_memsize;
245			req->nr_tx_slots = nmr->nr_tx_slots;
246			req->nr_rx_slots = nmr->nr_rx_slots;
247			req->nr_tx_rings = nmr->nr_tx_rings;
248			req->nr_rx_rings = nmr->nr_rx_rings;
249			req->nr_mem_id = nmr->nr_arg2;
250		}
251		break;
252	}
253	}
254
255	return hdr;
256oom:
257	if (hdr) {
258		if (hdr->nr_body) {
259			nm_os_free((void *)(uintptr_t)hdr->nr_body);
260		}
261		nm_os_free(hdr);
262	}
263	nm_prerr("Failed to allocate memory for nmreq_xyz struct");
264
265	return NULL;
266}
267
268static void
269nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr)
270{
271	nmr->nr_offset = req->nr_offset;
272	nmr->nr_memsize = req->nr_memsize;
273	nmr->nr_tx_slots = req->nr_tx_slots;
274	nmr->nr_rx_slots = req->nr_rx_slots;
275	nmr->nr_tx_rings = req->nr_tx_rings;
276	nmr->nr_rx_rings = req->nr_rx_rings;
277	nmr->nr_arg2 = req->nr_mem_id;
278	nmr->nr_arg3 = req->nr_extra_bufs;
279}
280
281/* Convert a nmreq_xyz struct (new API) to the legacy 'nmr' struct.
282 * It also frees the nmreq_xyz struct, as it was allocated by
283 * nmreq_from_legacy(). */
284static int
285nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
286{
287	int ret = 0;
288
289	/* We only write-back the fields that the user expects to be
290	 * written back. */
291	switch (hdr->nr_reqtype) {
292	case NETMAP_REQ_REGISTER: {
293		struct nmreq_register *req =
294			(struct nmreq_register *)(uintptr_t)hdr->nr_body;
295		nmreq_register_to_legacy(req, nmr);
296		break;
297	}
298	case NETMAP_REQ_PORT_INFO_GET: {
299		struct nmreq_port_info_get *req =
300			(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
301		nmr->nr_memsize = req->nr_memsize;
302		nmr->nr_tx_slots = req->nr_tx_slots;
303		nmr->nr_rx_slots = req->nr_rx_slots;
304		nmr->nr_tx_rings = req->nr_tx_rings;
305		nmr->nr_rx_rings = req->nr_rx_rings;
306		nmr->nr_arg2 = req->nr_mem_id;
307		break;
308	}
309	case NETMAP_REQ_VALE_ATTACH: {
310		struct nmreq_vale_attach *req =
311			(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
312		nmreq_register_to_legacy(&req->reg, nmr);
313		break;
314	}
315	case NETMAP_REQ_VALE_DETACH: {
316		break;
317	}
318	case NETMAP_REQ_VALE_LIST: {
319		struct nmreq_vale_list *req =
320			(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
321		strlcpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name));
322		nmr->nr_arg1 = req->nr_bridge_idx;
323		nmr->nr_arg2 = req->nr_port_idx;
324		break;
325	}
326	case NETMAP_REQ_PORT_HDR_SET:
327	case NETMAP_REQ_PORT_HDR_GET: {
328		struct nmreq_port_hdr *req =
329			(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
330		nmr->nr_arg1 = req->nr_hdr_len;
331		break;
332	}
333	case NETMAP_REQ_VALE_NEWIF: {
334		struct nmreq_vale_newif *req =
335			(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
336		nmr->nr_tx_slots = req->nr_tx_slots;
337		nmr->nr_rx_slots = req->nr_rx_slots;
338		nmr->nr_tx_rings = req->nr_tx_rings;
339		nmr->nr_rx_rings = req->nr_rx_rings;
340		nmr->nr_arg2 = req->nr_mem_id;
341		break;
342	}
343	case NETMAP_REQ_VALE_DELIF:
344	case NETMAP_REQ_VALE_POLLING_ENABLE:
345	case NETMAP_REQ_VALE_POLLING_DISABLE: {
346		break;
347	}
348	}
349
350	return ret;
351}
352
/*
 * Entry point for the legacy (pre-NIOCCTRL) netmap ioctls.
 *
 * NIOCGINFO and NIOCREGIF requests are converted to the new control
 * API (NIOCCTRL), handed to netmap_ioctl(), and the result is written
 * back into the user-visible legacy struct. Other commands are either
 * VALE configuration (NIOCCONFIG), FreeBSD no-ops, or forwarded to the
 * underlying interface's ioctl handler.
 *
 * Returns 0 on success or an errno value on failure.
 */
int
netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
			struct thread *td)
{
	int error = 0;

	switch (cmd) {
	case NIOCGINFO:
	case NIOCREGIF: {
		/* Request for the legacy control API. Convert it to a
		 * NIOCCTRL request. */
		struct nmreq *nmr = (struct nmreq *) data;
		struct nmreq_header *hdr;

		/* Legacy API versions older than 11 are not supported. */
		if (nmr->nr_version < 11) {
			nm_prerr("Minimum supported API is 11 (requested %u)",
			    nmr->nr_version);
			return EINVAL;
		}
		hdr = nmreq_from_legacy(nmr, cmd);
		if (hdr == NULL) { /* out of memory */
			return ENOMEM;
		}
		/* The converted request body lives in kernel memory, hence
		 * nr_body_is_user=0. */
		error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td,
					/*nr_body_is_user=*/0);
		if (error == 0) {
			/* Copy the results back into the user's legacy
			 * struct (write-back currently cannot fail:
			 * nmreq_to_legacy() always returns 0). */
			nmreq_to_legacy(hdr, nmr);
		}
		/* Free the structs allocated by nmreq_from_legacy():
		 * body first, then header. */
		if (hdr->nr_body) {
			nm_os_free((void *)(uintptr_t)hdr->nr_body);
		}
		nm_os_free(hdr);
		break;
	}
#ifdef WITH_VALE
	case NIOCCONFIG: {
		/* VALE switch configuration, handled by the bridge code. */
		struct nm_ifreq *nr = (struct nm_ifreq *)data;
		error = netmap_bdg_config(nr);
		break;
	}
#endif
#ifdef __FreeBSD__
	case FIONBIO:
	case FIOASYNC:
		/* FIONBIO/FIOASYNC are no-ops. */
		break;

	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		/* Ignore these commands. */
		break;

	default:	/* allow device-specific ioctls */
	    {
		/* Forward unknown ioctls to the interface named in the
		 * request, through a zero-filled temporary socket. */
		struct nmreq *nmr = (struct nmreq *)data;
		struct ifnet *ifp = ifunit_ref(nmr->nr_name);
		if (ifp == NULL) {
			error = ENXIO;
		} else {
			struct socket so;

			bzero(&so, sizeof(so));
			so.so_vnet = ifp->if_vnet;
			/* NOTE(review): only so_vnet is initialized here
			 * (original note read "so->so_proto not null");
			 * confirm ifioctl() does not dereference other
			 * socket fields for these commands. */
			error = ifioctl(&so, cmd, data, td);
			if_rele(ifp);
		}
		break;
	    }

#else /* linux */
	default:
		/* No device-specific ioctl forwarding on Linux. */
		error = EOPNOTSUPP;
#endif /* linux */
	}

	return error;
}
433