/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>

#include <net/sock.h>

#include "vhost.h"

/* Max number of bytes transferred before requeueing the job
 * (0x80000 = 512 KiB). Using this limit prevents one virtqueue
 * from starving the others. */
#define VHOST_NET_WEIGHT 0x80000

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};
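
/* State transitions: DISABLED while no backend socket is attached;
 * STARTED while we are registered for POLLOUT on the backend socket
 * (its send buffer filled up); STOPPED otherwise. All transitions
 * happen under the TX virtqueue mutex. */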

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when the socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
};

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;
	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}
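
/* Example: with from = {{p, 4}, {q, 8}} and len = 6, move_iovec_hdr()
 * fills to = {{p, 4}, {q, 2}}, leaves from = {{p + 4, 0}, {q + 2, 6}},
 * and returns 2. */
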
/* Copy iovec entries for len bytes from iovec; unlike move_iovec_hdr(),
 * the source iovec is left unmodified. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;
	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return;
	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	net->tx_poll_state = VHOST_NET_POLL_STARTED;
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock)
		return;

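	/* If the socket send buffer is already full, don't transmit now:
	 * arm POLLOUT polling so handle_tx_net() reschedules us once
	 * there is room again. */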
	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf) {
		mutex_lock(&vq->mutex);
		tx_poll_start(net, sock);
		mutex_unlock(&vq->mutex);
		return;
	}

	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->vhost_hlen;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
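			/* Stop early if the send buffer is 3/4 full and
			 * rely on POLLOUT rather than a guest kick to
			 * resume transmission. */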
			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(vq))) {
				vhost_disable_notify(vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
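		/* First argument is the kiocb; NULL requests a plain
		 * synchronous in-kernel send. */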
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			vhost_discard_vq_desc(vq, 1);
			tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 "len %d != %zd\n", err, len);
		vhost_add_used_and_signal(&net->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}

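/* Return the length of the first packet queued on the socket, or 0 if
 * the receive queue is empty. The socket lock protects the peek. */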
static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;

	lock_sock(sk);
	head = skb_peek(&sk->sk_receive_queue);
	if (head)
		len = head->len;
	release_sock(sk);
	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), for use when
 * the vq holds read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- returned count of log entries used
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0) {
		if (unlikely(seg >= VHOST_NET_MAX_SG)) {
			r = -ENOBUFS;
			goto err;
		}
		/* vhost_get_vq_desc() returns a negative errno on failure;
		 * check that before using the value as a descriptor index. */
		r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;
		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
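	/* datalen is now <= 0: fold the negative overshoot back into the
	 * last head so the recorded lengths sum to the exact data size. */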
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx_big(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned out, in, log, s;
	int head;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};

	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
		return;

	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);
	hdr_size = vq->vhost_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 vq_log, &log);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (out) {
			vq_err(vq, "Unexpected descriptor format for RX: "
			       "out %d, in %d\n",
			       out, in);
			break;
		}
		/* Skip header. TODO: support TSO/mergeable rx buffers. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
		msg.msg_iovlen = in;
		len = iov_length(vq->iov, in);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for RX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
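		/* MSG_TRUNC makes recvmsg() return the full packet length
		 * even when the buffer is too small, so err > len below
		 * detects a truncated packet. */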
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 len, MSG_DONTWAIT | MSG_TRUNC);
		/* TODO: Check specific error and bomb out unless EAGAIN? */
		if (err < 0) {
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (err > len) {
			pr_debug("Discarded truncated rx packet: "
				 "len %d > %zd\n", err, len);
			vhost_discard_vq_desc(vq, 1);
			continue;
		}
		len = err;
		err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
		if (err) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
			       vq->iov->iov_base, err);
			break;
		}
		len += hdr_size;
		vhost_add_used_and_signal(&net->dev, vq, head, len);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, len);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx_mergeable(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};

	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	size_t total_len = 0;
	int err, headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
		return;

	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;
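	/* vhost_hlen is the vnet header vhost itself writes into the guest
	 * buffer; sock_hlen is the header the backend socket already
	 * prepends to each packet. */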

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely(vhost_hlen))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 "len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}

static void handle_rx(struct vhost_net *net)
{
	if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
		handle_rx_mergeable(net);
	else
		handle_rx_big(net);
}

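/* The *_kick handlers below run when the guest signals a virtqueue's
 * kick eventfd; the *_net handlers run when poll reports activity on
 * the backend socket. All of them funnel into handle_tx()/handle_rx(). */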
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock = vq->private_data;
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_cleanup(&n->dev);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;
	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;
	/* special case to disable backend */
	if (fd == -1)
		return NULL;
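	/* Try a raw packet socket first, then fall back to tun/macvtap. */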
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vhost_net_enable_vq(n, vq);
	}

	mutex_unlock(&vq->mutex);

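	/* Flush any work that may still reference the old socket before
	 * dropping our reference to it. */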
	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
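	/* The mergeable-rxbuf variant appends a num_buffers field,
	 * hence the larger header. */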
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	/* Publish the new feature bits before updating the per-vq
	 * header sizes below. */
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;
	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
		vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
};

static struct miscdevice vhost_net_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "vhost-net",
	.fops  = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");