1/*
2 * Copyright (c) 2013 Antti Kantee.  All Rights Reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
14 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
19 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
/*
 * XXX: local stand-in for the standard struct iovec -- this build
 * environment does not provide <sys/uio.h>.  NOTE(review): presumably
 * this must stay layout-compatible with the rump kernel's struct
 * iovec, since rump_virtif_pktdeliver() consumes it -- verify if the
 * field types ever change.
 */
struct iovec {
	void *iov_base;		/* start of segment */
	unsigned long iov_len;	/* segment length in bytes */
};
31
32#include <mini-os/os.h>
33#include <mini-os/netfront.h>
34
35#include <bmk-core/errno.h>
36#include <bmk-core/memalloc.h>
37#include <bmk-core/string.h>
38#include <bmk-core/sched.h>
39
40#include <bmk-rumpuser/core_types.h>
41#include <bmk-rumpuser/rumpuser.h>
42
43#include "if_virt.h"
44#include "if_virt_user.h"
45
46/*
47 * For now, shovel the packets from the interrupt to a
48 * thread context via an intermediate set of buffers.  Need
49 * to fix this a bit down the road.
50 */
/*
 * Largest packet payload we buffer; anything bigger is dropped (and
 * logged) by myrecv().
 */
#define MAXPKT 2000
struct onepkt {
	unsigned char pkt_data[MAXPKT];	/* copied packet contents */
	int pkt_dlen;			/* number of valid bytes in pkt_data */
};
56
/*
 * Slots in the intermediate packet ring.  One slot is always left
 * empty so that viu_read == viu_write unambiguously means "empty"
 * (see the full check in myrecv()).
 */
#define NBUF 64
struct virtif_user {
	struct netfront_dev *viu_dev;	/* underlying Xen netfront device */
	struct bmk_thread *viu_rcvr;	/* pusher thread, non-NULL only while
					   it is blocked waiting for packets */
	struct bmk_thread *viu_thr;	/* pusher thread handle, for join */
	struct virtif_sc *viu_vifsc;	/* rump kernel softc, opaque here */

	int viu_read;			/* ring consumer index (pusher thread) */
	int viu_write;			/* ring producer index (interrupt) */
	int viu_dying;			/* set by VIFHYPER_DYING to stop pusher */
	struct onepkt viu_pkts[NBUF];	/* interrupt -> thread packet ring */
};
69
70/*
71 * Ok, based on how (the unmodified) netfront works, we need to
72 * consume the data here.  So store it locally (and revisit some day).
73 */
74static void
75myrecv(struct netfront_dev *dev, unsigned char *data, int dlen)
76{
77	struct virtif_user *viu = netfront_get_private(dev);
78	int nextw;
79
80	/* TODO: we should be at the correct spl already, assert how? */
81
82	nextw = (viu->viu_write+1) % NBUF;
83	/* queue full?  drop packet */
84	if (nextw == viu->viu_read) {
85		return;
86	}
87
88	if (dlen > MAXPKT) {
89		minios_printk("myrecv: pkt len %d too big\n", dlen);
90		return;
91	}
92
93	bmk_memcpy(viu->viu_pkts[viu->viu_write].pkt_data, data, dlen);
94	viu->viu_pkts[viu->viu_write].pkt_dlen = dlen;
95	viu->viu_write = nextw;
96
97	if (viu->viu_rcvr)
98		bmk_sched_wake(viu->viu_rcvr);
99}
100
/*
 * Per-interface thread: drains the intermediate packet ring filled by
 * myrecv() and delivers each packet into the rump kernel.
 *
 * Runs with interrupts masked except while blocked or while inside
 * the rump kernel; the blockprepare/irq_restore/block sequence below
 * is order-sensitive, so the wakeup from myrecv() cannot be lost.
 */
static void
pusher(void *arg)
{
	struct virtif_user *viu = arg;
	struct iovec iov;
	struct onepkt *mypkt;
	int flags;

	/* give us a rump kernel context */
	rumpuser__hyp.hyp_schedule();
	rumpuser__hyp.hyp_lwproc_newlwp(0);
	rumpuser__hyp.hyp_unschedule();

	local_irq_save(flags);
 again:
	while (!viu->viu_dying) {
		/* ring empty?  advertise ourselves as the sleeper and block */
		while (viu->viu_read == viu->viu_write) {
			viu->viu_rcvr = bmk_current;
			bmk_sched_blockprepare();
			local_irq_restore(flags);
			bmk_sched_block();
			local_irq_save(flags);
			viu->viu_rcvr = NULL;
			/* re-test viu_dying after every wakeup */
			goto again;
		}
		mypkt = &viu->viu_pkts[viu->viu_read];
		/* slot is ours until viu_read advances; safe to unmask */
		local_irq_restore(flags);

		iov.iov_base = mypkt->pkt_data;
		iov.iov_len =  mypkt->pkt_dlen;

		/* deliver with a rump kernel scheduling context held */
		rumpuser__hyp.hyp_schedule();
		rump_virtif_pktdeliver(viu->viu_vifsc, &iov, 1);
		rumpuser__hyp.hyp_unschedule();

		local_irq_save(flags);
		/* consume the slot only after delivery is complete */
		viu->viu_read = (viu->viu_read+1) % NBUF;
	}
	local_irq_restore(flags);
}
141
142int
143VIFHYPER_CREATE(int devnum, struct virtif_sc *vif_sc, uint8_t *enaddr,
144	struct virtif_user **viup)
145{
146	struct virtif_user *viu = NULL;
147	int rv, nlocks;
148
149	rumpkern_unsched(&nlocks, NULL);
150
151	viu = bmk_memalloc(sizeof(*viu), 0, BMK_MEMWHO_RUMPKERN);
152	if (viu == NULL) {
153		rv = BMK_ENOMEM;
154		goto out;
155	}
156	bmk_memset(viu, 0, sizeof(*viu));
157	viu->viu_vifsc = vif_sc;
158
159	viu->viu_dev = netfront_init(NULL, myrecv, enaddr, NULL, viu);
160	if (!viu->viu_dev) {
161		rv = BMK_EINVAL; /* ? */
162		bmk_memfree(viu, BMK_MEMWHO_RUMPKERN);
163		goto out;
164	}
165
166	viu->viu_thr = bmk_sched_create("xenifp",
167	    NULL, 1, pusher, viu, NULL, 0);
168	if (viu->viu_thr == NULL) {
169		minios_printk("fatal thread creation failure\n"); /* XXX */
170		minios_do_exit();
171	}
172
173	rv = 0;
174
175 out:
176	rumpkern_sched(nlocks, NULL);
177
178	*viup = viu;
179	return rv;
180}
181
182void
183VIFHYPER_SEND(struct virtif_user *viu,
184	struct iovec *iov, size_t iovlen)
185{
186	size_t tlen, i;
187	int nlocks;
188	void *d;
189	char *d0;
190
191	rumpkern_unsched(&nlocks, NULL);
192	/*
193	 * netfront doesn't do scatter-gather, so just simply
194	 * copy the data into one lump here.  drop packet if we
195	 * can't allocate temp memory space.
196	 */
197	if (iovlen == 1) {
198		d = iov->iov_base;
199		tlen = iov->iov_len;
200	} else {
201		for (i = 0, tlen = 0; i < iovlen; i++) {
202			tlen += iov[i].iov_len;
203		}
204
205		/*
206		 * allocate the temp space from RUMPKERN instead of BMK
207		 * since there are no huge repercussions if we fail or
208		 * succeed.
209		 */
210		d = d0 = bmk_memalloc(tlen, 0, BMK_MEMWHO_RUMPKERN);
211		if (d == NULL)
212			goto out;
213
214		for (i = 0; i < iovlen; i++) {
215			bmk_memcpy(d0, iov[i].iov_base, iov[i].iov_len);
216			d0 += iov[i].iov_len;
217		}
218	}
219
220	netfront_xmit(viu->viu_dev, d, tlen);
221
222	if (iovlen != 1)
223		bmk_memfree(d, BMK_MEMWHO_RUMPKERN);
224
225 out:
226	rumpkern_sched(nlocks, NULL);
227}
228
/*
 * Request the pusher thread to exit: raise viu_dying and wake the
 * thread if it is currently blocked waiting for packets.
 *
 * NOTE(review): viu_rcvr is read here without masking interrupts or
 * otherwise synchronizing against pusher(); presumably the caller's
 * context makes the race benign (worst case a missed wake is resolved
 * by the next packet) -- confirm against the shutdown protocol.
 */
void
VIFHYPER_DYING(struct virtif_user *viu)
{

	viu->viu_dying = 1;
	if (viu->viu_rcvr)
		bmk_sched_wake(viu->viu_rcvr);
}
237
/*
 * Final teardown, valid only after VIFHYPER_DYING() has been called.
 * Ordering matters: wait for the pusher thread to exit first, then
 * shut down the netfront device (stopping further myrecv callbacks),
 * and only then free the instance.
 */
void
VIFHYPER_DESTROY(struct virtif_user *viu)
{

	ASSERT(viu->viu_dying == 1);

	bmk_sched_join(viu->viu_thr);
	netfront_shutdown(viu->viu_dev);
	bmk_memfree(viu, BMK_MEMWHO_RUMPKERN);
}
248