/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/xen/netfront/netfront.c 225709 2011-09-21 00:15:29Z gibbs $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <machine/xen/xenvar.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

/* Features supported by all backends.  TSO and LRO can be negotiated */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

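/*
 * Each shared ring occupies a single page; __RING_SIZE() computes the
 * number of request/response slots (a power of two) that fit in it.
 */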
#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for the change to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4	0
#define CSUM_TSO	0

#endif

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)

#define RX_COPY_THRESHOLD 256

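/*
 * Stub for Linux's net_ratelimit().  Always evaluating to 0 compiles out
 * the rate-limited warnings that are guarded by it below.
 */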
#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int  xn_assemble_tx_request(struct netfront_info *sc,
				   struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int  xn_configure_features(struct netfront_info *np);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);
#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int  xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
	struct mbuf **list, int *pages_flipped_p);

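/*
 * virt_to_mfn() translates a kernel virtual address into the machine frame
 * number backing it.  INVALID_P2M_ENTRY marks a pfn whose machine page has
 * been handed away, as happens when flipping receive pages to the backend.
 */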
#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
	int		xn_tx_chain_cnt;
	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
};

#define NUM_ELEMENTS(x) (sizeof(x)/sizeof(*x))

struct net_device_stats
{
	u_long	rx_packets;		/* total packets received	*/
	u_long	tx_packets;		/* total packets transmitted	*/
	u_long	rx_bytes;		/* total bytes received 	*/
	u_long	tx_bytes;		/* total bytes transmitted	*/
	u_long	rx_errors;		/* bad packets received		*/
	u_long	tx_errors;		/* packet transmit problems	*/
	u_long	rx_dropped;		/* no space in buffers		*/
	u_long	tx_dropped;		/* no space available		*/
	u_long	multicast;		/* multicast packets received	*/
	u_long	collisions;

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error */
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};

struct netfront_info {

	struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl xn_lro;
#endif

	struct net_device_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx   tx_lock;
	struct mtx   rx_lock;
	struct mtx   sc_lock;

	u_int handle;
	u_int irq;
	u_int copying_receiver;
	u_int carrier;
	u_int maxfrags;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target;
	int rx_max_target;
	int rx_target;

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */

	int			xn_if_flags;
	struct callout	        xn_stat_ch;

	u_long			rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
	struct ifmedia		sc_media;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
        mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
        mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
        mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)

#define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock)
#define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock)

#define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED)
#define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED)
#define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED)
#define XN_LOCK_DESTROY(_sc)   mtx_destroy(&(_sc)->rx_lock); \
                               mtx_destroy(&(_sc)->tx_lock); \
                               mtx_destroy(&(_sc)->sc_lock)

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/*
 * Free-list management for the tx_mbufs[] array.
 *
 * Entry 0 never holds an mbuf; it anchors the chain of free indices.  A
 * free slot stores the index of the next free slot, cast to a pointer,
 * which is why any value <= NET_TX_RING_SIZE found in the array is a
 * free-list link rather than a live mbuf pointer.
 */

static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{
	KASSERT(id != 0,
		("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0]  = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
		("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}

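/*
 * Ring indices are free-running counters; because the ring sizes are powers
 * of two, masking with (size - 1) recovers the actual slot index.
 */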
static inline int
xennet_rxidx(RING_IDX idx)
{
	return (idx & (NET_RX_RING_SIZE - 1));
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];

	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	np->grant_rx_ref[i] = GRANT_REF_INVALID;
	return (ref);
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;
	const char *path;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * enabled.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree.  For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
		 * However, there should always be a mac listed in the
		 * backend tree.  Fall back to that version if our query
		 * of the front side XenStore location doesn't find
		 * anything.
		 */
		path = xenbus_get_otherend_path(dev);
		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	}
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
		return (error);
	}

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}

/**
 * Entry point to this code when a new device is created.  Claim devices
 * of type 'vif'; the allocation of structures and shared rings, and the
 * handshake with the backend, happen later at attach time.
 */
static int
netfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return (err);
	}

#if __FreeBSD_version >= 700000
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");
#endif

	return (0);
}

static int
netfront_suspend(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);
	return (0);
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	netif_disconnect_backend(info);
	return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

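	/*
	 * XenStore updates are transactional.  If the commit below fails
	 * with EAGAIN, the transaction raced with a concurrent update and
	 * the whole series of writes must be retried from the top.
	 */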
 again:
	err = xs_transaction_start(&xst);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xs_printf(xst, node, "tx-ring-ref", "%u",
			info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "rx-ring-ref", "%u",
			info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node,
			"event-channel", "%u", irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "request-rx-copy", "%u",
			info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
#if __FreeBSD_version >= 700000
	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xs_transaction_end(xst, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return (0);

 abort_transaction:
	xs_transaction_end(xst, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return (err);
}

static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;
	struct ifnet *ifp;

	ifp = info->xn_ifp;

	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	info->irq = 0;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

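	/*
	 * Allocate an event channel bound to the backend domain and attach
	 * xn_intr() as its handler.  The port backing info->irq is what we
	 * later advertise in the event-channel XenStore node.
	 */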
	error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);

	if (error) {
		xenbus_dev_fatal(dev, error,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}

	show_device(info);

	return (0);

 fail:
	netif_free(info);
	return (error);
}

#ifdef INET
/**
 * If this interface has an IPv4 address, send an ARP for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
#ifdef INET
		netfront_send_fake_arp(dev, sc);
#endif
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
			m_freem(sc->rx_mbufs[i]);
			sc->rx_mbufs[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->tx_mbufs[i] != NULL) {
			m_freem(sc->tx_mbufs[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{
	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}

static void
netif_release_tx_bufs(struct netfront_info *np)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = np->tx_mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE.  Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
		}
		m_free(m);
	}
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	int nr_flips;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (unlikely(sc->carrier == 0))
		return;

	/*
	 * Allocate mbufs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, and so should reduce the chance of failed allocation
	 * requests both for ourselves and for other kernel subsystems.
	 *
	 * Here we attempt to maintain rx_target buffers in flight, counting
	 * buffers that we have yet to process in the receive ring.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: MGETHDR failed\n", __func__);
			goto no_mbuf;
		}

		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
		if ((m_new->m_flags & M_EXT) == 0) {
			printf("%s: m_cljget failed\n", __func__);
			m_freem(m_new);

no_mbuf:
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		mbufq_tail(&sc->xn_rx_batch, m_new);
	}

	/*
	 * If we've allocated at least half of our target number of entries,
	 * submit them to the backend - we have enough to make the overhead
	 * of submission worthwhile.  Otherwise wait for more mbufs and
	 * request entries to become available.
	 */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}

	/*
	 * Double the floating fill target if we risked having the backend
	 * run out of empty buffers for receive traffic.  We define "running
	 * low" as having less than a fourth of our target buffers free
	 * at the time we refilled the queue.
	 */
	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
		sc->rx_target *= 2;
		if (sc->rx_target > sc->rx_max_target)
			sc->rx_target = sc->rx_max_target;
	}

refill:
	for (nr_flips = i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
				vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
		sc->rx_mbufs[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT(ref != GNTTAB_LIST_END,
			("reserved grant references exhausted"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

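		/*
		 * Two receive modes are possible.  With page flipping, the
		 * page backing the mbuf is transferred to the backend
		 * outright and its PTE and P2M entries are torn down here.
		 * With copying, the backend simply copies each packet into
		 * the granted page.
		 */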
		if (sc->copying_receiver == 0) {
			gnttab_grant_foreign_transfer_ref(ref,
			    otherend_id, pfn);
			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/*
				 * Remove this page before passing
				 * back to Xen.
				 */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(&sc->rx_mcl[i],
				    vaddr, 0, 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    otherend_id,
			    PFNTOMFN(pfn), 0);
		}
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	PT_UPDATES_FLUSH();
	if (nr_flips != 0) {
#ifdef notyet
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);
#endif
		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
		reservation.nr_extents   = i;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* After all PTEs have been zapped, flush the TLB. */
			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
			    UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			sc->rx_mcl[i].args[1] = (u_long)&reservation;
			/* Zap PTEs and give away pages in one big multicall. */
			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

			/* Check return status of HYPERVISOR_memory_op(). */
			if (unlikely(sc->rx_mcl[i].result != i))
				panic("Unable to reduce memory reservation\n");
		} else {
			if (HYPERVISOR_memory_op(
			    XENMEM_decrease_reservation, &reservation) != i)
				panic("Unable to reduce memory "
				    "reservation\n");
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);
}

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while (i != rp) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
			    &pages_flipped);

			if (unlikely(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/*
				 * Tell the stack the checksums are okay.
				 * XXX this isn't necessarily the case -
				 * need to add a check.
				 */
				m->m_pkthdr.csum_flags |=
					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
					    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
#ifdef notyet
			balloon_update_driver_allowance(-pages_flipped);
#endif
			/*
			 * Do all the remapping work and M2P updates in one
			 * big hypercall.
			 */
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
			((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				       __func__, txr->status);
			}
			id = txr->id;
			m = np->tx_mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
				("mbuf already on the free list, but we're "
				"trying to free it again!"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				ifp->if_opackets++;
			if (unlikely(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				panic("grant id %u still in use by the backend",
				      id);
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_REF_INVALID;

			np->tx_mbufs[id] = NULL;
			add_id_to_freelist(np->tx_mbufs, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			m_free(m);
			/*
			 * Clear OACTIVE: we have freed at least one slot,
			 * so another transmit attempt is worthwhile.
			 */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons. Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending. Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}

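/*
 * Interrupt handler.  A single event channel signals activity on both
 * rings: reap transmit completions, process received packets, and then
 * restart transmission if the interface is running and work is queued.
 */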
static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}

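/*
 * Return an mbuf and its grant reference to the receive ring by attaching
 * them to the next unconsumed request slot.  Used to recycle the resources
 * of a response that could not be turned into a packet.
 */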
static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
	grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

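/*
 * Consume the extra-info slots (e.g. GSO metadata) that follow a response
 * flagged NETRXF_extra_info.  Slots consumed here carry no packet data, so
 * each one's mbuf and grant are recycled via xennet_move_rx_slot().
 */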
static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;
	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (unlikely(*cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		RING_GET_RESPONSE(&np->rx, ++(*cons));

		if (unlikely(!extra->type ||
			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
					extra->type);
#endif
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, *cons);
		ref = xennet_get_rx_ref(np, *cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return (err);
}

static int
xennet_get_responses(struct netfront_info *np,
	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
	struct mbuf  **list,
	int *pages_flipped_p)
{
	int pages_flipped = *pages_flipped_p;
	struct mmu_update *mmu;
	struct multicall_entry *mcl;
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
	RING_IDX ref_cons = *cons;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
		u_long mfn;

#if 0
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
			rx->status, rx->offset, frags);
#endif
		if (unlikely(rx->status < 0 ||
			rx->offset + rx->status > PAGE_SIZE)) {
#if 0
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
					rx->offset, rx->status);
#endif
			xennet_move_rx_slot(np, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or
		 * in the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {
#if 0
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
			err = EINVAL;
			goto next;
		}

		if (!np->copying_receiver) {
			/*
			 * Memory pressure, insufficient buffer
			 * headroom, ...
			 */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
					rx->id, rx->status);
				xennet_move_rx_slot(np, m, ref);
				err = ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				void *vaddr = mtod(m, void *);
				uint32_t pfn;

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl, (u_long)vaddr,
				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
				    PG_V | PG_M | PG_A, 0);
				pfn = (uintptr_t)m->m_ext.ext_arg1;
				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
				    MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref);
			KASSERT(ret, ("ret != 0"));
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			       __func__, *cons, frags, rp);
			break;
		}

		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
		m = xennet_get_rx_mbuf(np, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}
	*list = m0;
	*cons += frags;
	*pages_flipped_p = pages_flipped;

	return (err);
}

static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);
}

/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
	struct ifnet *ifp;
	struct mbuf *m;
	u_int nfrags;
	netif_extra_info_t *extra;
	int otherend_id;

	ifp = sc->xn_ifp;

	/*
	 * Check whether this request has more fragments than netback can
	 * handle, and defragment the mbuf chain if so.  It is a bit lame,
	 * but the netback driver in Linux can't deal with nfrags >
	 * MAX_TX_REQ_FRAGS, which is a quirk of the Linux network stack.
	 */
	nfrags = xn_count_frags(m_head);
	if (nfrags > sc->maxfrags) {
		m = m_defrag(m_head, M_DONTWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check whether the defragmented packet still has too many
	 * segments for the Linux netback driver.  The FreeBSD TCP stack,
	 * with TSO enabled, can produce a chain of mbufs longer than Linux
	 * can handle.  Make sure we don't pass a too-long chain over to
	 * the other side by dropping the packet.  It doesn't look like
	 * there is currently a way to tell the TCP stack to generate a
	 * shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		       "won't be able to handle it, dropping\n",
		       __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant.  We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size.  Keep
	 * it in here as an assert for now just to make certain that
	 * xn_tx_chain_cnt is accurate.
	 */
	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
		("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
		 "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
		 (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	extra = NULL;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
		id = get_id_from_freelist(sc->tx_mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		sc->xn_cdata.xn_tx_chain_cnt++;
		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
		sc->tx_mbufs[id] = m;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = sc->grant_tx_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size. The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.  CSUM_TSO requires checksum offloading;
			 * some versions of FreeBSD fail to set CSUM_TCP in
			 * the CSUM_TSO case, so we have to test for
			 * CSUM_TSO explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
#if __FreeBSD_version >= 700000
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
					(struct netif_extra_info *)
					RING_GET_REQUEST(&sc->tx,
							 ++sc->tx.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
					XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
#endif
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		sc->tx.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	sc->stats.tx_bytes += m_head->m_pkthdr.len;
	sc->stats.tx_packets++;

	return (0);
}

static void
xn_start_locked(struct ifnet *ifp)
{
	struct netfront_info *sc;
	struct mbuf *m_head;
	int notify;

	sc = ifp->if_softc;

	if (!netfront_carrier_ok(sc))
		return;

	/*
	 * While we have enough transmit slots available for at least one
	 * maximum-sized packet, pull mbufs off the queue and put them on
	 * the transmit ring.
	 */
	while (xn_tx_slot_available(sc)) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (xn_assemble_tx_request(sc, m_head) != 0)
			break;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);

	if (RING_FULL(&sc->tx)) {
		sc->tx_full = 1;
#if 0
		netif_stop_queue(dev);
#endif
	}
}

static void
xn_start(struct ifnet *ifp)
{
	struct netfront_info *sc;

	sc = ifp->if_softc;
	XN_TX_LOCK(sc);
	xn_start_locked(ifp);
	XN_TX_UNLOCK(sc);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xn_stop(sc);

	network_alloc_rx_buffers(sc);
	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);

	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
#ifdef INET
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFMTU:
		/* XXX can we alter the MTU on a VN? */
#ifdef notyet
		if (ifr->ifr_mtu > XN_JUMBO_MTU)
			error = EINVAL;
		else
#endif
		{
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			xn_ifinit(sc);
		}
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
					  XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
					  XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				IPRINTK("Xen requires tx checksum offload"
				    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
		}
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
			error = 0;
		}
#endif
		/* FALLTHROUGH */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

1917/* START of Xenolinux helper functions adapted to FreeBSD */
1918int
1919network_connect(struct netfront_info *np)
1920{
1921	int i, requeue_idx, error;
1922	grant_ref_t ref;
1923	netif_rx_request_t *req;
1924	u_int feature_rx_copy, feature_rx_flip;
1925
1926	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1927	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
1928	if (error)
1929		feature_rx_copy = 0;
1930	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1931	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
1932	if (error)
1933		feature_rx_flip = 1;
1934
1935	/*
1936	 * Copy packets on receive path if:
1937	 *  (a) This was requested by user, and the backend supports it; or
1938	 *  (b) Flipping was requested, but this is unsupported by the backend.
1939	 */
1940	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
1941				(MODPARM_rx_flip && !feature_rx_flip));
1942
1943	/* Recovery procedure: */
1944	error = talk_to_backend(np->xbdev, np);
1945	if (error)
1946		return (error);
1947
1948	/* Step 1: Reinitialise variables. */
1949	xn_query_features(np);
1950	xn_configure_features(np);
1951	netif_release_tx_bufs(np);
1952
1953	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1954	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1955		struct mbuf *m;
1956		u_long pfn;
1957
1958		if (np->rx_mbufs[i] == NULL)
1959			continue;
1960
1961		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
1962		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1963
1964		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1965		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
1966
1967		if (!np->copying_receiver) {
1968			gnttab_grant_foreign_transfer_ref(ref,
1969			    xenbus_get_otherend_id(np->xbdev),
1970			    pfn);
1971		} else {
1972			gnttab_grant_foreign_access_ref(ref,
1973			    xenbus_get_otherend_id(np->xbdev),
1974			    PFNTOMFN(pfn), 0);
1975		}
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some packets.
	 */
	netfront_carrier_on(np);
	notify_remote_via_irq(np->irq);
	XN_TX_LOCK(np);
	xn_txeof(np);
	XN_TX_UNLOCK(np);
	network_alloc_rx_buffers(np);

	return (0);
}

static void
show_device(struct netfront_info *sc)
{
#ifdef DEBUG
	if (sc) {
		IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
			sc->xn_ifno,
			be_state_name[sc->xn_backend_state],
			sc->xn_user_state ? "open" : "closed",
			sc->xn_evtchn,
			sc->xn_irq,
			sc->xn_tx_if,
			sc->xn_rx_if);
	} else {
		IPRINTK("<vif NULL>\n");
	}
#endif
}

static void
xn_query_features(struct netfront_info *np)
{
	int val;

	device_printf(np->xbdev, "backend features:");

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-sg", NULL, "%d", &val) < 0)
		val = 0;

	np->maxfrags = 1;
	if (val) {
		np->maxfrags = MAX_TX_REQ_FRAGS;
		printf(" feature-sg");
	}

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-gso-tcpv4", NULL, "%d", &val) < 0)
		val = 0;

	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
	if (val) {
		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
		printf(" feature-gso-tcpv4");
	}

	printf("\n");
}
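/*
 * For illustration only (paths follow the usual Xen conventions; the exact
 * domid and handle vary): a backend advertising both features has xenstore
 * nodes resembling
 *
 *   backend/vif/<domid>/<handle>/feature-sg        = "1"
 *   backend/vif/<domid>/<handle>/feature-gso-tcpv4 = "1"
 *
 * and the probe above then logs something like
 *   xn0: backend features: feature-sg feature-gso-tcpv4
 */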

static int
xn_configure_features(struct netfront_info *np)
{
	int err;

	err = 0;
#if __FreeBSD_version >= 700000
	if ((np->xn_ifp->if_capenable & IFCAP_LRO) != 0)
		tcp_lro_free(&np->xn_lro);
#endif
	np->xn_ifp->if_capenable =
	    np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4);
	np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if __FreeBSD_version >= 700000
	if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) != 0) {
		err = tcp_lro_init(&np->xn_lro);
		if (err) {
			device_printf(np->xbdev, "LRO initialization failed\n");
		} else {
			np->xn_lro.ifp = np->xn_ifp;
			np->xn_ifp->if_capenable |= IFCAP_LRO;
		}
	}
	if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) != 0) {
		np->xn_ifp->if_capenable |= IFCAP_TSO4;
		np->xn_ifp->if_hwassist |= CSUM_TSO;
	}
#endif
	return (err);
}

/**
 * Create a network device.
 *
 * @param dev  The newbus device representing this virtual interface.
 */
int
create_netdev(device_t dev)
{
	int i;
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev         = dev;

	XN_LOCK_INIT(np, xennetif);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	np->rx_target     = RX_MIN_TARGET;
	np->rx_min_target = RX_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/* Initialise tx_mbufs to be a free chain containing every entry. */
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_mbufs[i] = (void *) ((u_long) i+1);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
	}
	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
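	/*
	 * The free chain stores the index of the next free slot directly
	 * in each mbuf pointer slot.  After the loop above:
	 *
	 *   tx_mbufs[0] = 1, tx_mbufs[1] = 2, ...,
	 *   tx_mbufs[NET_TX_RING_SIZE] = NULL (end of chain)
	 *
	 * so grabbing a slot is a pop of the chain head and releasing one
	 * is a push, with no separate free-list bookkeeping.
	 */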

	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
		np->rx_mbufs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_REF_INVALID;
	}
	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &np->gref_tx_head) != 0) {
		IPRINTK("#### netfront can't alloc tx grant refs\n");
		err = ENOMEM;
		goto out;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) != 0) {
		WPRINTK("#### netfront can't alloc rx grant refs\n");
		err = ENOMEM;
		goto exit;
	}

	err = xen_net_read_mac(dev, np->mac);
	if (err) {
		gnttab_free_grant_references(np->gref_rx_head);
		goto exit;
	}

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn",  device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;

	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
	netfront_carrier_off(np);

	return (0);

exit:
	/*
	 * Release the tx grant references; rx references, when allocated,
	 * are released before jumping here.
	 */
	gnttab_free_grant_references(np->gref_tx_head);
out:
	return (err);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}
#endif
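/*
 * For reference, the xenbus handshake this would take part in is roughly
 * (frontend view):
 *
 *   XenbusStateInitialising -> Connected -> Closing -> Closed
 *
 * with the Closing step driven by the backend state change delivered to
 * netfront_backend_changed() via the method table below.
 */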

static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return (0);
}

static void
netif_free(struct netfront_info *info)
{
	netif_disconnect_backend(info);
#if 0
	close_netdev(info);
#endif
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
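	/*
	 * Teardown order: take the carrier down under both ring locks so
	 * in-flight RX/TX processing sees the interface as disconnected,
	 * then release the shared rings, and finally unbind the interrupt
	 * handler.
	 */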
	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);

	free_ring(&info->tx_ring_ref, &info->tx.sring);
	free_ring(&info->rx_ring_ref, &info->rx.sring);

	if (info->irq)
		unbind_from_irqhandler(info->irq);

	info->irq = 0;
}

static void
free_ring(int *ref, void *ring_ptr_ref)
{
	void **ring_ptr_ptr = ring_ptr_ref;

	if (*ref != GRANT_REF_INVALID) {
		/* This API frees the associated storage. */
		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
		*ref = GRANT_REF_INVALID;
	}
	*ring_ptr_ptr = NULL;
}

static int
xn_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,         netfront_probe),
	DEVMETHOD(device_attach,        netfront_attach),
	DEVMETHOD(device_detach,        netfront_detach),
	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
	DEVMETHOD(device_suspend,       netfront_suspend),
	DEVMETHOD(device_resume,        netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	{ 0, 0 }
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, 0, 0);
